[kernel] r15304 - in dists/sid/linux-2.6/debian: . config config/alpha config/ia64 config/kernelarch-x86 config/mips config/powerpc config/sparc patches/debian patches/features/all patches/series

Ben Hutchings benh at alioth.debian.org
Tue Mar 2 00:09:50 UTC 2010


Author: benh
Date: Tue Mar  2 00:09:46 2010
New Revision: 15304

Log:
drm: Apply all changes from 2.6.33

Add nouveau driver.
i915: Restore video overlay support (Closes: #560033).
radeon: Enable KMS support.

Added:
   dists/sid/linux-2.6/debian/patches/debian/drm-restore-private-list_sort.patch
   dists/sid/linux-2.6/debian/patches/debian/drm-staging-2.6.32.9-2.6.33.patch
   dists/sid/linux-2.6/debian/patches/features/all/drm-2.6.32.9-2.6.33.patch
   dists/sid/linux-2.6/debian/patches/features/all/radeon-autoload-without-CONFIG_DRM_RADEON_KMS.patch
Modified:
   dists/sid/linux-2.6/debian/changelog
   dists/sid/linux-2.6/debian/config/alpha/config
   dists/sid/linux-2.6/debian/config/config
   dists/sid/linux-2.6/debian/config/ia64/config
   dists/sid/linux-2.6/debian/config/kernelarch-x86/config
   dists/sid/linux-2.6/debian/config/mips/config.4kc-malta
   dists/sid/linux-2.6/debian/config/mips/config.5kc-malta
   dists/sid/linux-2.6/debian/config/powerpc/config
   dists/sid/linux-2.6/debian/config/sparc/config
   dists/sid/linux-2.6/debian/patches/series/10

Modified: dists/sid/linux-2.6/debian/changelog
==============================================================================
--- dists/sid/linux-2.6/debian/changelog	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/changelog	Tue Mar  2 00:09:46 2010	(r15304)
@@ -14,6 +14,12 @@
   [ Bastian Blank ]
   * Add support for Xen dom0 into its featureset.
 
+  [ Ben Hutchings ]
+  * drm: Apply all changes from 2.6.33:
+    - Add nouveau driver
+    - i915: Restore video overlay support (Closes: #560033)
+    - radeon: Enable KMS support
+
  -- maximilian attems <maks at debian.org>  Thu, 25 Feb 2010 13:07:47 +0100
 
 linux-2.6 (2.6.32-9) unstable; urgency=high

Modified: dists/sid/linux-2.6/debian/config/alpha/config
==============================================================================
--- dists/sid/linux-2.6/debian/config/alpha/config	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/alpha/config	Tue Mar  2 00:09:46 2010	(r15304)
@@ -200,6 +200,14 @@
 CONFIG_DRM_SIS=m
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+# CONFIG_DRM_NOUVEAU_DEBUG is not set
+CONFIG_DRM_I2C_CH7006=m
+
+##
 ## file: drivers/hid/usbhid/Kconfig
 ##
 CONFIG_USB_HID=m

Modified: dists/sid/linux-2.6/debian/config/config
==============================================================================
--- dists/sid/linux-2.6/debian/config/config	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/config	Tue Mar  2 00:09:46 2010	(r15304)
@@ -319,11 +319,21 @@
 CONFIG_DRM_SAVAGE=m
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+# CONFIG_DRM_NOUVEAU is not set
+
+##
 ## file: drivers/gpu/drm/radeon/Kconfig
 ##
 # CONFIG_DRM_RADEON_KMS is not set
 
 ##
+## file: drivers/gpu/drm/vmwgfx/Kconfig
+##
+# CONFIG_DRM_VMWGFX is not set
+
+##
 ## file: drivers/hid/Kconfig
 ##
 CONFIG_HID_SUPPORT=y
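
A note on how these config fragments combine, since the global "is not set"
line above is easy to misread: roughly, debian/config/config supplies the
common defaults and the per-architecture files (alpha/config,
kernelarch-x86/config, and so on) override them, the more specific file
winning when both set an option. So nouveau is off by default and enabled
per architecture, e.g. (lines taken from this commit):

  ## debian/config/config (common default)
  # CONFIG_DRM_NOUVEAU is not set

  ## debian/config/kernelarch-x86/config (override added by this commit)
  CONFIG_DRM_NOUVEAU=m
  CONFIG_DRM_NOUVEAU_BACKLIGHT=y

Only the x86 and powerpc configs enable the backlight option, presumably
because those are the architectures where nouveau-driven laptop panels
are found.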

Modified: dists/sid/linux-2.6/debian/config/ia64/config
==============================================================================
--- dists/sid/linux-2.6/debian/config/ia64/config	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/ia64/config	Tue Mar  2 00:09:46 2010	(r15304)
@@ -136,6 +136,14 @@
 CONFIG_DRM_SIS=m
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+# CONFIG_DRM_NOUVEAU_DEBUG is not set
+CONFIG_DRM_I2C_CH7006=m
+
+##
 ## file: drivers/hid/usbhid/Kconfig
 ##
 CONFIG_USB_HID=m

Modified: dists/sid/linux-2.6/debian/config/kernelarch-x86/config
==============================================================================
--- dists/sid/linux-2.6/debian/config/kernelarch-x86/config	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/kernelarch-x86/config	Tue Mar  2 00:09:46 2010	(r15304)
@@ -368,6 +368,14 @@
 CONFIG_DRM_SIS=m
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+# CONFIG_DRM_NOUVEAU_DEBUG is not set
+CONFIG_DRM_I2C_CH7006=m
+
+##
 ## file: drivers/hid/usbhid/Kconfig
 ##
 CONFIG_USB_HID=m

Modified: dists/sid/linux-2.6/debian/config/mips/config.4kc-malta
==============================================================================
--- dists/sid/linux-2.6/debian/config/mips/config.4kc-malta	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/mips/config.4kc-malta	Tue Mar  2 00:09:46 2010	(r15304)
@@ -260,6 +260,14 @@
 CONFIG_DRM_SAVAGE=m
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+# CONFIG_DRM_NOUVEAU_DEBUG is not set
+CONFIG_DRM_I2C_CH7006=m
+
+##
 ## file: drivers/hwmon/Kconfig
 ##
 CONFIG_HWMON=y

Modified: dists/sid/linux-2.6/debian/config/mips/config.5kc-malta
==============================================================================
--- dists/sid/linux-2.6/debian/config/mips/config.5kc-malta	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/mips/config.5kc-malta	Tue Mar  2 00:09:46 2010	(r15304)
@@ -252,6 +252,14 @@
 CONFIG_DRM_SAVAGE=m
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+# CONFIG_DRM_NOUVEAU_DEBUG is not set
+CONFIG_DRM_I2C_CH7006=m
+
+##
 ## file: drivers/hwmon/Kconfig
 ##
 CONFIG_HWMON=y

Modified: dists/sid/linux-2.6/debian/config/powerpc/config
==============================================================================
--- dists/sid/linux-2.6/debian/config/powerpc/config	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/powerpc/config	Tue Mar  2 00:09:46 2010	(r15304)
@@ -178,6 +178,14 @@
 # CONFIG_DRM_SIS is not set
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+# CONFIG_DRM_NOUVEAU_DEBUG is not set
+CONFIG_DRM_I2C_CH7006=m
+
+##
 ## file: drivers/hid/usbhid/Kconfig
 ##
 CONFIG_USB_HID=m

Modified: dists/sid/linux-2.6/debian/config/sparc/config
==============================================================================
--- dists/sid/linux-2.6/debian/config/sparc/config	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/config/sparc/config	Tue Mar  2 00:09:46 2010	(r15304)
@@ -57,6 +57,14 @@
 CONFIG_DRM_MGA=m
 
 ##
+## file: drivers/gpu/drm/nouveau/Kconfig
+##
+CONFIG_DRM_NOUVEAU=m
+# CONFIG_DRM_NOUVEAU_BACKLIGHT is not set
+# CONFIG_DRM_NOUVEAU_DEBUG is not set
+CONFIG_DRM_I2C_CH7006=m
+
+##
 ## file: drivers/hid/usbhid/Kconfig
 ##
 CONFIG_USB_HID=y

Added: dists/sid/linux-2.6/debian/patches/debian/drm-restore-private-list_sort.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/debian/drm-restore-private-list_sort.patch	Tue Mar  2 00:09:46 2010	(r15304)
@@ -0,0 +1,140 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Subject: [PATCH] drm: Revert "lib: Introduce generic list_sort function"
+
+This reverts commit 2c761270d5520dd84ab0b4e47c24d99ff8503c38
+"lib: Introduce generic list_sort function" in drivers/gpu/drm, which
+was included in drm-2.6.32.9-2.6.33.patch.  We don't want to include
+that change from 2.6.33.  However, we do declare list_sort() as static.
+
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -1,4 +1,9 @@
+ /*
++ * The list_sort function is (presumably) licensed under the GPL (see the
++ * top level "COPYING" file for details).
++ *
++ * The remainder of this file is:
++ *
+  * Copyright © 1997-2003 by The XFree86 Project, Inc.
+  * Copyright © 2007 Dave Airlie
+  * Copyright © 2007-2008 Intel Corporation
+@@ -31,7 +36,6 @@
+  */
+ 
+ #include <linux/list.h>
+-#include <linux/list_sort.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "drm_crtc.h"
+@@ -851,7 +855,6 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
+ 
+ /**
+  * drm_mode_compare - compare modes for favorability
+- * @priv: unused
+  * @lh_a: list_head for first mode
+  * @lh_b: list_head for second mode
+  *
+@@ -865,7 +868,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
+  * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+  * positive if @lh_b is better than @lh_a.
+  */
+-static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
++static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+ {
+ 	struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+ 	struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+@@ -882,6 +885,85 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
+ 	return diff;
+ }
+ 
++/* FIXME: what we don't have a list sort function? */
++/* list sort from Mark J Roberts (mjr at znex.org) */
++static void list_sort(struct list_head *head,
++	       int (*cmp)(struct list_head *a, struct list_head *b))
++{
++	struct list_head *p, *q, *e, *list, *tail, *oldhead;
++	int insize, nmerges, psize, qsize, i;
++
++	list = head->next;
++	list_del(head);
++	insize = 1;
++	for (;;) {
++		p = oldhead = list;
++		list = tail = NULL;
++		nmerges = 0;
++
++		while (p) {
++			nmerges++;
++			q = p;
++			psize = 0;
++			for (i = 0; i < insize; i++) {
++				psize++;
++				q = q->next == oldhead ? NULL : q->next;
++				if (!q)
++					break;
++			}
++
++			qsize = insize;
++			while (psize > 0 || (qsize > 0 && q)) {
++				if (!psize) {
++					e = q;
++					q = q->next;
++					qsize--;
++					if (q == oldhead)
++						q = NULL;
++				} else if (!qsize || !q) {
++					e = p;
++					p = p->next;
++					psize--;
++					if (p == oldhead)
++						p = NULL;
++				} else if (cmp(p, q) <= 0) {
++					e = p;
++					p = p->next;
++					psize--;
++					if (p == oldhead)
++						p = NULL;
++				} else {
++					e = q;
++					q = q->next;
++					qsize--;
++					if (q == oldhead)
++						q = NULL;
++				}
++				if (tail)
++					tail->next = e;
++				else
++					list = e;
++				e->prev = tail;
++				tail = e;
++			}
++			p = q;
++		}
++
++		tail->next = list;
++		list->prev = tail;
++
++		if (nmerges <= 1)
++			break;
++
++		insize *= 2;
++	}
++
++	head->next = list;
++	head->prev = list->prev;
++	list->prev->next = head;
++	list->prev = head;
++}
++
+ /**
+  * drm_mode_sort - sort mode list
+  * @mode_list: list to sort
+@@ -893,7 +975,7 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
+  */
+ void drm_mode_sort(struct list_head *mode_list)
+ {
+-	list_sort(NULL, mode_list, drm_mode_compare);
++	list_sort(mode_list, drm_mode_compare);
+ }
+ EXPORT_SYMBOL(drm_mode_sort);
+ 
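
For reference, the signature difference this patch papers over: the generic
helper introduced in 2.6.33 (lib/list_sort.c, <linux/list_sort.h>) threads an
opaque priv pointer through to the comparison callback, while the private
copy restored above does not. A minimal sketch of the generic form follows;
cmp_modes() and sort_modes() are illustrative only, not part of the patch:

  #include <linux/list.h>
  #include <linux/list_sort.h>	/* generic helper, 2.6.33 onwards */

  /* Comparison callback for the generic API; @priv is unused here,
   * which is why the private copy can drop it entirely. */
  static int cmp_modes(void *priv, struct list_head *a, struct list_head *b)
  {
  	/* compare list_entry(a, ...) against list_entry(b, ...) here */
  	return 0;
  }

  static void sort_modes(struct list_head *mode_list)
  {
  	list_sort(NULL, mode_list, cmp_modes);	/* mainline 2.6.33 form */
  }

With the private copy, the equivalent call is simply
list_sort(mode_list, drm_mode_compare), exactly as in the drm_mode_sort()
hunk above.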

Added: dists/sid/linux-2.6/debian/patches/debian/drm-staging-2.6.32.9-2.6.33.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/debian/drm-staging-2.6.32.9-2.6.33.patch	Tue Mar  2 00:09:46 2010	(r15304)
@@ -0,0 +1,15 @@
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -101,8 +101,10 @@
+ 
+ source "drivers/staging/line6/Kconfig"
+ 
+-source "drivers/gpu/drm/radeon/Kconfig"
+-
++source "drivers/gpu/drm/vmwgfx/Kconfig"
++
++source "drivers/gpu/drm/nouveau/Kconfig"
++
+ source "drivers/staging/octeon/Kconfig"
+ 
+ source "drivers/staging/serqt_usb2/Kconfig"

Added: dists/sid/linux-2.6/debian/patches/features/all/drm-2.6.32.9-2.6.33.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/drm-2.6.32.9-2.6.33.patch	Tue Mar  2 00:09:46 2010	(r15304)
@@ -0,0 +1,84466 @@
+git diff v2.6.32.9..v2.6.33 -- drivers/gpu/drm include/drm
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 96eddd1..305c590 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -66,6 +66,8 @@ config DRM_RADEON
+ 
+ 	  If M is selected, the module will be called radeon.
+ 
++source "drivers/gpu/drm/radeon/Kconfig"
++
+ config DRM_I810
+ 	tristate "Intel I810"
+ 	depends on DRM && AGP && AGP_INTEL
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 3c8827a..39c5aa7 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -15,7 +15,7 @@ drm-y       :=	drm_auth.o drm_bufs.o drm_cache.o \
+ 
+ drm-$(CONFIG_COMPAT) += drm_ioc32.o
+ 
+-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
++drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
+ 
+ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+ 
+@@ -30,4 +30,7 @@ obj-$(CONFIG_DRM_I830)	+= i830/
+ obj-$(CONFIG_DRM_I915)  += i915/
+ obj-$(CONFIG_DRM_SIS)   += sis/
+ obj-$(CONFIG_DRM_SAVAGE)+= savage/
++obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
+ obj-$(CONFIG_DRM_VIA)	+=via/
++obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
++obj-y			+= i2c/
+diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
+index a1fce68..17be051 100644
+--- a/drivers/gpu/drm/ati_pcigart.c
++++ b/drivers/gpu/drm/ati_pcigart.c
+@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
+ 
+ 		if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+ 			DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+-				  gart_info->table_mask);
++				  (unsigned long long)gart_info->table_mask);
+ 			ret = 1;
+ 			goto done;
+ 		}
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 5cae0b3..d91fb8c 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
+ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
+ 		 drm_tv_subconnector_enum_list)
+ 
++static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
++	{ DRM_MODE_DIRTY_OFF,      "Off"      },
++	{ DRM_MODE_DIRTY_ON,       "On"       },
++	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
++};
++
++DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
++		 drm_dirty_info_enum_list)
++
+ struct drm_conn_prop_enum_list {
+ 	int type;
+ 	char *name;
+@@ -149,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
+ 	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
+ 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
+ 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
++	{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
+ };
+ 
+ static struct drm_prop_enum_list drm_encoder_enum_list[] =
+@@ -247,7 +257,8 @@ static void drm_mode_object_put(struct drm_device *dev,
+ 	mutex_unlock(&dev->mode_config.idr_mutex);
+ }
+ 
+-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
++struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
++		uint32_t id, uint32_t type)
+ {
+ 	struct drm_mode_object *obj = NULL;
+ 
+@@ -272,7 +283,7 @@ EXPORT_SYMBOL(drm_mode_object_find);
+  * functions & device file and adds it to the master fd list.
+  *
+  * RETURNS:
+- * Zero on success, error code on falure.
++ * Zero on success, error code on failure.
+  */
+ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
+ 			 const struct drm_framebuffer_funcs *funcs)
+@@ -802,6 +813,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
+ EXPORT_SYMBOL(drm_mode_create_dithering_property);
+ 
+ /**
++ * drm_mode_create_dirty_property - create dirty property
++ * @dev: DRM device
++ *
++ * Called by a driver the first time it's needed, must be attached to desired
++ * connectors.
++ */
++int drm_mode_create_dirty_info_property(struct drm_device *dev)
++{
++	struct drm_property *dirty_info;
++	int i;
++
++	if (dev->mode_config.dirty_info_property)
++		return 0;
++
++	dirty_info =
++		drm_property_create(dev, DRM_MODE_PROP_ENUM |
++				    DRM_MODE_PROP_IMMUTABLE,
++				    "dirty",
++				    ARRAY_SIZE(drm_dirty_info_enum_list));
++	for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
++		drm_property_add_enum(dirty_info, i,
++				      drm_dirty_info_enum_list[i].type,
++				      drm_dirty_info_enum_list[i].name);
++	dev->mode_config.dirty_info_property = dirty_info;
++
++	return 0;
++}
++EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
++
++/**
+  * drm_mode_config_init - initialize DRM mode_configuration structure
+  * @dev: DRM device
+  *
+@@ -1753,6 +1794,71 @@ out:
+ 	return ret;
+ }
+ 
++int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
++			   void *data, struct drm_file *file_priv)
++{
++	struct drm_clip_rect __user *clips_ptr;
++	struct drm_clip_rect *clips = NULL;
++	struct drm_mode_fb_dirty_cmd *r = data;
++	struct drm_mode_object *obj;
++	struct drm_framebuffer *fb;
++	unsigned flags;
++	int num_clips;
++	int ret = 0;
++
++	mutex_lock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
++	if (!obj) {
++		DRM_ERROR("invalid framebuffer id\n");
++		ret = -EINVAL;
++		goto out_err1;
++	}
++	fb = obj_to_fb(obj);
++
++	num_clips = r->num_clips;
++	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
++
++	if (!num_clips != !clips_ptr) {
++		ret = -EINVAL;
++		goto out_err1;
++	}
++
++	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
++
++	/* If userspace annotates copy, clips must come in pairs */
++	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
++		ret = -EINVAL;
++		goto out_err1;
++	}
++
++	if (num_clips && clips_ptr) {
++		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
++		if (!clips) {
++			ret = -ENOMEM;
++			goto out_err1;
++		}
++
++		ret = copy_from_user(clips, clips_ptr,
++				     num_clips * sizeof(*clips));
++		if (ret)
++			goto out_err2;
++	}
++
++	if (fb->funcs->dirty) {
++		ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
++	} else {
++		ret = -ENOSYS;
++		goto out_err2;
++	}
++
++out_err2:
++	kfree(clips);
++out_err1:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
++
++
+ /**
+  * drm_fb_release - remove and free the FBs on this file
+  * @filp: file * from the ioctl
+@@ -2328,7 +2434,7 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
+ 	} else if (connector->funcs->set_property)
+ 		ret = connector->funcs->set_property(connector, property, out_resp->value);
+ 
+-	/* store the property value if succesful */
++	/* store the property value if successful */
+ 	if (!ret)
+ 		drm_connector_property_set_value(connector, property, out_resp->value);
+ out:
+@@ -2478,3 +2584,72 @@ out:
+ 	mutex_unlock(&dev->mode_config.mutex);
+ 	return ret;
+ }
++
++int drm_mode_page_flip_ioctl(struct drm_device *dev,
++			     void *data, struct drm_file *file_priv)
++{
++	struct drm_mode_crtc_page_flip *page_flip = data;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc;
++	struct drm_framebuffer *fb;
++	struct drm_pending_vblank_event *e = NULL;
++	unsigned long flags;
++	int ret = -EINVAL;
++
++	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
++	    page_flip->reserved != 0)
++		return -EINVAL;
++
++	mutex_lock(&dev->mode_config.mutex);
++	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
++	if (!obj)
++		goto out;
++	crtc = obj_to_crtc(obj);
++
++	if (crtc->funcs->page_flip == NULL)
++		goto out;
++
++	obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
++	if (!obj)
++		goto out;
++	fb = obj_to_fb(obj);
++
++	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
++		ret = -ENOMEM;
++		spin_lock_irqsave(&dev->event_lock, flags);
++		if (file_priv->event_space < sizeof e->event) {
++			spin_unlock_irqrestore(&dev->event_lock, flags);
++			goto out;
++		}
++		file_priv->event_space -= sizeof e->event;
++		spin_unlock_irqrestore(&dev->event_lock, flags);
++
++		e = kzalloc(sizeof *e, GFP_KERNEL);
++		if (e == NULL) {
++			spin_lock_irqsave(&dev->event_lock, flags);
++			file_priv->event_space += sizeof e->event;
++			spin_unlock_irqrestore(&dev->event_lock, flags);
++			goto out;
++		}
++
++		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
++		e->event.base.length = sizeof e->event;
++		e->event.user_data = page_flip->user_data;
++		e->base.event = &e->event.base;
++		e->base.file_priv = file_priv;
++		e->base.destroy =
++			(void (*) (struct drm_pending_event *)) kfree;
++	}
++
++	ret = crtc->funcs->page_flip(crtc, fb, e);
++	if (ret) {
++		spin_lock_irqsave(&dev->event_lock, flags);
++		file_priv->event_space += sizeof e->event;
++		spin_unlock_irqrestore(&dev->event_lock, flags);
++		kfree(e);
++	}
++
++out:
++	mutex_unlock(&dev->mode_config.mutex);
++	return ret;
++}
+diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
+index afed886..7d0f00a 100644
+--- a/drivers/gpu/drm/drm_crtc_helper.c
++++ b/drivers/gpu/drm/drm_crtc_helper.c
+@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
+ 
+ 	count = (*connector_funcs->get_modes)(connector);
+ 	if (!count) {
+-		count = drm_add_modes_noedid(connector, 800, 600);
++		count = drm_add_modes_noedid(connector, 1024, 768);
+ 		if (!count)
+ 			return 0;
+ 	}
+@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
+ EXPORT_SYMBOL(drm_helper_crtc_in_use);
+ 
+ /**
+- * drm_disable_unused_functions - disable unused objects
++ * drm_helper_disable_unused_functions - disable unused objects
+  * @dev: DRM device
+  *
+  * LOCKING:
+@@ -702,7 +702,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ 		if (encoder->crtc != crtc)
+ 			continue;
+ 
+-		DRM_INFO("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
++		DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
+ 			 mode->name, mode->base.id);
+ 		encoder_funcs = encoder->helper_private;
+ 		encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+@@ -1032,7 +1032,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
+ 	/*
+ 	 * we shouldn't end up with no modes here.
+ 	 */
+-	WARN(!count, "No connectors reported connected with modes\n");
++	if (count == 0)
++		printk(KERN_INFO "No connectors reported connected with modes\n");
+ 
+ 	drm_setup_crtcs(dev);
+ 
+@@ -1162,6 +1163,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
+ int drm_helper_resume_force_mode(struct drm_device *dev)
+ {
+ 	struct drm_crtc *crtc;
++	struct drm_encoder *encoder;
++	struct drm_encoder_helper_funcs *encoder_funcs;
++	struct drm_crtc_helper_funcs *crtc_funcs;
+ 	int ret;
+ 
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+@@ -1174,6 +1178,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
+ 
+ 		if (ret == false)
+ 			DRM_ERROR("failed to set mode on crtc %p\n", crtc);
++
++		/* Turn off outputs that were already powered off */
++		if (drm_helper_choose_crtc_dpms(crtc)) {
++			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++
++				if(encoder->crtc != crtc)
++					continue;
++
++				encoder_funcs = encoder->helper_private;
++				if (encoder_funcs->dpms)
++					(*encoder_funcs->dpms) (encoder,
++								drm_helper_choose_encoder_dpms(encoder));
++
++				crtc_funcs = crtc->helper_private;
++				if (crtc_funcs->dpms)
++					(*crtc_funcs->dpms) (crtc,
++							     drm_helper_choose_crtc_dpms(crtc));
++			}
++		}
+ 	}
+ 	/* disable the unused connectors while restoring the modesetting */
+ 	drm_helper_disable_unused_functions(dev);
+diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
+new file mode 100644
+index 0000000..548887c
+--- /dev/null
++++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
+@@ -0,0 +1,209 @@
++/*
++ * Copyright © 2009 Keith Packard
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/i2c.h>
++#include "drm_dp_helper.h"
++#include "drmP.h"
++
++/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
++static int
++i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
++			    uint8_t write_byte, uint8_t *read_byte)
++{
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
++	int ret;
++	
++	ret = (*algo_data->aux_ch)(adapter, mode,
++				   write_byte, read_byte);
++	return ret;
++}
++
++/*
++ * I2C over AUX CH
++ */
++
++/*
++ * Send the address. If the I2C link is running, this 'restarts'
++ * the connection with the new address, this is used for doing
++ * a write followed by a read (as needed for DDC)
++ */
++static int
++i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
++{
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
++	int mode = MODE_I2C_START;
++	int ret;
++
++	if (reading)
++		mode |= MODE_I2C_READ;
++	else
++		mode |= MODE_I2C_WRITE;
++	algo_data->address = address;
++	algo_data->running = true;
++	ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
++	return ret;
++}
++
++/*
++ * Stop the I2C transaction. This closes out the link, sending
++ * a bare address packet with the MOT bit turned off
++ */
++static void
++i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
++{
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
++	int mode = MODE_I2C_STOP;
++
++	if (reading)
++		mode |= MODE_I2C_READ;
++	else
++		mode |= MODE_I2C_WRITE;
++	if (algo_data->running) {
++		(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
++		algo_data->running = false;
++	}
++}
++
++/*
++ * Write a single byte to the current I2C address, the
++ * the I2C link must be running or this returns -EIO
++ */
++static int
++i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
++{
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
++	int ret;
++
++	if (!algo_data->running)
++		return -EIO;
++
++	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
++	return ret;
++}
++
++/*
++ * Read a single byte from the current I2C address, the
++ * I2C link must be running or this returns -EIO
++ */
++static int
++i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
++{
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
++	int ret;
++
++	if (!algo_data->running)
++		return -EIO;
++
++	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
++	return ret;
++}
++
++static int
++i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
++		     struct i2c_msg *msgs,
++		     int num)
++{
++	int ret = 0;
++	bool reading = false;
++	int m;
++	int b;
++
++	for (m = 0; m < num; m++) {
++		u16 len = msgs[m].len;
++		u8 *buf = msgs[m].buf;
++		reading = (msgs[m].flags & I2C_M_RD) != 0;
++		ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
++		if (ret < 0)
++			break;
++		if (reading) {
++			for (b = 0; b < len; b++) {
++				ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
++				if (ret < 0)
++					break;
++			}
++		} else {
++			for (b = 0; b < len; b++) {
++				ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
++				if (ret < 0)
++					break;
++			}
++		}
++		if (ret < 0)
++			break;
++	}
++	if (ret >= 0)
++		ret = num;
++	i2c_algo_dp_aux_stop(adapter, reading);
++	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
++	return ret;
++}
++
++static u32
++i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
++{
++	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
++	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
++	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
++	       I2C_FUNC_10BIT_ADDR;
++}
++
++static const struct i2c_algorithm i2c_dp_aux_algo = {
++	.master_xfer	= i2c_algo_dp_aux_xfer,
++	.functionality	= i2c_algo_dp_aux_functionality,
++};
++
++static void
++i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
++{
++	(void) i2c_algo_dp_aux_address(adapter, 0, false);
++	(void) i2c_algo_dp_aux_stop(adapter, false);
++					   
++}
++
++static int
++i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
++{
++	adapter->algo = &i2c_dp_aux_algo;
++	adapter->retries = 3;
++	i2c_dp_aux_reset_bus(adapter);
++	return 0;
++}
++
++int
++i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
++{
++	int error;
++	
++	error = i2c_dp_aux_prepare_bus(adapter);
++	if (error)
++		return error;
++	error = i2c_add_adapter(adapter);
++	return error;
++}
++EXPORT_SYMBOL(i2c_dp_aux_add_bus);
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index a75ca63..766c468 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+ 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
++	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+ };
+ 
+ #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
+@@ -366,6 +368,29 @@ module_init(drm_core_init);
+ module_exit(drm_core_exit);
+ 
+ /**
++ * Copy and IOCTL return string to user space
++ */
++static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
++{
++	int len;
++
++	/* don't overflow userbuf */
++	len = strlen(value);
++	if (len > *buf_len)
++		len = *buf_len;
++
++	/* let userspace know exact length of driver value (which could be
++	 * larger than the userspace-supplied buffer) */
++	*buf_len = strlen(value);
++
++	/* finally, try filling in the userbuf */
++	if (len && buf)
++		if (copy_to_user(buf, value, len))
++			return -EFAULT;
++	return 0;
++}
++
++/**
+  * Get version information
+  *
+  * \param inode device inode.
+@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
+ 		       struct drm_file *file_priv)
+ {
+ 	struct drm_version *version = data;
+-	int len;
++	int err;
+ 
+ 	version->version_major = dev->driver->major;
+ 	version->version_minor = dev->driver->minor;
+ 	version->version_patchlevel = dev->driver->patchlevel;
+-	DRM_COPY(version->name, dev->driver->name);
+-	DRM_COPY(version->date, dev->driver->date);
+-	DRM_COPY(version->desc, dev->driver->desc);
+-
+-	return 0;
++	err = drm_copy_field(version->name, &version->name_len,
++			dev->driver->name);
++	if (!err)
++		err = drm_copy_field(version->date, &version->date_len,
++				dev->driver->date);
++	if (!err)
++		err = drm_copy_field(version->desc, &version->desc_len,
++				dev->driver->desc);
++
++	return err;
+ }
+ 
+ /**
+@@ -404,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data,
+  * Looks up the ioctl function in the ::ioctls table, checking for root
+  * previleges if so required, and dispatches to the respective function.
+  */
+-int drm_ioctl(struct inode *inode, struct file *filp,
++long drm_ioctl(struct file *filp,
+ 	      unsigned int cmd, unsigned long arg)
+ {
+ 	struct drm_file *file_priv = filp->private_data;
+-	struct drm_device *dev = file_priv->minor->dev;
++	struct drm_device *dev;
+ 	struct drm_ioctl_desc *ioctl;
+ 	drm_ioctl_t *func;
+ 	unsigned int nr = DRM_IOCTL_NR(cmd);
+@@ -416,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
+ 	char stack_kdata[128];
+ 	char *kdata = NULL;
+ 
++	dev = file_priv->minor->dev;
+ 	atomic_inc(&dev->ioctl_count);
+ 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+ 	++file_priv->ioctl_count;
+@@ -471,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp,
+ 				goto err_i1;
+ 			}
+ 		}
+-		retcode = func(dev, kdata, file_priv);
++		if (ioctl->flags & DRM_UNLOCKED)
++			retcode = func(dev, kdata, file_priv);
++		else {
++			lock_kernel();
++			retcode = func(dev, kdata, file_priv);
++			unlock_kernel();
++		}
+ 
+ 		if (cmd & IOC_OUT) {
+ 			if (copy_to_user((void __user *)arg, kdata,
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index b54ba63..ab6c973 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
+  */
+ static bool edid_is_valid(struct edid *edid)
+ {
+-	int i;
++	int i, score = 0;
+ 	u8 csum = 0;
+ 	u8 *raw_edid = (u8 *)edid;
+ 
+-	if (memcmp(edid->header, edid_header, sizeof(edid_header)))
+-		goto bad;
+-	if (edid->version != 1) {
+-		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
++	for (i = 0; i < sizeof(edid_header); i++)
++		if (raw_edid[i] == edid_header[i])
++			score++;
++
++	if (score == 8) ;
++	else if (score >= 6) {
++		DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
++		memcpy(raw_edid, edid_header, sizeof(edid_header));
++	} else
+ 		goto bad;
+-	}
+-	if (edid->revision > 4)
+-		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+ 
+ 	for (i = 0; i < EDID_LENGTH; i++)
+ 		csum += raw_edid[i];
+@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
+ 		goto bad;
+ 	}
+ 
++	if (edid->version != 1) {
++		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
++		goto bad;
++	}
++
++	if (edid->revision > 4)
++		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
++
+ 	return 1;
+ 
+ bad:
+@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
+ 		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ };
++static const int drm_num_dmt_modes =
++	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ 
+ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
+ 			int hsize, int vsize, int fresh)
+ {
+-	int i, count;
++	int i;
+ 	struct drm_display_mode *ptr, *mode;
+ 
+-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+ 	mode = NULL;
+-	for (i = 0; i < count; i++) {
++	for (i = 0; i < drm_num_dmt_modes; i++) {
+ 		ptr = &drm_dmt_modes[i];
+ 		if (hsize == ptr->hdisplay &&
+ 			vsize == ptr->vdisplay &&
+@@ -587,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
+ 	return mode;
+ }
+ 
++/*
++ * EDID is delightfully ambiguous about how interlaced modes are to be
++ * encoded.  Our internal representation is of frame height, but some
++ * HDTV detailed timings are encoded as field height.
++ *
++ * The format list here is from CEA, in frame size.  Technically we
++ * should be checking refresh rate too.  Whatever.
++ */
++static void
++drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
++			    struct detailed_pixel_timing *pt)
++{
++	int i;
++	static const struct {
++		int w, h;
++	} cea_interlaced[] = {
++		{ 1920, 1080 },
++		{  720,  480 },
++		{ 1440,  480 },
++		{ 2880,  480 },
++		{  720,  576 },
++		{ 1440,  576 },
++		{ 2880,  576 },
++	};
++	static const int n_sizes =
++		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
++
++	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
++		return;
++
++	for (i = 0; i < n_sizes; i++) {
++		if ((mode->hdisplay == cea_interlaced[i].w) &&
++		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
++			mode->vdisplay *= 2;
++			mode->vsync_start *= 2;
++			mode->vsync_end *= 2;
++			mode->vtotal *= 2;
++			mode->vtotal |= 1;
++		}
++	}
++
++	mode->flags |= DRM_MODE_FLAG_INTERLACE;
++}
++
+ /**
+  * drm_mode_detailed - create a new mode from an EDID detailed timing section
+  * @dev: DRM device (needed to create new mode)
+@@ -622,8 +677,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ 		return NULL;
+ 	}
+ 	if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
+-		printk(KERN_WARNING "integrated sync not supported\n");
+-		return NULL;
++		printk(KERN_WARNING "composite sync not supported\n");
+ 	}
+ 
+ 	/* it is incorrect if hsync/vsync width is zero */
+@@ -670,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
+ 
+ 	drm_mode_set_name(mode);
+ 
+-	if (pt->misc & DRM_EDID_PT_INTERLACED)
+-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
++	drm_mode_do_interlace_quirk(mode, pt);
+ 
+ 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
+@@ -834,8 +887,169 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
+ 	return modes;
+ }
+ 
++/*
++ * XXX fix this for:
++ * - GTF secondary curve formula
++ * - EDID 1.4 range offsets
++ * - CVT extended bits
++ */
++static bool
++mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
++{
++	struct detailed_data_monitor_range *range;
++	int hsync, vrefresh;
++
++	range = &timing->data.other_data.data.range;
++
++	hsync = drm_mode_hsync(mode);
++	vrefresh = drm_mode_vrefresh(mode);
++
++	if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
++		return false;
++
++	if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
++		return false;
++
++	if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
++		/* be forgiving since it's in units of 10MHz */
++		int max_clock = range->pixel_clock_mhz * 10 + 9;
++		max_clock *= 1000;
++		if (mode->clock > max_clock)
++			return false;
++	}
++
++	return true;
++}
++
++/*
++ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
++ * need to account for them.
++ */
++static int drm_gtf_modes_for_range(struct drm_connector *connector,
++				   struct detailed_timing *timing)
++{
++	int i, modes = 0;
++	struct drm_display_mode *newmode;
++	struct drm_device *dev = connector->dev;
++
++	for (i = 0; i < drm_num_dmt_modes; i++) {
++		if (mode_in_range(drm_dmt_modes + i, timing)) {
++			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
++			if (newmode) {
++				drm_mode_probed_add(connector, newmode);
++				modes++;
++			}
++		}
++	}
++
++	return modes;
++}
++
++static int drm_cvt_modes(struct drm_connector *connector,
++			 struct detailed_timing *timing)
++{
++	int i, j, modes = 0;
++	struct drm_display_mode *newmode;
++	struct drm_device *dev = connector->dev;
++	struct cvt_timing *cvt;
++	const int rates[] = { 60, 85, 75, 60, 50 };
++	const u8 empty[3] = { 0, 0, 0 };
++
++	for (i = 0; i < 4; i++) {
++		int uninitialized_var(width), height;
++		cvt = &(timing->data.other_data.data.cvt[i]);
++
++		if (!memcmp(cvt->code, empty, 3))
++			continue;
++
++		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
++		switch (cvt->code[1] & 0x0c) {
++		case 0x00:
++			width = height * 4 / 3;
++			break;
++		case 0x04:
++			width = height * 16 / 9;
++			break;
++		case 0x08:
++			width = height * 16 / 10;
++			break;
++		case 0x0c:
++			width = height * 15 / 9;
++			break;
++		}
++
++		for (j = 1; j < 5; j++) {
++			if (cvt->code[2] & (1 << j)) {
++				newmode = drm_cvt_mode(dev, width, height,
++						       rates[j], j == 0,
++						       false, false);
++				if (newmode) {
++					drm_mode_probed_add(connector, newmode);
++					modes++;
++				}
++			}
++		}
++	}
++
++	return modes;
++}
++
++static int add_detailed_modes(struct drm_connector *connector,
++			      struct detailed_timing *timing,
++			      struct edid *edid, u32 quirks, int preferred)
++{
++	int i, modes = 0;
++	struct detailed_non_pixel *data = &timing->data.other_data;
++	int timing_level = standard_timing_level(edid);
++	int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
++	struct drm_display_mode *newmode;
++	struct drm_device *dev = connector->dev;
++
++	if (timing->pixel_clock) {
++		newmode = drm_mode_detailed(dev, edid, timing, quirks);
++		if (!newmode)
++			return 0;
++
++		if (preferred)
++			newmode->type |= DRM_MODE_TYPE_PREFERRED;
++
++		drm_mode_probed_add(connector, newmode);
++		return 1;
++	}
++
++	/* other timing types */
++	switch (data->type) {
++	case EDID_DETAIL_MONITOR_RANGE:
++		if (gtf)
++			modes += drm_gtf_modes_for_range(connector, timing);
++		break;
++	case EDID_DETAIL_STD_MODES:
++		/* Six modes per detailed section */
++		for (i = 0; i < 6; i++) {
++			struct std_timing *std;
++			struct drm_display_mode *newmode;
++
++			std = &data->data.timings[i];
++			newmode = drm_mode_std(dev, std, edid->revision,
++					       timing_level);
++			if (newmode) {
++				drm_mode_probed_add(connector, newmode);
++				modes++;
++			}
++		}
++		break;
++	case EDID_DETAIL_CVT_3BYTE:
++		modes += drm_cvt_modes(connector, timing);
++		break;
++	default:
++		break;
++	}
++
++	return modes;
++}
++
+ /**
+- * add_detailed_modes - get detailed mode info from EDID data
++ * add_detailed_info - get detailed mode info from EDID data
+  * @connector: attached connector
+  * @edid: EDID block to scan
+  * @quirks: quirks to apply
+@@ -846,67 +1060,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
+ static int add_detailed_info(struct drm_connector *connector,
+ 			     struct edid *edid, u32 quirks)
+ {
+-	struct drm_device *dev = connector->dev;
+-	int i, j, modes = 0;
+-	int timing_level;
+-
+-	timing_level = standard_timing_level(edid);
++	int i, modes = 0;
+ 
+ 	for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
+ 		struct detailed_timing *timing = &edid->detailed_timings[i];
+-		struct detailed_non_pixel *data = &timing->data.other_data;
+-		struct drm_display_mode *newmode;
+-
+-		/* X server check is version 1.1 or higher */
+-		if (edid->version == 1 && edid->revision >= 1 &&
+-		    !timing->pixel_clock) {
+-			/* Other timing or info */
+-			switch (data->type) {
+-			case EDID_DETAIL_MONITOR_SERIAL:
+-				break;
+-			case EDID_DETAIL_MONITOR_STRING:
+-				break;
+-			case EDID_DETAIL_MONITOR_RANGE:
+-				/* Get monitor range data */
+-				break;
+-			case EDID_DETAIL_MONITOR_NAME:
+-				break;
+-			case EDID_DETAIL_MONITOR_CPDATA:
+-				break;
+-			case EDID_DETAIL_STD_MODES:
+-				for (j = 0; j < 6; i++) {
+-					struct std_timing *std;
+-					struct drm_display_mode *newmode;
+-
+-					std = &data->data.timings[j];
+-					newmode = drm_mode_std(dev, std,
+-							       edid->revision,
+-							       timing_level);
+-					if (newmode) {
+-						drm_mode_probed_add(connector, newmode);
+-						modes++;
+-					}
+-				}
+-				break;
+-			default:
+-				break;
+-			}
+-		} else {
+-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
+-			if (!newmode)
+-				continue;
++		int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ 
+-			/* First detailed mode is preferred */
+-			if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
+-				newmode->type |= DRM_MODE_TYPE_PREFERRED;
+-			drm_mode_probed_add(connector, newmode);
++		/* In 1.0, only timings are allowed */
++		if (!timing->pixel_clock && edid->version == 1 &&
++			edid->revision == 0)
++			continue;
+ 
+-			modes++;
+-		}
++		modes += add_detailed_modes(connector, timing, edid, quirks,
++					    preferred);
+ 	}
+ 
+ 	return modes;
+ }
++
+ /**
+  * add_detailed_mode_eedid - get detailed mode info from addtional timing
+  * 			EDID block
+@@ -920,12 +1091,9 @@ static int add_detailed_info(struct drm_connector *connector,
+ static int add_detailed_info_eedid(struct drm_connector *connector,
+ 			     struct edid *edid, u32 quirks)
+ {
+-	struct drm_device *dev = connector->dev;
+-	int i, j, modes = 0;
++	int i, modes = 0;
+ 	char *edid_ext = NULL;
+ 	struct detailed_timing *timing;
+-	struct detailed_non_pixel *data;
+-	struct drm_display_mode *newmode;
+ 	int edid_ext_num;
+ 	int start_offset, end_offset;
+ 	int timing_level;
+@@ -976,51 +1144,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
+ 	for (i = start_offset; i < end_offset;
+ 			i += sizeof(struct detailed_timing)) {
+ 		timing = (struct detailed_timing *)(edid_ext + i);
+-		data = &timing->data.other_data;
+-		/* Detailed mode timing */
+-		if (timing->pixel_clock) {
+-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
+-			if (!newmode)
+-				continue;
+-
+-			drm_mode_probed_add(connector, newmode);
+-
+-			modes++;
+-			continue;
+-		}
+-
+-		/* Other timing or info */
+-		switch (data->type) {
+-		case EDID_DETAIL_MONITOR_SERIAL:
+-			break;
+-		case EDID_DETAIL_MONITOR_STRING:
+-			break;
+-		case EDID_DETAIL_MONITOR_RANGE:
+-			/* Get monitor range data */
+-			break;
+-		case EDID_DETAIL_MONITOR_NAME:
+-			break;
+-		case EDID_DETAIL_MONITOR_CPDATA:
+-			break;
+-		case EDID_DETAIL_STD_MODES:
+-			/* Five modes per detailed section */
+-			for (j = 0; j < 5; i++) {
+-				struct std_timing *std;
+-				struct drm_display_mode *newmode;
+-
+-				std = &data->data.timings[j];
+-				newmode = drm_mode_std(dev, std,
+-						       edid->revision,
+-						       timing_level);
+-				if (newmode) {
+-					drm_mode_probed_add(connector, newmode);
+-					modes++;
+-				}
+-			}
+-			break;
+-		default:
+-			break;
+-		}
++		modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+ 	}
+ 
+ 	return modes;
+@@ -1066,19 +1190,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
+ 			     struct i2c_adapter *adapter,
+ 			     char *buf, int len)
+ {
+-	int ret;
++	int i;
+ 
+-	ret = drm_do_probe_ddc_edid(adapter, buf, len);
+-	if (ret != 0) {
+-		goto end;
+-	}
+-	if (!edid_is_valid((struct edid *)buf)) {
+-		dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+-			 drm_get_connector_name(connector));
+-		ret = -1;
++	for (i = 0; i < 4; i++) {
++		if (drm_do_probe_ddc_edid(adapter, buf, len))
++			return -1;
++		if (edid_is_valid((struct edid *)buf))
++			return 0;
+ 	}
+-end:
+-	return ret;
++
++	/* repeated checksum failures; warn, but carry on */
++	dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
++		 drm_get_connector_name(connector));
++	return -1;
+ }
+ 
+ /**
+@@ -1296,6 +1420,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
+ 					ptr->vdisplay > vdisplay)
+ 				continue;
+ 		}
++		if (drm_mode_vrefresh(ptr) > 61)
++			continue;
+ 		mode = drm_mode_duplicate(dev, ptr);
+ 		if (mode) {
+ 			drm_mode_probed_add(connector, mode);
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 65ef011..0f9e905 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -156,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
+ 			force = DRM_FORCE_ON;
+ 			break;
+ 		case 'D':
+-			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
++			if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
+ 			    (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
+ 				force = DRM_FORCE_ON;
+ 			else
+@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
+ 					mutex_unlock(&dev->mode_config.mutex);
+ 				}
+ 			}
+-			if (dpms_mode == DRM_MODE_DPMS_OFF) {
+-				mutex_lock(&dev->mode_config.mutex);
+-				crtc_funcs->dpms(crtc, dpms_mode);
+-				mutex_unlock(&dev->mode_config.mutex);
+-			}
++			mutex_lock(&dev->mode_config.mutex);
++			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++			mutex_unlock(&dev->mode_config.mutex);
+ 		}
+ 	}
+ }
+@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
+ int drm_fb_helper_blank(int blank, struct fb_info *info)
+ {
+ 	switch (blank) {
++	/* Display: On; HSync: On, VSync: On */
+ 	case FB_BLANK_UNBLANK:
+ 		drm_fb_helper_on(info);
+ 		break;
++	/* Display: Off; HSync: On, VSync: On */
+ 	case FB_BLANK_NORMAL:
+ 		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ 		break;
++	/* Display: Off; HSync: Off, VSync: On */
+ 	case FB_BLANK_HSYNC_SUSPEND:
+ 		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+ 		break;
++	/* Display: Off; HSync: On, VSync: Off */
+ 	case FB_BLANK_VSYNC_SUSPEND:
+ 		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
+ 		break;
++	/* Display: Off; HSync: Off, VSync: Off */
+ 	case FB_BLANK_POWERDOWN:
+ 		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
+ 		break;
+@@ -603,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
+ 		return -EINVAL;
+ 
+ 	/* Need to resize the fb object !!! */
+-	if (var->xres > fb->width || var->yres > fb->height) {
+-		DRM_ERROR("Requested width/height is greater than current fb "
+-			   "object %dx%d > %dx%d\n", var->xres, var->yres,
+-			   fb->width, fb->height);
+-		DRM_ERROR("Need resizing code.\n");
++	if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
++		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
++			  "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
++			  fb->width, fb->height, fb->bits_per_pixel);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -905,8 +907,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
+ 
+ 	if (new_fb) {
+ 		info->var.pixclock = 0;
+-		if (register_framebuffer(info) < 0)
++		ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
++		if (ret)
++			return ret;
++		if (register_framebuffer(info) < 0) {
++			fb_dealloc_cmap(&info->cmap);
+ 			return -EINVAL;
++		}
+ 	} else {
+ 		drm_fb_helper_set_par(info);
+ 	}
+@@ -936,6 +943,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
+ 		unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
+ 	}
+ 	drm_fb_helper_crtc_free(helper);
++	fb_dealloc_cmap(&helper->fb->fbdev->cmap);
+ }
+ EXPORT_SYMBOL(drm_fb_helper_free);
+ 
+diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
+index 251bc0e..08d14df 100644
+--- a/drivers/gpu/drm/drm_fops.c
++++ b/drivers/gpu/drm/drm_fops.c
+@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
+ 
+ 	INIT_LIST_HEAD(&priv->lhead);
+ 	INIT_LIST_HEAD(&priv->fbs);
++	INIT_LIST_HEAD(&priv->event_list);
++	init_waitqueue_head(&priv->event_wait);
++	priv->event_space = 4096; /* set aside 4k for event buffer */
+ 
+ 	if (dev->driver->driver_features & DRIVER_GEM)
+ 		drm_gem_open(dev, priv);
+@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
+ 				goto out_free;
+ 			}
+ 		}
++		mutex_lock(&dev->struct_mutex);
++		if (dev->driver->master_set) {
++			ret = dev->driver->master_set(dev, priv, true);
++			if (ret) {
++				/* drop both references if this fails */
++				drm_master_put(&priv->minor->master);
++				drm_master_put(&priv->master);
++				mutex_unlock(&dev->struct_mutex);
++				goto out_free;
++			}
++		}
++		mutex_unlock(&dev->struct_mutex);
+ 	} else {
+ 		/* get a reference to the master */
+ 		priv->master = drm_master_get(priv->minor->master);
+@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
+ 	}
+ }
+ 
++static void drm_events_release(struct drm_file *file_priv)
++{
++	struct drm_device *dev = file_priv->minor->dev;
++	struct drm_pending_event *e, *et;
++	struct drm_pending_vblank_event *v, *vt;
++	unsigned long flags;
++
++	spin_lock_irqsave(&dev->event_lock, flags);
++
++	/* Remove pending flips */
++	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
++		if (v->base.file_priv == file_priv) {
++			list_del(&v->base.link);
++			drm_vblank_put(dev, v->pipe);
++			v->base.destroy(&v->base);
++		}
++
++	/* Remove unconsumed events */
++	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
++		e->destroy(e);
++
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++}
++
+ /**
+  * Release file.
+  *
+@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
+ 	if (file_priv->minor->master)
+ 		drm_master_release(dev, filp);
+ 
++	drm_events_release(file_priv);
++
+ 	if (dev->driver->driver_features & DRIVER_GEM)
+ 		drm_gem_release(dev, file_priv);
+ 
+@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
+ 
+ 		if (file_priv->minor->master == file_priv->master) {
+ 			/* drop the reference held my the minor */
++			if (dev->driver->master_drop)
++				dev->driver->master_drop(dev, file_priv, true);
+ 			drm_master_put(&file_priv->minor->master);
+ 		}
+ 	}
+@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
+ }
+ EXPORT_SYMBOL(drm_release);
+ 
+-/** No-op. */
++static bool
++drm_dequeue_event(struct drm_file *file_priv,
++		  size_t total, size_t max, struct drm_pending_event **out)
++{
++	struct drm_device *dev = file_priv->minor->dev;
++	struct drm_pending_event *e;
++	unsigned long flags;
++	bool ret = false;
++
++	spin_lock_irqsave(&dev->event_lock, flags);
++
++	*out = NULL;
++	if (list_empty(&file_priv->event_list))
++		goto out;
++	e = list_first_entry(&file_priv->event_list,
++			     struct drm_pending_event, link);
++	if (e->event->length + total > max)
++		goto out;
++
++	file_priv->event_space += e->event->length;
++	list_del(&e->link);
++	*out = e;
++	ret = true;
++
++out:
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++	return ret;
++}
++
++ssize_t drm_read(struct file *filp, char __user *buffer,
++		 size_t count, loff_t *offset)
++{
++	struct drm_file *file_priv = filp->private_data;
++	struct drm_pending_event *e;
++	size_t total;
++	ssize_t ret;
++
++	ret = wait_event_interruptible(file_priv->event_wait,
++				       !list_empty(&file_priv->event_list));
++	if (ret < 0)
++		return ret;
++
++	total = 0;
++	while (drm_dequeue_event(file_priv, total, count, &e)) {
++		if (copy_to_user(buffer + total,
++				 e->event, e->event->length)) {
++			total = -EFAULT;
++			break;
++		}
++
++		total += e->event->length;
++		e->destroy(e);
++	}
++
++	return total;
++}
++EXPORT_SYMBOL(drm_read);
++
+ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+ {
+-	return 0;
++	struct drm_file *file_priv = filp->private_data;
++	unsigned int mask = 0;
++
++	poll_wait(filp, &file_priv->event_wait, wait);
++
++	if (!list_empty(&file_priv->event_list))
++		mask |= POLLIN | POLLRDNORM;
++
++	return mask;
+ }
+ EXPORT_SYMBOL(drm_poll);
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index 282d9fd..d61d185 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
+ 			  &version->desc))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
++	err = drm_ioctl(file,
+ 			DRM_IOCTL_VERSION, (unsigned long)version);
+ 	if (err)
+ 		return err;
+@@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
+ 			  &u->unique))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
++	err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+ 	if (err)
+ 		return err;
+ 
+@@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
+ 			  &u->unique))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
++	return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+ }
+ 
+ typedef struct drm_map32 {
+@@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
+ 	if (__put_user(idx, &map->offset))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_GET_MAP, (unsigned long)map);
++	err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
+ 	if (err)
+ 		return err;
+ 
+@@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
+ 	    || __put_user(m32.flags, &map->flags))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_ADD_MAP, (unsigned long)map);
++	err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
+ 	if (err)
+ 		return err;
+ 
+@@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
+ 	if (__put_user((void *)(unsigned long)handle, &map->handle))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RM_MAP, (unsigned long)map);
++	return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
+ }
+ 
+ typedef struct drm_client32 {
+@@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
+ 	if (__put_user(idx, &client->idx))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_GET_CLIENT, (unsigned long)client);
++	err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+ 	if (err)
+ 		return err;
+ 
+@@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
+ 	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_GET_STATS, (unsigned long)stats);
++	err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
+ 	if (err)
+ 		return err;
+ 
+@@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
+ 	    || __put_user(agp_start, &buf->agp_start))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
++	err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+ 	if (err)
+ 		return err;
+ 
+@@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
+ 	    || __put_user(b32.high_mark, &buf->high_mark))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
++	return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+ }
+ 
+ typedef struct drm_buf_info32 {
+@@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+ 	    || __put_user(list, &request->list))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_INFO_BUFS, (unsigned long)request);
++	err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+ 	if (err)
+ 		return err;
+ 
+@@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+ 	    || __put_user(list, &request->list))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_MAP_BUFS, (unsigned long)request);
++	err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+ 	if (err)
+ 		return err;
+ 
+@@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
+ 			  &request->list))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_FREE_BUFS, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+ }
+ 
+ typedef struct drm_ctx_priv_map32 {
+@@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
+ 			  &request->handle))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+ }
+ 
+ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
+@@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
+ 	if (__put_user(ctx_id, &request->ctx_id))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
++	err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+ 	if (err)
+ 		return err;
+ 
+@@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
+ 			  &res->contexts))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_RES_CTX, (unsigned long)res);
++	err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
+ 	if (err)
+ 		return err;
+ 
+@@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
+ 			  &d->request_sizes))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_DMA, (unsigned long)d);
++	err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
+ 	if (err)
+ 		return err;
+ 
+@@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
+ 	if (put_user(m32.mode, &mode->mode))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
++	return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+ }
+ 
+ typedef struct drm_agp_info32 {
+@@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
+ 	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_AGP_INFO, (unsigned long)info);
++	err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
+ 	if (err)
+ 		return err;
+ 
+@@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
+ 	    || __put_user(req32.type, &request->type))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
++	err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+ 	if (err)
+ 		return err;
+ 
+ 	if (__get_user(req32.handle, &request->handle)
+ 	    || __get_user(req32.physical, &request->physical)
+ 	    || copy_to_user(argp, &req32, sizeof(req32))) {
+-		drm_ioctl(file->f_path.dentry->d_inode, file,
+-			  DRM_IOCTL_AGP_FREE, (unsigned long)request);
++		drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ 		return -EFAULT;
+ 	}
+ 
+@@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
+ 	    || __put_user(handle, &request->handle))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_AGP_FREE, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
+ }
+ 
+ typedef struct drm_agp_binding32 {
+@@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
+ 	    || __put_user(req32.offset, &request->offset))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_AGP_BIND, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
+ }
+ 
+ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
+@@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
+ 	    || __put_user(handle, &request->handle))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+ }
+ #endif				/* __OS_HAS_AGP */
+ 
+@@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
+ 	    || __put_user(x, &request->size))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_SG_ALLOC, (unsigned long)request);
++	err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+ 	if (err)
+ 		return err;
+ 
+@@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
+ 	    || __put_user(x << PAGE_SHIFT, &request->handle))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_SG_FREE, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
+ }
+ 
+ #if defined(CONFIG_X86) || defined(CONFIG_IA64)
+@@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
+ 	    __put_user(update32.data, &request->data))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
++	err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+ 	return err;
+ }
+ #endif
+@@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ 	    || __put_user(req32.request.signal, &request->request.signal))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
++	err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+ 	if (err)
+ 		return err;
+ 
+@@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	 * than always failing.
+ 	 */
+ 	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+-		return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
++		return drm_ioctl(filp, cmd, arg);
+ 
+ 	fn = drm_compat_ioctls[nr];
+ 
+-	lock_kernel();		/* XXX for now */
+ 	if (fn != NULL)
+ 		ret = (*fn) (filp, cmd, arg);
+ 	else
+-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+-	unlock_kernel();
++		ret = drm_ioctl(filp, cmd, arg);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index 332d743..b98384d 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
+ 
+ 	dev->num_crtcs = 0;
+ }
++EXPORT_SYMBOL(drm_vblank_cleanup);
+ 
+ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+ {
+@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+ 	}
+ 
+ 	dev->vblank_disable_allowed = 0;
+-
+ 	return 0;
+ 
+ err:
+@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
+  */
+ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
+ {
++	/* vblank is not initialized (IRQ not installed?) */
++	if (!dev->num_crtcs)
++		return;
+ 	/*
+ 	 * To avoid all the problems that might happen if interrupts
+ 	 * were enabled/disabled around or between these calls, we just
+@@ -568,6 +571,63 @@ out:
+ 	return ret;
+ }
+ 
++static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
++				  union drm_wait_vblank *vblwait,
++				  struct drm_file *file_priv)
++{
++	struct drm_pending_vblank_event *e;
++	struct timeval now;
++	unsigned long flags;
++	unsigned int seq;
++
++	e = kzalloc(sizeof *e, GFP_KERNEL);
++	if (e == NULL)
++		return -ENOMEM;
++
++	e->pipe = pipe;
++	e->event.base.type = DRM_EVENT_VBLANK;
++	e->event.base.length = sizeof e->event;
++	e->event.user_data = vblwait->request.signal;
++	e->base.event = &e->event.base;
++	e->base.file_priv = file_priv;
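++	/* The event and its payload come from a single kzalloc(), so
++	 * plain kfree() is a valid destructor. */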
++	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
++
++	do_gettimeofday(&now);
++	spin_lock_irqsave(&dev->event_lock, flags);
++
++	if (file_priv->event_space < sizeof e->event) {
++		spin_unlock_irqrestore(&dev->event_lock, flags);
++		kfree(e);
++		return -ENOMEM;
++	}
++
++	file_priv->event_space -= sizeof e->event;
++	seq = drm_vblank_count(dev, pipe);
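++	/* Counters wrap: an unsigned difference of at most 2^23 means the
++	 * requested sequence already passed, so NEXTONMISS retargets the
++	 * next vblank instead. */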
++	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
++	    (seq - vblwait->request.sequence) <= (1 << 23)) {
++		vblwait->request.sequence = seq + 1;
++		vblwait->reply.sequence = vblwait->request.sequence;
++	}
++
++	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
++		  vblwait->request.sequence, seq, pipe);
++
++	e->event.sequence = vblwait->request.sequence;
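++	/* If the target sequence already passed, deliver right away;
++	 * otherwise leave the event queued for drm_handle_vblank_events(). */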
++	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++		drm_vblank_put(dev, e->pipe);
++		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
++		wake_up_interruptible(&e->base.file_priv->event_wait);
++	} else {
++		list_add_tail(&e->base.link, &dev->vblank_event_list);
++	}
++
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++
++	return 0;
++}
++
+ /**
+  * Wait for VBLANK.
+  *
+@@ -627,6 +687,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
+ 		goto done;
+ 	}
+ 
++	if (flags & _DRM_VBLANK_EVENT)
++		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
++
+ 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+ 	    (seq - vblwait->request.sequence) <= (1<<23)) {
+ 		vblwait->request.sequence = seq + 1;
+@@ -659,6 +722,38 @@ done:
+ 	return ret;
+ }
+ 
++void drm_handle_vblank_events(struct drm_device *dev, int crtc)
++{
++	struct drm_pending_vblank_event *e, *t;
++	struct timeval now;
++	unsigned long flags;
++	unsigned int seq;
++
++	do_gettimeofday(&now);
++	seq = drm_vblank_count(dev, crtc);
++
++	spin_lock_irqsave(&dev->event_lock, flags);
++
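++	/* Dispatch every queued event whose target sequence this
++	 * CRTC has reached. */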
++	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
++		if (e->pipe != crtc)
++			continue;
++		if ((seq - e->event.sequence) > (1<<23))
++			continue;
++
++		DRM_DEBUG("vblank event on %d, current %d\n",
++			  e->event.sequence, seq);
++
++		e->event.sequence = seq;
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++		drm_vblank_put(dev, e->pipe);
++		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
++		wake_up_interruptible(&e->base.file_priv->event_wait);
++	}
++
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++}
++
+ /**
+  * drm_handle_vblank - handle a vblank event
+  * @dev: DRM device
+@@ -669,7 +764,11 @@ done:
+  */
+ void drm_handle_vblank(struct drm_device *dev, int crtc)
+ {
++	if (!dev->num_crtcs)
++		return;
++
+ 	atomic_inc(&dev->_vblank_count[crtc]);
+ 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
++	drm_handle_vblank_events(dev, crtc);
+ }
+ EXPORT_SYMBOL(drm_handle_vblank);
+diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
+index 97dc5a4..2ac074c 100644
+--- a/drivers/gpu/drm/drm_mm.c
++++ b/drivers/gpu/drm/drm_mm.c
+@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+ }
+ EXPORT_SYMBOL(drm_mm_get_block_generic);
+ 
++struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int atomic)
++{
++	struct drm_mm_node *align_splitoff = NULL;
++	unsigned tmp = 0;
++	unsigned wasted = 0;
++
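++	/* "wasted" is the space skipped so the block starts no earlier
++	 * than "start" and at the requested alignment; it is split off
++	 * and freed again below. */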
++	if (node->start < start)
++		wasted += start - node->start;
++	if (alignment)
++		tmp = ((node->start + wasted) % alignment);
++
++	if (tmp)
++		wasted += alignment - tmp;
++	if (wasted) {
++		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
++		if (unlikely(align_splitoff == NULL))
++			return NULL;
++	}
++
++	if (node->size == size) {
++		list_del_init(&node->fl_entry);
++		node->free = 0;
++	} else {
++		node = drm_mm_split_at_start(node, size, atomic);
++	}
++
++	if (align_splitoff)
++		drm_mm_put_block(align_splitoff);
++
++	return node;
++}
++EXPORT_SYMBOL(drm_mm_get_block_range_generic);
++
+ /*
+  * Put a block. Merge with the previous and / or next block if they are free.
+  * Otherwise add to the free stack.
+@@ -320,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ 		if (entry->size >= size + wasted) {
+ 			if (!best_match)
+ 				return entry;
+-			if (size < best_size) {
++			if (entry->size < best_size) {
+ 				best = entry;
+ 				best_size = entry->size;
+ 			}
+@@ -331,6 +369,57 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ }
+ EXPORT_SYMBOL(drm_mm_search_free);
+ 
++struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int best_match)
++{
++	struct list_head *list;
++	const struct list_head *free_stack = &mm->fl_entry;
++	struct drm_mm_node *entry;
++	struct drm_mm_node *best;
++	unsigned long best_size;
++	unsigned wasted;
++
++	best = NULL;
++	best_size = ~0UL;
++
++	list_for_each(list, free_stack) {
++		entry = list_entry(list, struct drm_mm_node, fl_entry);
++		wasted = 0;
++
++		if (entry->size < size)
++			continue;
++
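++		/* Skip blocks that do not overlap [start, end]. */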
++		if (entry->start > end || (entry->start+entry->size) < start)
++			continue;
++
++		if (entry->start < start)
++			wasted += start - entry->start;
++
++		if (alignment) {
++			register unsigned tmp = (entry->start + wasted) % alignment;
++			if (tmp)
++				wasted += alignment - tmp;
++		}
++
++		if (entry->size >= size + wasted &&
++		    (entry->start + wasted + size) <= end) {
++			if (!best_match)
++				return entry;
++			if (entry->size < best_size) {
++				best = entry;
++				best_size = entry->size;
++			}
++		}
++	}
++
++	return best;
++}
++EXPORT_SYMBOL(drm_mm_search_free_in_range);
++
+ int drm_mm_clean(struct drm_mm * mm)
+ {
+ 	struct list_head *head = &mm->ml_entry;
+@@ -381,6 +470,26 @@ void drm_mm_takedown(struct drm_mm * mm)
+ }
+ EXPORT_SYMBOL(drm_mm_takedown);
+ 
++void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
++{
++	struct drm_mm_node *entry;
++	int total_used = 0, total_free = 0, total = 0;
++
++	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
++		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
++			prefix, entry->start, entry->start + entry->size,
++			entry->size, entry->free ? "free" : "used");
++		total += entry->size;
++		if (entry->free)
++			total_free += entry->size;
++		else
++			total_used += entry->size;
++	}
++	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
++		total_used, total_free);
++}
++EXPORT_SYMBOL(drm_mm_debug_table);
++
+ #if defined(CONFIG_DEBUG_FS)
+ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+ {
+@@ -395,7 +504,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
+ 		else
+ 			total_used += entry->size;
+ 	}
+-	seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
++	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(drm_mm_dump_table);
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index 51f6772..76d6339 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -1,9 +1,4 @@
+ /*
+- * The list_sort function is (presumably) licensed under the GPL (see the
+- * top level "COPYING" file for details).
+- *
+- * The remainder of this file is:
+- *
+  * Copyright © 1997-2003 by The XFree86 Project, Inc.
+  * Copyright © 2007 Dave Airlie
+  * Copyright © 2007-2008 Intel Corporation
+@@ -36,6 +31,7 @@
+  */
+ 
+ #include <linux/list.h>
++#include <linux/list_sort.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "drm_crtc.h"
+@@ -553,6 +549,32 @@ int drm_mode_height(struct drm_display_mode *mode)
+ }
+ EXPORT_SYMBOL(drm_mode_height);
+ 
++/**
++ * drm_mode_hsync - get the hsync of a mode
++ * @mode: mode
++ *
++ * LOCKING:
++ * None.
++ *
++ * Return @mode's hsync rate in kHz, rounded to the nearest integer.
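++ * For example, a 25175 kHz pixel clock with an htotal of 800 gives
++ * 31468 Hz in integer math, which is returned as 31 (kHz).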
++ */
++int drm_mode_hsync(struct drm_display_mode *mode)
++{
++	unsigned int calc_val;
++
++	if (mode->hsync)
++		return mode->hsync;
++
++	if (mode->htotal <= 0)
++		return 0;
++
++	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
++	calc_val += 500;				/* round to nearest kHz */
++	calc_val /= 1000;				/* Hz -> kHz */
++
++	return calc_val;
++}
++EXPORT_SYMBOL(drm_mode_hsync);
++
+ /**
+  * drm_mode_vrefresh - get the vrefresh of a mode
+  * @mode: mode
+@@ -560,7 +582,7 @@ EXPORT_SYMBOL(drm_mode_height);
+  * LOCKING:
+  * None.
+  *
+- * Return @mode's vrefresh rate or calculate it if necessary.
++ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
+  *
+  * FIXME: why is this needed?  shouldn't vrefresh be set already?
+  *
+@@ -829,6 +851,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
+ 
+ /**
+  * drm_mode_compare - compare modes for favorability
++ * @priv: unused
+  * @lh_a: list_head for first mode
+  * @lh_b: list_head for second mode
+  *
+@@ -842,7 +865,7 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
+  * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
+  * positive if @lh_b is better than @lh_a.
+  */
+-static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
++static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
+ {
+ 	struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
+ 	struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
+@@ -859,85 +882,6 @@ static int drm_mode_compare(struct list_head *lh_a, struct list_head *lh_b)
+ 	return diff;
+ }
+ 
+-/* FIXME: what we don't have a list sort function? */
+-/* list sort from Mark J Roberts (mjr at znex.org) */
+-void list_sort(struct list_head *head,
+-	       int (*cmp)(struct list_head *a, struct list_head *b))
+-{
+-	struct list_head *p, *q, *e, *list, *tail, *oldhead;
+-	int insize, nmerges, psize, qsize, i;
+-
+-	list = head->next;
+-	list_del(head);
+-	insize = 1;
+-	for (;;) {
+-		p = oldhead = list;
+-		list = tail = NULL;
+-		nmerges = 0;
+-
+-		while (p) {
+-			nmerges++;
+-			q = p;
+-			psize = 0;
+-			for (i = 0; i < insize; i++) {
+-				psize++;
+-				q = q->next == oldhead ? NULL : q->next;
+-				if (!q)
+-					break;
+-			}
+-
+-			qsize = insize;
+-			while (psize > 0 || (qsize > 0 && q)) {
+-				if (!psize) {
+-					e = q;
+-					q = q->next;
+-					qsize--;
+-					if (q == oldhead)
+-						q = NULL;
+-				} else if (!qsize || !q) {
+-					e = p;
+-					p = p->next;
+-					psize--;
+-					if (p == oldhead)
+-						p = NULL;
+-				} else if (cmp(p, q) <= 0) {
+-					e = p;
+-					p = p->next;
+-					psize--;
+-					if (p == oldhead)
+-						p = NULL;
+-				} else {
+-					e = q;
+-					q = q->next;
+-					qsize--;
+-					if (q == oldhead)
+-						q = NULL;
+-				}
+-				if (tail)
+-					tail->next = e;
+-				else
+-					list = e;
+-				e->prev = tail;
+-				tail = e;
+-			}
+-			p = q;
+-		}
+-
+-		tail->next = list;
+-		list->prev = tail;
+-
+-		if (nmerges <= 1)
+-			break;
+-
+-		insize *= 2;
+-	}
+-
+-	head->next = list;
+-	head->prev = list->prev;
+-	list->prev->next = head;
+-	list->prev = head;
+-}
+-
+ /**
+  * drm_mode_sort - sort mode list
+  * @mode_list: list to sort
+@@ -949,7 +893,7 @@ void list_sort(struct list_head *head,
+  */
+ void drm_mode_sort(struct list_head *mode_list)
+ {
+-	list_sort(mode_list, drm_mode_compare);
++	list_sort(NULL, mode_list, drm_mode_compare);
+ }
+ EXPORT_SYMBOL(drm_mode_sort);
+ 
+diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
+index 55bb8a8..ad73e14 100644
+--- a/drivers/gpu/drm/drm_stub.c
++++ b/drivers/gpu/drm/drm_stub.c
+@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
+ 	kref_get(&master->refcount);
+ 	return master;
+ }
++EXPORT_SYMBOL(drm_master_get);
+ 
+ static void drm_master_destroy(struct kref *kref)
+ {
+@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master)
+ 	kref_put(&(*master)->refcount, drm_master_destroy);
+ 	*master = NULL;
+ }
++EXPORT_SYMBOL(drm_master_put);
+ 
+ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ 			struct drm_file *file_priv)
+ {
++	int ret = 0;
++
+ 	if (file_priv->is_master)
+ 		return 0;
+ 
+@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ 		mutex_lock(&dev->struct_mutex);
+ 		file_priv->minor->master = drm_master_get(file_priv->master);
+ 		file_priv->is_master = 1;
++		if (dev->driver->master_set) {
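++			/* Roll back if the driver vetoes the new master. */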
++			ret = dev->driver->master_set(dev, file_priv, false);
++			if (unlikely(ret != 0)) {
++				file_priv->is_master = 0;
++				drm_master_put(&file_priv->minor->master);
++			}
++		}
+ 		mutex_unlock(&dev->struct_mutex);
+ 	}
+ 
+@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&dev->struct_mutex);
++	if (dev->driver->master_drop)
++		dev->driver->master_drop(dev, file_priv, false);
+ 	drm_master_put(&file_priv->minor->master);
+ 	file_priv->is_master = 0;
+ 	mutex_unlock(&dev->struct_mutex);
+@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+ 	INIT_LIST_HEAD(&dev->ctxlist);
+ 	INIT_LIST_HEAD(&dev->vmalist);
+ 	INIT_LIST_HEAD(&dev->maplist);
++	INIT_LIST_HEAD(&dev->vblank_event_list);
+ 
+ 	spin_lock_init(&dev->count_lock);
+ 	spin_lock_init(&dev->drw_lock);
++	spin_lock_init(&dev->event_lock);
+ 	init_timer(&dev->timer);
+ 	mutex_init(&dev->struct_mutex);
+ 	mutex_init(&dev->ctxlist_mutex);
+diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
+new file mode 100644
+index 0000000..6d2abaf
+--- /dev/null
++++ b/drivers/gpu/drm/i2c/Makefile
+@@ -0,0 +1,4 @@
++ccflags-y := -Iinclude/drm
++
++ch7006-y := ch7006_drv.o ch7006_mode.o
++obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
+diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
+new file mode 100644
+index 0000000..81681a0
+--- /dev/null
++++ b/drivers/gpu/drm/i2c/ch7006_drv.c
+@@ -0,0 +1,536 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "ch7006_priv.h"
++
++/* DRM encoder functions */
++
++static void ch7006_encoder_set_config(struct drm_encoder *encoder,
++				      void *params)
++{
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++
++	priv->params = params;
++}
++
++static void ch7006_encoder_destroy(struct drm_encoder *encoder)
++{
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++
++	drm_property_destroy(encoder->dev, priv->scale_property);
++
++	kfree(priv);
++	to_encoder_slave(encoder)->slave_priv = NULL;
++
++	drm_i2c_encoder_destroy(encoder);
++}
++
++static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_state *state = &priv->state;
++
++	ch7006_dbg(client, "\n");
++
++	if (mode == priv->last_dpms)
++		return;
++	priv->last_dpms = mode;
++
++	ch7006_setup_power_state(encoder);
++
++	ch7006_load_reg(client, state, CH7006_POWER);
++}
++
++static void ch7006_encoder_save(struct drm_encoder *encoder)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++
++	ch7006_dbg(client, "\n");
++
++	ch7006_state_save(client, &priv->saved_state);
++}
++
++static void ch7006_encoder_restore(struct drm_encoder *encoder)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++
++	ch7006_dbg(client, "\n");
++
++	ch7006_state_load(client, &priv->saved_state);
++}
++
++static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
++				      struct drm_display_mode *mode,
++				      struct drm_display_mode *adjusted_mode)
++{
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++
++	/* The ch7006 is painfully picky with the input timings so no
++	 * custom modes for now... */
++
++	priv->mode = ch7006_lookup_mode(encoder, mode);
++
++	return !!priv->mode;
++}
++
++static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
++				     struct drm_display_mode *mode)
++{
++	if (ch7006_lookup_mode(encoder, mode))
++		return MODE_OK;
++	else
++		return MODE_BAD;
++}
++
++static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
++				     struct drm_display_mode *drm_mode,
++				     struct drm_display_mode *adjusted_mode)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_encoder_params *params = priv->params;
++	struct ch7006_state *state = &priv->state;
++	uint8_t *regs = state->regs;
++	struct ch7006_mode *mode = priv->mode;
++	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
++	int start_active;
++
++	ch7006_dbg(client, "\n");
++
++	regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode;
++	regs[CH7006_BWIDTH] = 0;
++	regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT,
++					 params->input_format);
++
++	regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK
++		| bitf(CH7006_CLKMODE_XCM, params->xcm)
++		| bitf(CH7006_CLKMODE_PCM, params->pcm);
++	if (params->clock_mode)
++		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER;
++	if (params->clock_edge)
++		regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE;
++
++	start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7);
++	regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active);
++	regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active);
++
++	regs[CH7006_INPUT_SYNC] = 0;
++	if (params->sync_direction)
++		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT;
++	if (params->sync_encoding)
++		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED;
++	if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
++		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC;
++	if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
++		regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC;
++
++	regs[CH7006_DETECT] = 0;
++	regs[CH7006_BCLKOUT] = 0;
++
++	regs[CH7006_SUBC_INC3] = 0;
++	if (params->pout_level)
++		regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V;
++
++	regs[CH7006_SUBC_INC4] = 0;
++	if (params->active_detect)
++		regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT;
++
++	regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL];
++
++	ch7006_setup_levels(encoder);
++	ch7006_setup_subcarrier(encoder);
++	ch7006_setup_pll(encoder);
++	ch7006_setup_power_state(encoder);
++	ch7006_setup_properties(encoder);
++
++	ch7006_state_load(client, state);
++}
++
++static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
++						       struct drm_connector *connector)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_state *state = &priv->state;
++	int det;
++
++	ch7006_dbg(client, "\n");
++
++	ch7006_save_reg(client, state, CH7006_DETECT);
++	ch7006_save_reg(client, state, CH7006_POWER);
++	ch7006_save_reg(client, state, CH7006_CLKMODE);
++
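++	/* Pulse the sense bit with the chip fully powered, then read
++	 * back which outputs see a load. */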
++	ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET |
++					   bitfs(CH7006_POWER_LEVEL, NORMAL));
++	ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER);
++
++	ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE);
++
++	ch7006_write(client, CH7006_DETECT, 0);
++
++	det = ch7006_read(client, CH7006_DETECT);
++
++	ch7006_load_reg(client, state, CH7006_CLKMODE);
++	ch7006_load_reg(client, state, CH7006_POWER);
++	ch7006_load_reg(client, state, CH7006_DETECT);
++
++	if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
++		    CH7006_DETECT_SVIDEO_C_TEST|
++		    CH7006_DETECT_CVBS_TEST)) == 0)
++		priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
++	else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
++			 CH7006_DETECT_SVIDEO_C_TEST)) == 0)
++		priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
++	else if ((det & CH7006_DETECT_CVBS_TEST) == 0)
++		priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
++	else
++		priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
++
++	drm_connector_property_set_value(connector,
++			encoder->dev->mode_config.tv_subconnector_property,
++							priv->subconnector);
++
++	return priv->subconnector ? connector_status_connected :
++					connector_status_disconnected;
++}
++
++static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
++				    struct drm_connector *connector)
++{
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_mode *mode;
++	int n = 0;
++
++	for (mode = ch7006_modes; mode->mode.clock; mode++) {
++		if (~mode->valid_scales & 1<<priv->scale ||
++		    ~mode->valid_norms & 1<<priv->norm)
++			continue;
++
++		drm_mode_probed_add(connector,
++				drm_mode_duplicate(encoder->dev, &mode->mode));
++
++		n++;
++	}
++
++	return n;
++}
++
++static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
++					   struct drm_connector *connector)
++{
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct drm_mode_config *conf = &dev->mode_config;
++
++	drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
++
++	priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
++						   "scale", 2);
++	priv->scale_property->values[0] = 0;
++	priv->scale_property->values[1] = 2;
++
++	drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
++				      priv->select_subconnector);
++	drm_connector_attach_property(connector, conf->tv_subconnector_property,
++				      priv->subconnector);
++	drm_connector_attach_property(connector, conf->tv_left_margin_property,
++				      priv->hmargin);
++	drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
++				      priv->vmargin);
++	drm_connector_attach_property(connector, conf->tv_mode_property,
++				      priv->norm);
++	drm_connector_attach_property(connector, conf->tv_brightness_property,
++				      priv->brightness);
++	drm_connector_attach_property(connector, conf->tv_contrast_property,
++				      priv->contrast);
++	drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
++				      priv->flicker);
++	drm_connector_attach_property(connector, priv->scale_property,
++				      priv->scale);
++
++	return 0;
++}
++
++static int ch7006_encoder_set_property(struct drm_encoder *encoder,
++				       struct drm_connector *connector,
++				       struct drm_property *property,
++				       uint64_t val)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_state *state = &priv->state;
++	struct drm_mode_config *conf = &encoder->dev->mode_config;
++	struct drm_crtc *crtc = encoder->crtc;
++	bool modes_changed = false;
++
++	ch7006_dbg(client, "\n");
++
++	if (property == conf->tv_select_subconnector_property) {
++		priv->select_subconnector = val;
++
++		ch7006_setup_power_state(encoder);
++
++		ch7006_load_reg(client, state, CH7006_POWER);
++
++	} else if (property == conf->tv_left_margin_property) {
++		priv->hmargin = val;
++
++		ch7006_setup_properties(encoder);
++
++		ch7006_load_reg(client, state, CH7006_POV);
++		ch7006_load_reg(client, state, CH7006_HPOS);
++
++	} else if (property == conf->tv_bottom_margin_property) {
++		priv->vmargin = val;
++
++		ch7006_setup_properties(encoder);
++
++		ch7006_load_reg(client, state, CH7006_POV);
++		ch7006_load_reg(client, state, CH7006_VPOS);
++
++	} else if (property == conf->tv_mode_property) {
++		if (connector->dpms != DRM_MODE_DPMS_OFF)
++			return -EINVAL;
++
++		priv->norm = val;
++
++		modes_changed = true;
++
++	} else if (property == conf->tv_brightness_property) {
++		priv->brightness = val;
++
++		ch7006_setup_levels(encoder);
++
++		ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
++
++	} else if (property == conf->tv_contrast_property) {
++		priv->contrast = val;
++
++		ch7006_setup_properties(encoder);
++
++		ch7006_load_reg(client, state, CH7006_CONTRAST);
++
++	} else if (property == conf->tv_flicker_reduction_property) {
++		priv->flicker = val;
++
++		ch7006_setup_properties(encoder);
++
++		ch7006_load_reg(client, state, CH7006_FFILTER);
++
++	} else if (property == priv->scale_property) {
++		if (connector->dpms != DRM_MODE_DPMS_OFF)
++			return -EINVAL;
++
++		priv->scale = val;
++
++		modes_changed = true;
++
++	} else {
++		return -EINVAL;
++	}
++
++	if (modes_changed) {
++		drm_helper_probe_single_connector_modes(connector, 0, 0);
++
++		/* Disable the crtc to ensure a full modeset is
++		 * performed whenever it's turned on again. */
++		if (crtc) {
++			struct drm_mode_set modeset = {
++				.crtc = crtc,
++			};
++
++			crtc->funcs->set_config(&modeset);
++		}
++	}
++
++	return 0;
++}
++
++static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
++	.set_config = ch7006_encoder_set_config,
++	.destroy = ch7006_encoder_destroy,
++	.dpms = ch7006_encoder_dpms,
++	.save = ch7006_encoder_save,
++	.restore = ch7006_encoder_restore,
++	.mode_fixup = ch7006_encoder_mode_fixup,
++	.mode_valid = ch7006_encoder_mode_valid,
++	.mode_set = ch7006_encoder_mode_set,
++	.detect = ch7006_encoder_detect,
++	.get_modes = ch7006_encoder_get_modes,
++	.create_resources = ch7006_encoder_create_resources,
++	.set_property = ch7006_encoder_set_property,
++};
++
++
++/* I2C driver functions */
++
++static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
++{
++	uint8_t addr = CH7006_VERSION_ID;
++	uint8_t val;
++	int ret;
++
++	ch7006_dbg(client, "\n");
++
++	ret = i2c_master_send(client, &addr, sizeof(addr));
++	if (ret < 0)
++		goto fail;
++
++	ret = i2c_master_recv(client, &val, sizeof(val));
++	if (ret < 0)
++		goto fail;
++
++	ch7006_info(client, "Detected version ID: %x\n", val);
++
++	/* I don't know what this is for, but otherwise I get no
++	 * signal.
++	 */
++	ch7006_write(client, 0x3d, 0x0);
++
++	return 0;
++
++fail:
++	ch7006_err(client, "Error %d reading version ID\n", ret);
++
++	return -ENODEV;
++}
++
++static int ch7006_remove(struct i2c_client *client)
++{
++	ch7006_dbg(client, "\n");
++
++	return 0;
++}
++
++static int ch7006_encoder_init(struct i2c_client *client,
++			       struct drm_device *dev,
++			       struct drm_encoder_slave *encoder)
++{
++	struct ch7006_priv *priv;
++	int i;
++
++	ch7006_dbg(client, "\n");
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++
++	encoder->slave_priv = priv;
++	encoder->slave_funcs = &ch7006_encoder_funcs;
++
++	priv->norm = TV_NORM_PAL;
++	priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
++	priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
++	priv->scale = 1;
++	priv->contrast = 50;
++	priv->brightness = 50;
++	priv->flicker = 50;
++	priv->hmargin = 50;
++	priv->vmargin = 50;
++	priv->last_dpms = -1;
++
++	if (ch7006_tv_norm) {
++		for (i = 0; i < NUM_TV_NORMS; i++) {
++			if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) {
++				priv->norm = i;
++				break;
++			}
++		}
++
++		if (i == NUM_TV_NORMS)
++			ch7006_err(client, "Invalid TV norm setting \"%s\".\n",
++				   ch7006_tv_norm);
++	}
++
++	if (ch7006_scale >= 0 && ch7006_scale <= 2)
++		priv->scale = ch7006_scale;
++	else
++		ch7006_err(client, "Invalid scale setting \"%d\".\n",
++			   ch7006_scale);
++
++	return 0;
++}
++
++static struct i2c_device_id ch7006_ids[] = {
++	{ "ch7006", 0 },
++	{ }
++};
++MODULE_DEVICE_TABLE(i2c, ch7006_ids);
++
++static struct drm_i2c_encoder_driver ch7006_driver = {
++	.i2c_driver = {
++		.probe = ch7006_probe,
++		.remove = ch7006_remove,
++
++		.driver = {
++			.name = "ch7006",
++		},
++
++		.id_table = ch7006_ids,
++	},
++
++	.encoder_init = ch7006_encoder_init,
++};
++
++
++/* Module initialization */
++
++static int __init ch7006_init(void)
++{
++	return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
++}
++
++static void __exit ch7006_exit(void)
++{
++	drm_i2c_encoder_unregister(&ch7006_driver);
++}
++
++int ch7006_debug;
++module_param_named(debug, ch7006_debug, int, 0600);
++MODULE_PARM_DESC(debug, "Enable debug output.");
++
++char *ch7006_tv_norm;
++module_param_named(tv_norm, ch7006_tv_norm, charp, 0600);
++MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
++		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n"
++		 "\t\tDefault: PAL");
++
++int ch7006_scale = 1;
++module_param_named(scale, ch7006_scale, int, 0600);
++MODULE_PARM_DESC(scale, "Default scale.\n"
++		 "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n"
++		 "\t\t\t1 -> Select default video modes.\n"
++		 "\t\t\t2 -> Select video modes with a lower blanking ratio.");
++
++MODULE_AUTHOR("Francisco Jerez <currojerez at riseup.net>");
++MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver");
++MODULE_LICENSE("GPL and additional rights");
++
++module_init(ch7006_init);
++module_exit(ch7006_exit);
+diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
+new file mode 100644
+index 0000000..e447dfb
+--- /dev/null
++++ b/drivers/gpu/drm/i2c/ch7006_mode.c
+@@ -0,0 +1,468 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "ch7006_priv.h"
++
++char *ch7006_tv_norm_names[] = {
++	[TV_NORM_PAL] = "PAL",
++	[TV_NORM_PAL_M] = "PAL-M",
++	[TV_NORM_PAL_N] = "PAL-N",
++	[TV_NORM_PAL_NC] = "PAL-Nc",
++	[TV_NORM_PAL_60] = "PAL-60",
++	[TV_NORM_NTSC_M] = "NTSC-M",
++	[TV_NORM_NTSC_J] = "NTSC-J",
++};
++
++#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001,		\
++		.vdisplay = 480,					\
++		.vtotal = 525,						\
++		.hvirtual = 660
++
++#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1,		\
++		.vdisplay = 576,				\
++		.vtotal = 625,					\
++		.hvirtual = 810
++
++struct ch7006_tv_norm_info ch7006_tv_norms[] = {
++	[TV_NORM_NTSC_M] = {
++		NTSC_LIKE_TIMINGS,
++		.black_level = 0.339 * fixed1,
++		.subc_freq = 3579545 * fixed1,
++		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC),
++		.voffset = 0,
++	},
++	[TV_NORM_NTSC_J] = {
++		NTSC_LIKE_TIMINGS,
++		.black_level = 0.286 * fixed1,
++		.subc_freq = 3579545 * fixed1,
++		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J),
++		.voffset = 0,
++	},
++	[TV_NORM_PAL] = {
++		PAL_LIKE_TIMINGS,
++		.black_level = 0.3 * fixed1,
++		.subc_freq = 4433618.75 * fixed1,
++		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
++		.voffset = 0,
++	},
++	[TV_NORM_PAL_M] = {
++		NTSC_LIKE_TIMINGS,
++		.black_level = 0.339 * fixed1,
++		.subc_freq = 3575611.433 * fixed1,
++		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
++		.voffset = 16,
++	},
++
++	/* The following modes seem to work right but they're
++	 * undocumented */
++
++	[TV_NORM_PAL_N] = {
++		PAL_LIKE_TIMINGS,
++		.black_level = 0.339 * fixed1,
++		.subc_freq = 4433618.75 * fixed1,
++		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
++		.voffset = 0,
++	},
++	[TV_NORM_PAL_NC] = {
++		PAL_LIKE_TIMINGS,
++		.black_level = 0.3 * fixed1,
++		.subc_freq = 3582056.25 * fixed1,
++		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
++		.voffset = 0,
++	},
++	[TV_NORM_PAL_60] = {
++		NTSC_LIKE_TIMINGS,
++		.black_level = 0.3 * fixed1,
++		.subc_freq = 4433618.75 * fixed1,
++		.dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
++		.voffset = 16,
++	},
++};
++
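++/* Build a ch7006_mode entry: f is the pixel clock in kHz, hd/vd the
++ * visible resolution, ht/vt the totals, hsynp/vsynp the sync
++ * polarities, and e_hd/e_vd the resolution the encoder is programmed
++ * for. */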
++#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp,				\
++	       subc, scale, scale_mask, norm_mask, e_hd, e_vd) {	\
++		.mode = {						\
++			.name = #hd "x" #vd,				\
++			.status = 0,					\
++			.type = DRM_MODE_TYPE_DRIVER,			\
++			.clock = f,					\
++			.hdisplay = hd,					\
++			.hsync_start = e_hd + 16,			\
++			.hsync_end = e_hd + 80,				\
++			.htotal = ht,					\
++			.hskew = 0,					\
++			.vdisplay = vd,					\
++			.vsync_start = vd + 10,				\
++			.vsync_end = vd + 26,				\
++			.vtotal = vt,					\
++			.vscan = 0,					\
++			.flags = DRM_MODE_FLAG_##hsynp##HSYNC |		\
++				DRM_MODE_FLAG_##vsynp##VSYNC,		\
++			.vrefresh = 0,					\
++		},							\
++		.enc_hdisp = e_hd,					\
++		.enc_vdisp = e_vd,					\
++		.subc_coeff = subc * fixed1,				\
++		.dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \
++			    bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \
++		.valid_scales = scale_mask,				\
++		.valid_norms = norm_mask				\
++	 }
++
++#define MODE(f, hd, vd, ht, vt, hsynp, vsynp,				\
++	     subc, scale, scale_mask, norm_mask)			\
++	__MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale,		\
++	       scale_mask, norm_mask, hd, vd)
++
++#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J |		\
++		   1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60)
++
++#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
++
++struct ch7006_mode ch7006_modes[] = {
++	MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
++	MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
++	MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
++	MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE),
++	MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE),
++	MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE),
++	MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE),
++	MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE),
++	MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE),
++	MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE),
++	MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE),
++	MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE),
++	MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE),
++	MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE),
++	MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE),
++	MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE),
++	MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE),
++	MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE),
++	MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE),
++	__MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600),
++	MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE),
++	MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE),
++	MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE),
++	MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE),
++	MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE),
++	{}
++};
++
++struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
++				       struct drm_display_mode *drm_mode)
++{
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_mode *mode;
++
++	for (mode = ch7006_modes; mode->mode.clock; mode++) {
++
++		if (~mode->valid_norms & 1<<priv->norm)
++			continue;
++
++		if (mode->mode.hdisplay != drm_mode->hdisplay ||
++		    mode->mode.vdisplay != drm_mode->vdisplay ||
++		    mode->mode.vtotal != drm_mode->vtotal ||
++		    mode->mode.htotal != drm_mode->htotal ||
++		    mode->mode.clock != drm_mode->clock)
++			continue;
++
++		return mode;
++	}
++
++	return NULL;
++}
++
++/* Some common HW state calculation code */
++
++void ch7006_setup_levels(struct drm_encoder *encoder)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	uint8_t *regs = priv->state.regs;
++	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
++	int gain;
++	int black_level;
++
++	/* Set DAC_GAIN if the voltage drop between white and black is
++	 * high enough. */
++	if (norm->black_level < 339*fixed1/1000) {
++		gain = 76;
++
++		regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN;
++	} else {
++		gain = 71;
++
++		regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN;
++	}
++
++	black_level = round_fixed(norm->black_level*26625)/gain;
++
++	/* Correct it with the specified brightness. */
++	black_level = interpolate(90, black_level, 208, priv->brightness);
++
++	regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level);
++
++	ch7006_dbg(client, "black level: %d\n", black_level);
++}
++
++void ch7006_setup_subcarrier(struct drm_encoder *encoder)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_state *state = &priv->state;
++	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
++	struct ch7006_mode *mode = priv->mode;
++	uint32_t subc_inc;
++
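++	/* Both values are 32.32 fixed point; shift them down first so
++	 * the 64-bit product cannot overflow. */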
++	subc_inc = round_fixed((mode->subc_coeff >> 8)
++			       * (norm->subc_freq >> 24));
++
++	setbitf(state, CH7006_SUBC_INC0, 28, subc_inc);
++	setbitf(state, CH7006_SUBC_INC1, 24, subc_inc);
++	setbitf(state, CH7006_SUBC_INC2, 20, subc_inc);
++	setbitf(state, CH7006_SUBC_INC3, 16, subc_inc);
++	setbitf(state, CH7006_SUBC_INC4, 12, subc_inc);
++	setbitf(state, CH7006_SUBC_INC5, 8, subc_inc);
++	setbitf(state, CH7006_SUBC_INC6, 4, subc_inc);
++	setbitf(state, CH7006_SUBC_INC7, 0, subc_inc);
++
++	ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc);
++}
++
++void ch7006_setup_pll(struct drm_encoder *encoder)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	uint8_t *regs = priv->state.regs;
++	struct ch7006_mode *mode = priv->mode;
++	int n, best_n = 0;
++	int m, best_m = 0;
++	int freq, best_freq = 0;
++
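++	/* Brute-force search for the (n, m) divider pair whose output
++	 * frequency is closest to the requested pixel clock. */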
++	for (n = 0; n < CH7006_MAXN; n++) {
++		for (m = 0; m < CH7006_MAXM; m++) {
++			freq = CH7006_FREQ0*(n+2)/(m+2);
++
++			if (abs(freq - mode->mode.clock) <
++			    abs(best_freq - mode->mode.clock)) {
++				best_freq = freq;
++				best_n = n;
++				best_m = m;
++			}
++		}
++	}
++
++	regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) |
++		bitf(CH7006_PLLOV_M_8, best_m);
++
++	regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m);
++	regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n);
++
++	if (best_n < 108)
++		regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR;
++	else
++		regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR;
++
++	ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n",
++		   best_n, best_m, best_freq, best_n < 108);
++}
++
++void ch7006_setup_power_state(struct drm_encoder *encoder)
++{
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	uint8_t *power = &priv->state.regs[CH7006_POWER];
++	int subconnector;
++
++	subconnector = priv->select_subconnector ? priv->select_subconnector :
++							priv->subconnector;
++
++	*power = CH7006_POWER_RESET;
++
++	if (priv->last_dpms == DRM_MODE_DPMS_ON) {
++		switch (subconnector) {
++		case DRM_MODE_SUBCONNECTOR_SVIDEO:
++			*power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF);
++			break;
++		case DRM_MODE_SUBCONNECTOR_Composite:
++			*power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF);
++			break;
++		case DRM_MODE_SUBCONNECTOR_SCART:
++			*power |= bitfs(CH7006_POWER_LEVEL, NORMAL) |
++				CH7006_POWER_SCART;
++			break;
++		}
++
++	} else {
++		*power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
++	}
++}
++
++void ch7006_setup_properties(struct drm_encoder *encoder)
++{
++	struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
++	struct ch7006_priv *priv = to_ch7006_priv(encoder);
++	struct ch7006_state *state = &priv->state;
++	struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
++	struct ch7006_mode *ch_mode = priv->mode;
++	struct drm_display_mode *mode = &ch_mode->mode;
++	uint8_t *regs = state->regs;
++	int flicker, contrast, hpos, vpos;
++	uint64_t scale, aspect;
++
++	flicker = interpolate(0, 2, 3, priv->flicker);
++	regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) |
++		bitf(CH7006_FFILTER_LUMA, flicker) |
++		bitf(CH7006_FFILTER_CHROMA, 1);
++
++	contrast = interpolate(0, 5, 7, priv->contrast);
++	regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast);
++
++	scale = norm->vtotal*fixed1;
++	do_div(scale, mode->vtotal);
++
++	aspect = ch_mode->enc_hdisp*fixed1;
++	do_div(aspect, ch_mode->enc_vdisp);
++
++	hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale)
++			   * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4;
++
++	setbitf(state, CH7006_POV, HPOS_8, hpos);
++	setbitf(state, CH7006_HPOS, 0, hpos);
++
++	vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale)
++		   + norm->voffset) * priv->vmargin / 100 / 2;
++
++	setbitf(state, CH7006_POV, VPOS_8, vpos);
++	setbitf(state, CH7006_VPOS, 0, vpos);
++
++	ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos);
++}
++
++/* HW access functions */
++
++void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val)
++{
++	uint8_t buf[] = {addr, val};
++	int ret;
++
++	ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
++	if (ret < 0)
++		ch7006_err(client, "Error %d writing to subaddress 0x%x\n",
++			   ret, addr);
++}
++
++uint8_t ch7006_read(struct i2c_client *client, uint8_t addr)
++{
++	uint8_t val;
++	int ret;
++
++	ret = i2c_master_send(client, &addr, sizeof(addr));
++	if (ret < 0)
++		goto fail;
++
++	ret = i2c_master_recv(client, &val, sizeof(val));
++	if (ret < 0)
++		goto fail;
++
++	return val;
++
++fail:
++	ch7006_err(client, "Error %d reading from subaddress 0x%x\n",
++		   ret, addr);
++	return 0;
++}
++
++void ch7006_state_load(struct i2c_client *client,
++		       struct ch7006_state *state)
++{
++	ch7006_load_reg(client, state, CH7006_POWER);
++
++	ch7006_load_reg(client, state, CH7006_DISPMODE);
++	ch7006_load_reg(client, state, CH7006_FFILTER);
++	ch7006_load_reg(client, state, CH7006_BWIDTH);
++	ch7006_load_reg(client, state, CH7006_INPUT_FORMAT);
++	ch7006_load_reg(client, state, CH7006_CLKMODE);
++	ch7006_load_reg(client, state, CH7006_START_ACTIVE);
++	ch7006_load_reg(client, state, CH7006_POV);
++	ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
++	ch7006_load_reg(client, state, CH7006_HPOS);
++	ch7006_load_reg(client, state, CH7006_VPOS);
++	ch7006_load_reg(client, state, CH7006_INPUT_SYNC);
++	ch7006_load_reg(client, state, CH7006_DETECT);
++	ch7006_load_reg(client, state, CH7006_CONTRAST);
++	ch7006_load_reg(client, state, CH7006_PLLOV);
++	ch7006_load_reg(client, state, CH7006_PLLM);
++	ch7006_load_reg(client, state, CH7006_PLLN);
++	ch7006_load_reg(client, state, CH7006_BCLKOUT);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC0);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC1);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC2);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC3);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC4);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC5);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC6);
++	ch7006_load_reg(client, state, CH7006_SUBC_INC7);
++	ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
++	ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
++}
++
++void ch7006_state_save(struct i2c_client *client,
++		       struct ch7006_state *state)
++{
++	ch7006_save_reg(client, state, CH7006_POWER);
++
++	ch7006_save_reg(client, state, CH7006_DISPMODE);
++	ch7006_save_reg(client, state, CH7006_FFILTER);
++	ch7006_save_reg(client, state, CH7006_BWIDTH);
++	ch7006_save_reg(client, state, CH7006_INPUT_FORMAT);
++	ch7006_save_reg(client, state, CH7006_CLKMODE);
++	ch7006_save_reg(client, state, CH7006_START_ACTIVE);
++	ch7006_save_reg(client, state, CH7006_POV);
++	ch7006_save_reg(client, state, CH7006_BLACK_LEVEL);
++	ch7006_save_reg(client, state, CH7006_HPOS);
++	ch7006_save_reg(client, state, CH7006_VPOS);
++	ch7006_save_reg(client, state, CH7006_INPUT_SYNC);
++	ch7006_save_reg(client, state, CH7006_DETECT);
++	ch7006_save_reg(client, state, CH7006_CONTRAST);
++	ch7006_save_reg(client, state, CH7006_PLLOV);
++	ch7006_save_reg(client, state, CH7006_PLLM);
++	ch7006_save_reg(client, state, CH7006_PLLN);
++	ch7006_save_reg(client, state, CH7006_BCLKOUT);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC0);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC1);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC2);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC3);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC4);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC5);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC6);
++	ch7006_save_reg(client, state, CH7006_SUBC_INC7);
++	ch7006_save_reg(client, state, CH7006_PLL_CONTROL);
++	ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0);
++
++	state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) |
++		(state->regs[CH7006_FFILTER] & 0x0c) >> 2 |
++		(state->regs[CH7006_FFILTER] & 0x03) << 2;
++}
+diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
+new file mode 100644
+index 0000000..b06d3d9
+--- /dev/null
++++ b/drivers/gpu/drm/i2c/ch7006_priv.h
+@@ -0,0 +1,344 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __DRM_I2C_CH7006_PRIV_H__
++#define __DRM_I2C_CH7006_PRIV_H__
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++#include "drm_encoder_slave.h"
++#include "i2c/ch7006.h"
++
++typedef int64_t fixed;
++#define fixed1 (1LL << 32)
++
++enum ch7006_tv_norm {
++	TV_NORM_PAL,
++	TV_NORM_PAL_M,
++	TV_NORM_PAL_N,
++	TV_NORM_PAL_NC,
++	TV_NORM_PAL_60,
++	TV_NORM_NTSC_M,
++	TV_NORM_NTSC_J,
++	NUM_TV_NORMS
++};
++
++struct ch7006_tv_norm_info {
++	fixed vrefresh;
++	int vdisplay;
++	int vtotal;
++	int hvirtual;
++
++	fixed subc_freq;
++	fixed black_level;
++
++	uint32_t dispmode;
++	int voffset;
++};
++
++struct ch7006_mode {
++	struct drm_display_mode mode;
++
++	int enc_hdisp;
++	int enc_vdisp;
++
++	fixed subc_coeff;
++	uint32_t dispmode;
++
++	uint32_t valid_scales;
++	uint32_t valid_norms;
++};
++
++struct ch7006_state {
++	uint8_t regs[0x26];
++};
++
++struct ch7006_priv {
++	struct ch7006_encoder_params *params;
++	struct ch7006_mode *mode;
++
++	struct ch7006_state state;
++	struct ch7006_state saved_state;
++
++	struct drm_property *scale_property;
++
++	int select_subconnector;
++	int subconnector;
++	int hmargin;
++	int vmargin;
++	enum ch7006_tv_norm norm;
++	int brightness;
++	int contrast;
++	int flicker;
++	int scale;
++
++	int last_dpms;
++};
++
++#define to_ch7006_priv(x) \
++	((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
++
++extern int ch7006_debug;
++extern char *ch7006_tv_norm;
++extern int ch7006_scale;
++
++extern char *ch7006_tv_norm_names[];
++extern struct ch7006_tv_norm_info ch7006_tv_norms[];
++extern struct ch7006_mode ch7006_modes[];
++
++struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
++				       struct drm_display_mode *drm_mode);
++
++void ch7006_setup_levels(struct drm_encoder *encoder);
++void ch7006_setup_subcarrier(struct drm_encoder *encoder);
++void ch7006_setup_pll(struct drm_encoder *encoder);
++void ch7006_setup_power_state(struct drm_encoder *encoder);
++void ch7006_setup_properties(struct drm_encoder *encoder);
++
++void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val);
++uint8_t ch7006_read(struct i2c_client *client, uint8_t addr);
++
++void ch7006_state_load(struct i2c_client *client,
++		       struct ch7006_state *state);
++void ch7006_state_save(struct i2c_client *client,
++		       struct ch7006_state *state);
++
++/* Some helper macros */
++
++#define ch7006_dbg(client, format, ...) do {				\
++		if (ch7006_debug)					\
++			dev_printk(KERN_DEBUG, &client->dev,		\
++				   "%s: " format, __func__, ## __VA_ARGS__); \
++	} while (0)
++#define ch7006_info(client, format, ...) \
++				dev_info(&client->dev, format, __VA_ARGS__)
++#define ch7006_err(client, format, ...) \
++				dev_err(&client->dev, format, __VA_ARGS__)
++
++#define __mask(src, bitfield) \
++		(((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
++#define mask(bitfield) __mask(bitfield)
++
++#define __bitf(src, bitfield, x) \
++		(((x) >> (src) << (0 ? bitfield)) &  __mask(src, bitfield))
++#define bitf(bitfield, x) __bitf(bitfield, x)
++#define bitfs(bitfield, s) __bitf(bitfield, bitfield##_##s)
++#define setbitf(state, reg, bitfield, x)				\
++	state->regs[reg] = (state->regs[reg] & ~mask(reg##_##bitfield))	\
++		| bitf(reg##_##bitfield, x)
++
++#define __unbitf(src, bitfield, x) \
++		((x & __mask(src, bitfield)) >> (0 ? bitfield) << (src))
++#define unbitf(bitfield, x) __unbitf(bitfield, x)
++
++static inline int interpolate(int y0, int y1, int y2, int x)
++{
++	return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
++}
++
++static inline int32_t round_fixed(fixed x)
++{
++	return (x + fixed1/2) >> 32;
++}
++
++#define ch7006_load_reg(client, state, reg) ch7006_write(client, reg, state->regs[reg])
++#define ch7006_save_reg(client, state, reg) state->regs[reg] = ch7006_read(client, reg)
++
++/* Fixed hardware specs */
++
++#define CH7006_FREQ0				14318
++#define CH7006_MAXN				650
++#define CH7006_MAXM				315
++
++/* Register definitions */
++
++#define CH7006_DISPMODE				0x00
++#define CH7006_DISPMODE_INPUT_RES		0, 7:5
++#define CH7006_DISPMODE_INPUT_RES_512x384	0x0
++#define CH7006_DISPMODE_INPUT_RES_720x400	0x1
++#define CH7006_DISPMODE_INPUT_RES_640x400	0x2
++#define CH7006_DISPMODE_INPUT_RES_640x480	0x3
++#define CH7006_DISPMODE_INPUT_RES_800x600	0x4
++#define CH7006_DISPMODE_INPUT_RES_NATIVE	0x5
++#define CH7006_DISPMODE_OUTPUT_STD		0, 4:3
++#define CH7006_DISPMODE_OUTPUT_STD_PAL		0x0
++#define CH7006_DISPMODE_OUTPUT_STD_NTSC		0x1
++#define CH7006_DISPMODE_OUTPUT_STD_PAL_M	0x2
++#define CH7006_DISPMODE_OUTPUT_STD_NTSC_J	0x3
++#define CH7006_DISPMODE_SCALING_RATIO		0, 2:0
++#define CH7006_DISPMODE_SCALING_RATIO_5_4	0x0
++#define CH7006_DISPMODE_SCALING_RATIO_1_1	0x1
++#define CH7006_DISPMODE_SCALING_RATIO_7_8	0x2
++#define CH7006_DISPMODE_SCALING_RATIO_5_6	0x3
++#define CH7006_DISPMODE_SCALING_RATIO_3_4	0x4
++#define CH7006_DISPMODE_SCALING_RATIO_7_10	0x5
++
++#define CH7006_FFILTER				0x01
++#define CH7006_FFILTER_TEXT			0, 5:4
++#define CH7006_FFILTER_LUMA			0, 3:2
++#define CH7006_FFILTER_CHROMA			0, 1:0
++#define CH7006_FFILTER_CHROMA_NO_DCRAWL		0x3
++
++#define CH7006_BWIDTH				0x03
++#define CH7006_BWIDTH_5L_FFILER			(1 << 7)
++#define CH7006_BWIDTH_CVBS_NO_CHROMA		(1 << 6)
++#define CH7006_BWIDTH_CHROMA			0, 5:4
++#define CH7006_BWIDTH_SVIDEO_YPEAK		(1 << 3)
++#define CH7006_BWIDTH_SVIDEO_LUMA		0, 2:1
++#define CH7006_BWIDTH_CVBS_LUMA			0, 0:0
++
++#define CH7006_INPUT_FORMAT			0x04
++#define CH7006_INPUT_FORMAT_DAC_GAIN		(1 << 6)
++#define CH7006_INPUT_FORMAT_RGB_PASS_THROUGH	(1 << 5)
++#define CH7006_INPUT_FORMAT_FORMAT		0, 3:0
++#define CH7006_INPUT_FORMAT_FORMAT_RGB16	0x0
++#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m16	0x1
++#define CH7006_INPUT_FORMAT_FORMAT_RGB24m16	0x2
++#define CH7006_INPUT_FORMAT_FORMAT_RGB15	0x3
++#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12C	0x4
++#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12I	0x5
++#define CH7006_INPUT_FORMAT_FORMAT_RGB24m8	0x6
++#define CH7006_INPUT_FORMAT_FORMAT_RGB16m8	0x7
++#define CH7006_INPUT_FORMAT_FORMAT_RGB15m8	0x8
++#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m8	0x9
++
++#define CH7006_CLKMODE				0x06
++#define CH7006_CLKMODE_SUBC_LOCK		(1 << 7)
++#define CH7006_CLKMODE_MASTER			(1 << 6)
++#define CH7006_CLKMODE_POS_EDGE			(1 << 4)
++#define CH7006_CLKMODE_XCM			0, 3:2
++#define CH7006_CLKMODE_PCM			0, 1:0
++
++#define CH7006_START_ACTIVE			0x07
++#define CH7006_START_ACTIVE_0			0, 7:0
++
++#define CH7006_POV				0x08
++#define CH7006_POV_START_ACTIVE_8		8, 2:2
++#define CH7006_POV_HPOS_8			8, 1:1
++#define CH7006_POV_VPOS_8			8, 0:0
++
++#define CH7006_BLACK_LEVEL			0x09
++#define CH7006_BLACK_LEVEL_0			0, 7:0
++
++#define CH7006_HPOS				0x0a
++#define CH7006_HPOS_0				0, 7:0
++
++#define CH7006_VPOS				0x0b
++#define CH7006_VPOS_0				0, 7:0
++
++#define CH7006_INPUT_SYNC			0x0d
++#define CH7006_INPUT_SYNC_EMBEDDED		(1 << 3)
++#define CH7006_INPUT_SYNC_OUTPUT		(1 << 2)
++#define CH7006_INPUT_SYNC_PVSYNC		(1 << 1)
++#define CH7006_INPUT_SYNC_PHSYNC		(1 << 0)
++
++#define CH7006_POWER				0x0e
++#define CH7006_POWER_SCART			(1 << 4)
++#define CH7006_POWER_RESET			(1 << 3)
++#define CH7006_POWER_LEVEL			0, 2:0
++#define CH7006_POWER_LEVEL_CVBS_OFF		0x0
++#define CH7006_POWER_LEVEL_POWER_OFF		0x1
++#define CH7006_POWER_LEVEL_SVIDEO_OFF		0x2
++#define CH7006_POWER_LEVEL_NORMAL		0x3
++#define CH7006_POWER_LEVEL_FULL_POWER_OFF	0x4
++
++#define CH7006_DETECT				0x10
++#define CH7006_DETECT_SVIDEO_Y_TEST		(1 << 3)
++#define CH7006_DETECT_SVIDEO_C_TEST		(1 << 2)
++#define CH7006_DETECT_CVBS_TEST			(1 << 1)
++#define CH7006_DETECT_SENSE			(1 << 0)
++
++#define CH7006_CONTRAST				0x11
++#define CH7006_CONTRAST_0			0, 2:0
++
++#define CH7006_PLLOV	 			0x13
++#define CH7006_PLLOV_N_8	 		8, 2:1
++#define CH7006_PLLOV_M_8	 		8, 0:0
++
++#define CH7006_PLLM	 			0x14
++#define CH7006_PLLM_0	 			0, 7:0
++
++#define CH7006_PLLN	 			0x15
++#define CH7006_PLLN_0	 			0, 7:0
++
++#define CH7006_BCLKOUT	 			0x17
++
++#define CH7006_SUBC_INC0			0x18
++#define CH7006_SUBC_INC0_28			28, 3:0
++
++#define CH7006_SUBC_INC1			0x19
++#define CH7006_SUBC_INC1_24			24, 3:0
++
++#define CH7006_SUBC_INC2			0x1a
++#define CH7006_SUBC_INC2_20			20, 3:0
++
++#define CH7006_SUBC_INC3			0x1b
++#define CH7006_SUBC_INC3_GPIO1_VAL		(1 << 7)
++#define CH7006_SUBC_INC3_GPIO0_VAL		(1 << 6)
++#define CH7006_SUBC_INC3_POUT_3_3V		(1 << 5)
++#define CH7006_SUBC_INC3_POUT_INV		(1 << 4)
++#define CH7006_SUBC_INC3_16			16, 3:0
++
++#define CH7006_SUBC_INC4			0x1c
++#define CH7006_SUBC_INC4_GPIO1_IN		(1 << 7)
++#define CH7006_SUBC_INC4_GPIO0_IN		(1 << 6)
++#define CH7006_SUBC_INC4_DS_INPUT		(1 << 4)
++#define CH7006_SUBC_INC4_12			12, 3:0
++
++#define CH7006_SUBC_INC5			0x1d
++#define CH7006_SUBC_INC5_8			8, 3:0
++
++#define CH7006_SUBC_INC6			0x1e
++#define CH7006_SUBC_INC6_4			4, 3:0
++
++#define CH7006_SUBC_INC7			0x1f
++#define CH7006_SUBC_INC7_0			0, 3:0
++
++#define CH7006_PLL_CONTROL			0x20
++#define CH7006_PLL_CONTROL_CPI			(1 << 5)
++#define CH7006_PLL_CONTROL_CAPACITOR		(1 << 4)
++#define CH7006_PLL_CONTROL_7STAGES		(1 << 3)
++#define CH7006_PLL_CONTROL_DIGITAL_5V		(1 << 2)
++#define CH7006_PLL_CONTROL_ANALOG_5V		(1 << 1)
++#define CH7006_PLL_CONTROL_MEMORY_5V		(1 << 0)
++
++#define CH7006_CALC_SUBC_INC0			0x21
++#define CH7006_CALC_SUBC_INC0_24		24, 4:3
++#define CH7006_CALC_SUBC_INC0_HYST		0, 2:1
++#define CH7006_CALC_SUBC_INC0_AUTO		(1 << 0)
++
++#define CH7006_CALC_SUBC_INC1			0x22
++#define CH7006_CALC_SUBC_INC1_16		16, 7:0
++
++#define CH7006_CALC_SUBC_INC2			0x23
++#define CH7006_CALC_SUBC_INC2_8			8, 7:0
++
++#define CH7006_CALC_SUBC_INC3			0x24
++#define CH7006_CALC_SUBC_INC3_0			0, 7:0
++
++#define CH7006_VERSION_ID			0x25
++
++#endif
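
The register definitions above all feed the __mask()/__bitf() machinery from
the helper-macro section: a field such as CH7006_POV_VPOS_8 expands to the
pair "8, 0:0", and the "1 ? bitfield" / "0 ? bitfield" ternaries then pick
out the high and low bit numbers of the "high:low" half. A minimal userspace
sketch of the expansion (the macros and the two field definitions are copied
from the header above; the main() harness and the sample value are
illustrative only, not part of the patch):

#include <stdio.h>

#define __mask(src, bitfield) \
		(((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
#define mask(bitfield) __mask(bitfield)

#define __bitf(src, bitfield, x) \
		(((x) >> (src) << (0 ? bitfield)) &  __mask(src, bitfield))
#define bitf(bitfield, x) __bitf(bitfield, x)

#define CH7006_POV_VPOS_8	8, 0:0	/* value bit 8 -> register bit 0 */
#define CH7006_VPOS_0		0, 7:0	/* value bits 7:0 -> register bits 7:0 */

int main(void)
{
	int vpos = 0x1a5;	/* a 9-bit vertical position */

	printf("mask(POV_VPOS_8) = %#04x\n", mask(CH7006_POV_VPOS_8)); /* 0x01 */
	printf("mask(VPOS_0)     = %#04x\n", mask(CH7006_VPOS_0));     /* 0xff */
	printf("bitf(POV_VPOS_8) = %#04x\n", bitf(CH7006_POV_VPOS_8, vpos)); /* 0x01 */
	printf("bitf(VPOS_0)     = %#04x\n", bitf(CH7006_VPOS_0, vpos));     /* 0xa5 */
	return 0;
}

This is why the position code earlier can store a 9-bit value with two
setbitf() calls: the low byte lands in CH7006_VPOS and the ninth bit in
bit 0 of CH7006_POV.
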
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index 7d1d88c..de32d22 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+ static const struct file_operations i810_buffer_fops = {
+ 	.open = drm_open,
+ 	.release = drm_release,
+-	.ioctl = drm_ioctl,
++	.unlocked_ioctl = drm_ioctl,
+ 	.mmap = i810_mmap_buffers,
+ 	.fasync = drm_fasync,
+ };
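
This .ioctl -> .unlocked_ioctl switch, repeated for every DRM driver in the
hunks that follow, tracks the 2.6.33 conversion of drm_ioctl() away from the
Big Kernel Lock: the DRM core now takes its own locking, so it registers via
the lock-free file_operations slot, which also has a different prototype. A
reduced comparison of the two callbacks (member declarations paraphrased
from <linux/fs.h> of that era; the struct name is shortened so the fragment
stands alone):

struct inode;
struct file;

struct file_operations_excerpt {
	/* legacy slot: called with the Big Kernel Lock held */
	int  (*ioctl)(struct inode *, struct file *,
		      unsigned int, unsigned long);
	/* 2.6.33 drm_ioctl(): locking is the callee's business */
	long (*unlocked_ioctl)(struct file *,
			       unsigned int, unsigned long);
};
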
+diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
+index fabb9a8..c1e0275 100644
+--- a/drivers/gpu/drm/i810/i810_drv.c
++++ b/drivers/gpu/drm/i810/i810_drv.c
+@@ -59,7 +59,7 @@ static struct drm_driver driver = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = drm_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
+index 877bf6c..06bd732 100644
+--- a/drivers/gpu/drm/i830/i830_dma.c
++++ b/drivers/gpu/drm/i830/i830_dma.c
+@@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+ static const struct file_operations i830_buffer_fops = {
+ 	.open = drm_open,
+ 	.release = drm_release,
+-	.ioctl = drm_ioctl,
++	.unlocked_ioctl = drm_ioctl,
+ 	.mmap = i830_mmap_buffers,
+ 	.fasync = drm_fasync,
+ };
+diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
+index 389597e..44f990b 100644
+--- a/drivers/gpu/drm/i830/i830_drv.c
++++ b/drivers/gpu/drm/i830/i830_drv.c
+@@ -70,7 +70,7 @@ static struct drm_driver driver = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = drm_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
+index fa7b9be..9929f84 100644
+--- a/drivers/gpu/drm/i915/Makefile
++++ b/drivers/gpu/drm/i915/Makefile
+@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+ 	  intel_lvds.o \
+ 	  intel_bios.o \
+ 	  intel_dp.o \
+-	  intel_dp_i2c.o \
+ 	  intel_hdmi.o \
+ 	  intel_sdvo.o \
+ 	  intel_modes.o \
+@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
+ 	  intel_fb.o \
+ 	  intel_tv.o \
+ 	  intel_dvo.o \
++	  intel_overlay.o \
+ 	  dvo_ch7xxx.o \
+ 	  dvo_ch7017.o \
+ 	  dvo_ivch.o \
+diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
+index 621815b..1184c14 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7017.c
++++ b/drivers/gpu/drm/i915/dvo_ch7017.c
+@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
+ 	if (val != CH7017_DEVICE_ID_VALUE &&
+ 	    val != CH7018_DEVICE_ID_VALUE &&
+ 	    val != CH7019_DEVICE_ID_VALUE) {
+-		DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
++		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
++				"Slave %d.\n",
+ 			  val, i2cbus->adapter.name,dvo->slave_addr);
+ 		goto fail;
+ 	}
+@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
+ 	uint8_t horizontal_active_pixel_output, vertical_active_line_output;
+ 	uint8_t active_input_line_output;
+ 
+-	DRM_DEBUG("Registers before mode setting\n");
++	DRM_DEBUG_KMS("Registers before mode setting\n");
+ 	ch7017_dump_regs(dvo);
+ 
+ 	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
+@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
+ 	/* Turn the LVDS back on with new settings. */
+ 	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
+ 
+-	DRM_DEBUG("Registers after mode setting\n");
++	DRM_DEBUG_KMS("Registers after mode setting\n");
+ 	ch7017_dump_regs(dvo);
+ }
+ 
+@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
+ #define DUMP(reg)					\
+ do {							\
+ 	ch7017_read(dvo, reg, &val);			\
+-	DRM_DEBUG(#reg ": %02x\n", val);		\
++	DRM_DEBUG_KMS(#reg ": %02x\n", val);		\
+ } while (0)
+ 
+ 	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
+diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+index a9b8962..d56ff5c 100644
+--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
++++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
+@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ 	};
+ 
+ 	if (!ch7xxx->quiet) {
+-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
++		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 	return false;
+@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ 		return true;
+ 
+ 	if (!ch7xxx->quiet) {
+-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
++		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 
+@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
+ 
+ 	name = ch7xxx_get_id(vendor);
+ 	if (!name) {
+-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
++		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
++				"slave %d.\n",
+ 			  vendor, adapter->name, dvo->slave_addr);
+ 		goto out;
+ 	}
+@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
+ 		goto out;
+ 
+ 	if (device != CH7xxx_DID) {
+-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
++		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
++				"slave %d.\n",
+ 			  vendor, adapter->name, dvo->slave_addr);
+ 		goto out;
+ 	}
+ 
+ 	ch7xxx->quiet = false;
+-	DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
++	DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+ 		  name, vendor, device);
+ 	return true;
+ out:
+@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
+ 
+ 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
+ 		if ((i % 8) == 0 )
+-			DRM_DEBUG("\n %02X: ", i);
+-		DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
++			DRM_LOG_KMS("\n %02X: ", i);
++		DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
+ 	}
+ }
+ 
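
The DRM_DEBUG -> DRM_DEBUG_KMS / DRM_LOG_KMS conversions in these dvo_*.c
hunks (and the ones that follow) put the output behind the per-category
drm.debug bitmask, so KMS chatter can be enabled on its own; DRM_LOG_KMS
additionally drops the function-name prefix, which keeps the multi-line
register dumps readable. A userspace model of the gating (category values
and the drm_ut_debug_printk() behaviour are reconstructed from memory of the
2.6.33 DRM core, so treat the details as an approximation):

#include <stdio.h>
#include <stdarg.h>

#define DRM_UT_CORE	0x01
#define DRM_UT_DRIVER	0x02
#define DRM_UT_KMS	0x04

static unsigned int drm_debug;	/* the drm.debug module parameter */

static void drm_ut_debug_printk(unsigned int category, const char *prefix,
				const char *function, const char *fmt, ...)
{
	va_list args;

	if (!(drm_debug & category))	/* dropped unless its bit is set */
		return;
	if (function)
		printf("[%s:%s], ", prefix, function);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

#define DRM_DEBUG_KMS(fmt, ...) \
	drm_ut_debug_printk(DRM_UT_KMS, "drm", __func__, fmt, ##__VA_ARGS__)
#define DRM_LOG_KMS(fmt, ...) \
	drm_ut_debug_printk(DRM_UT_KMS, NULL, NULL, fmt, ##__VA_ARGS__)

int main(void)
{
	DRM_DEBUG_KMS("silent: drm_debug is 0\n");
	drm_debug = DRM_UT_KMS;		/* like booting with drm.debug=0x04 */
	DRM_DEBUG_KMS("prefixed message\n");
	DRM_LOG_KMS("bare message, as used for register dumps\n");
	return 0;
}
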
+diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
+index aa176f9..24169e5 100644
+--- a/drivers/gpu/drm/i915/dvo_ivch.c
++++ b/drivers/gpu/drm/i915/dvo_ivch.c
+@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
+ 	};
+ 
+ 	if (!priv->quiet) {
+-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
++		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
++				"%s:%02x.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 	return false;
+@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
+ 		return true;
+ 
+ 	if (!priv->quiet) {
+-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
++		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 
+@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
+ 	 * the address it's responding on.
+ 	 */
+ 	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
+-		DRM_DEBUG("ivch detect failed due to address mismatch "
++		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
+ 			  "(%d vs %d)\n",
+ 			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
+ 		goto out;
+@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
+ 	uint16_t val;
+ 
+ 	ivch_read(dvo, VR00, &val);
+-	DRM_DEBUG("VR00: 0x%04x\n", val);
++	DRM_LOG_KMS("VR00: 0x%04x\n", val);
+ 	ivch_read(dvo, VR01, &val);
+-	DRM_DEBUG("VR01: 0x%04x\n", val);
++	DRM_LOG_KMS("VR01: 0x%04x\n", val);
+ 	ivch_read(dvo, VR30, &val);
+-	DRM_DEBUG("VR30: 0x%04x\n", val);
++	DRM_LOG_KMS("VR30: 0x%04x\n", val);
+ 	ivch_read(dvo, VR40, &val);
+-	DRM_DEBUG("VR40: 0x%04x\n", val);
++	DRM_LOG_KMS("VR40: 0x%04x\n", val);
+ 
+ 	/* GPIO registers */
+ 	ivch_read(dvo, VR80, &val);
+-	DRM_DEBUG("VR80: 0x%04x\n", val);
++	DRM_LOG_KMS("VR80: 0x%04x\n", val);
+ 	ivch_read(dvo, VR81, &val);
+-	DRM_DEBUG("VR81: 0x%04x\n", val);
++	DRM_LOG_KMS("VR81: 0x%04x\n", val);
+ 	ivch_read(dvo, VR82, &val);
+-	DRM_DEBUG("VR82: 0x%04x\n", val);
++	DRM_LOG_KMS("VR82: 0x%04x\n", val);
+ 	ivch_read(dvo, VR83, &val);
+-	DRM_DEBUG("VR83: 0x%04x\n", val);
++	DRM_LOG_KMS("VR83: 0x%04x\n", val);
+ 	ivch_read(dvo, VR84, &val);
+-	DRM_DEBUG("VR84: 0x%04x\n", val);
++	DRM_LOG_KMS("VR84: 0x%04x\n", val);
+ 	ivch_read(dvo, VR85, &val);
+-	DRM_DEBUG("VR85: 0x%04x\n", val);
++	DRM_LOG_KMS("VR85: 0x%04x\n", val);
+ 	ivch_read(dvo, VR86, &val);
+-	DRM_DEBUG("VR86: 0x%04x\n", val);
++	DRM_LOG_KMS("VR86: 0x%04x\n", val);
+ 	ivch_read(dvo, VR87, &val);
+-	DRM_DEBUG("VR87: 0x%04x\n", val);
++	DRM_LOG_KMS("VR87: 0x%04x\n", val);
+ 	ivch_read(dvo, VR88, &val);
+-	DRM_DEBUG("VR88: 0x%04x\n", val);
++	DRM_LOG_KMS("VR88: 0x%04x\n", val);
+ 
+ 	/* Scratch register 0 - AIM Panel type */
+ 	ivch_read(dvo, VR8E, &val);
+-	DRM_DEBUG("VR8E: 0x%04x\n", val);
++	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
+ 
+ 	/* Scratch register 1 - Status register */
+ 	ivch_read(dvo, VR8F, &val);
+-	DRM_DEBUG("VR8F: 0x%04x\n", val);
++	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
+ }
+ 
+ static void ivch_save(struct intel_dvo_device *dvo)
+diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
+index e1c1f73..0001c13 100644
+--- a/drivers/gpu/drm/i915/dvo_sil164.c
++++ b/drivers/gpu/drm/i915/dvo_sil164.c
+@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ 	};
+ 
+ 	if (!sil->quiet) {
+-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
++		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 	return false;
+@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ 		return true;
+ 
+ 	if (!sil->quiet) {
+-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
++		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 
+@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
+ 		goto out;
+ 
+ 	if (ch != (SIL164_VID & 0xff)) {
+-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
++		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ 			  ch, adapter->name, dvo->slave_addr);
+ 		goto out;
+ 	}
+@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
+ 		goto out;
+ 
+ 	if (ch != (SIL164_DID & 0xff)) {
+-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
++		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
+ 			  ch, adapter->name, dvo->slave_addr);
+ 		goto out;
+ 	}
+ 	sil->quiet = false;
+ 
+-	DRM_DEBUG("init sil164 dvo controller successfully!\n");
++	DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
+ 	return true;
+ 
+ out:
+@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
+ 	uint8_t val;
+ 
+ 	sil164_readb(dvo, SIL164_FREQ_LO, &val);
+-	DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
++	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
+ 	sil164_readb(dvo, SIL164_FREQ_HI, &val);
+-	DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
++	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
+ 	sil164_readb(dvo, SIL164_REG8, &val);
+-	DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
++	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
+ 	sil164_readb(dvo, SIL164_REG9, &val);
+-	DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
++	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
+ 	sil164_readb(dvo, SIL164_REGC, &val);
+-	DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
++	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
+ }
+ 
+ static void sil164_save(struct intel_dvo_device *dvo)
+diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
+index 9ecc907..c7c391b 100644
+--- a/drivers/gpu/drm/i915/dvo_tfp410.c
++++ b/drivers/gpu/drm/i915/dvo_tfp410.c
+@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
+ 	};
+ 
+ 	if (!tfp->quiet) {
+-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
++		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 	return false;
+@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
+ 		return true;
+ 
+ 	if (!tfp->quiet) {
+-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
++		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
+ 			  addr, i2cbus->adapter.name, dvo->slave_addr);
+ 	}
+ 
+@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
+ 	tfp->quiet = true;
+ 
+ 	if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
+-		DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
++		DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
++				"Slave %d.\n",
+ 			  id, adapter->name, dvo->slave_addr);
+ 		goto out;
+ 	}
+ 
+ 	if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
+-		DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
++		DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
++				"Slave %d.\n",
+ 			  id, adapter->name, dvo->slave_addr);
+ 		goto out;
+ 	}
+@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
+ 	uint8_t val, val2;
+ 
+ 	tfp410_readb(dvo, TFP410_REV, &val);
+-	DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_CTL_1, &val);
+-	DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_CTL_2, &val);
+-	DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_CTL_3, &val);
+-	DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_USERCFG, &val);
+-	DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_DE_DLY, &val);
+-	DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_DE_CTL, &val);
+-	DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_DE_TOP, &val);
+-	DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
++	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
+ 	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
+ 	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
+-	DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
++	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+ 	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
+ 	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
+-	DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
++	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+ 	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
+ 	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
+-	DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
++	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+ 	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
+ 	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
+-	DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
++	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+ }
+ 
+ static void tfp410_save(struct intel_dvo_device *dvo)
+diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
+index 7e859d6..a894ade 100644
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -27,6 +27,7 @@
+  */
+ 
+ #include <linux/seq_file.h>
++#include <linux/debugfs.h>
+ #include "drmP.h"
+ #include "drm.h"
+ #include "i915_drm.h"
+@@ -96,13 +97,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
+ 	{
+ 		struct drm_gem_object *obj = obj_priv->obj;
+ 
+-		seq_printf(m, "    %p: %s %8zd %08x %08x %d %s",
++		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
+ 			   obj,
+ 			   get_pin_flag(obj_priv),
+ 			   obj->size,
+ 			   obj->read_domains, obj->write_domain,
+ 			   obj_priv->last_rendering_seqno,
+-			   obj_priv->dirty ? "dirty" : "");
++			   obj_priv->dirty ? " dirty" : "",
++			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ 
+ 		if (obj->name)
+ 			seq_printf(m, " (name: %d)", obj->name);
+@@ -160,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
+ 	struct drm_device *dev = node->minor->dev;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+-	if (!IS_IGDNG(dev)) {
++	if (!IS_IRONLAKE(dev)) {
+ 		seq_printf(m, "Interrupt enable:    %08x\n",
+ 			   I915_READ(IER));
+ 		seq_printf(m, "Interrupt identity:  %08x\n",
+@@ -270,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
+ 		mem = kmap_atomic(pages[page], KM_USER0);
+ 		for (i = 0; i < PAGE_SIZE; i += 4)
+ 			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
+-		kunmap_atomic(pages[page], KM_USER0);
++		kunmap_atomic(mem, KM_USER0);
+ 	}
+ }
+ 
+@@ -384,6 +386,110 @@ out:
+ 	return 0;
+ }
+ 
++static int
++i915_wedged_open(struct inode *inode,
++		 struct file *filp)
++{
++	filp->private_data = inode->i_private;
++	return 0;
++}
++
++static ssize_t
++i915_wedged_read(struct file *filp,
++		 char __user *ubuf,
++		 size_t max,
++		 loff_t *ppos)
++{
++	struct drm_device *dev = filp->private_data;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	char buf[80];
++	int len;
++
++	len = snprintf(buf, sizeof (buf),
++		       "wedged :  %d\n",
++		       atomic_read(&dev_priv->mm.wedged));
++
++	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
++}
++
++static ssize_t
++i915_wedged_write(struct file *filp,
++		  const char __user *ubuf,
++		  size_t cnt,
++		  loff_t *ppos)
++{
++	struct drm_device *dev = filp->private_data;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	char buf[20];
++	int val = 1;
++
++	if (cnt > 0) {
++		if (cnt > sizeof (buf) - 1)
++			return -EINVAL;
++
++		if (copy_from_user(buf, ubuf, cnt))
++			return -EFAULT;
++		buf[cnt] = 0;
++
++		val = simple_strtoul(buf, NULL, 0);
++	}
++
++	DRM_INFO("Manually setting wedged to %d\n", val);
++
++	atomic_set(&dev_priv->mm.wedged, val);
++	if (val) {
++		DRM_WAKEUP(&dev_priv->irq_queue);
++		queue_work(dev_priv->wq, &dev_priv->error_work);
++	}
++
++	return cnt;
++}
++
++static const struct file_operations i915_wedged_fops = {
++	.owner = THIS_MODULE,
++	.open = i915_wedged_open,
++	.read = i915_wedged_read,
++	.write = i915_wedged_write,
++};
++
++/* As the drm_debugfs_init() routines are called before dev->dev_private is
++ * allocated we need to hook into the minor for release. */
++static int
++drm_add_fake_info_node(struct drm_minor *minor,
++		       struct dentry *ent,
++		       const void *key)
++{
++	struct drm_info_node *node;
++
++	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
++	if (node == NULL) {
++		debugfs_remove(ent);
++		return -ENOMEM;
++	}
++
++	node->minor = minor;
++	node->dent = ent;
++	node->info_ent = (void *) key;
++	list_add(&node->list, &minor->debugfs_nodes.list);
++
++	return 0;
++}
++
++static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
++{
++	struct drm_device *dev = minor->dev;
++	struct dentry *ent;
++
++	ent = debugfs_create_file("i915_wedged",
++				  S_IRUGO | S_IWUSR,
++				  root, dev,
++				  &i915_wedged_fops);
++	if (IS_ERR(ent))
++		return PTR_ERR(ent);
++
++	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
++}
++
+ static struct drm_info_list i915_debugfs_list[] = {
+ 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+ 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+@@ -402,6 +508,12 @@ static struct drm_info_list i915_debugfs_list[] = {
+ 
+ int i915_debugfs_init(struct drm_minor *minor)
+ {
++	int ret;
++
++	ret = i915_wedged_create(minor->debugfs_root, minor);
++	if (ret)
++		return ret;
++
+ 	return drm_debugfs_create_files(i915_debugfs_list,
+ 					I915_DEBUGFS_ENTRIES,
+ 					minor->debugfs_root, minor);
+@@ -411,7 +523,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
+ {
+ 	drm_debugfs_remove_files(i915_debugfs_list,
+ 				 I915_DEBUGFS_ENTRIES, minor);
++	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
++				 1, minor);
+ }
+ 
+ #endif /* CONFIG_DEBUG_FS */
+-
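
Worth noting: the hunk above adds the first i915-specific debugfs file.
Reading i915_wedged (under /sys/kernel/debug/dri/<minor>/ with debugfs
mounted in the usual place) reports whether the GPU is currently considered
hung, and writing a value, e.g. echo 1 > i915_wedged, forces the wedged
state and queues the error work, which gives a way to exercise the
hang-recovery path by hand. The drm_add_fake_info_node() wrapper exists
because, as the comment in the hunk says, these files are created before
dev->dev_private is allocated; hooking the dentry into the minor's node list
is also what lets i915_debugfs_cleanup() remove the file again, using the
fops pointer as the lookup key.
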
+diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
+index eaa1893..2307f98 100644
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -134,6 +134,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
+ 
+ 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ 
++	if (IS_I965G(dev))
++		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
++					     0xf0;
++
+ 	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ 	DRM_DEBUG_DRIVER("Enabled hardware status page\n");
+ 	return 0;
+@@ -731,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ 	if (cmdbuf->num_cliprects) {
+ 		cliprects = kcalloc(cmdbuf->num_cliprects,
+ 				    sizeof(struct drm_clip_rect), GFP_KERNEL);
+-		if (cliprects == NULL)
++		if (cliprects == NULL) {
++			ret = -ENOMEM;
+ 			goto fail_batch_free;
++		}
+ 
+ 		ret = copy_from_user(cliprects, cmdbuf->cliprects,
+ 				     cmdbuf->num_cliprects *
+@@ -807,9 +813,19 @@ static int i915_getparam(struct drm_device *dev, void *data,
+ 	case I915_PARAM_NUM_FENCES_AVAIL:
+ 		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
+ 		break;
++	case I915_PARAM_HAS_OVERLAY:
++		value = dev_priv->overlay ? 1 : 0;
++		break;
++	case I915_PARAM_HAS_PAGEFLIPPING:
++		value = 1;
++		break;
++	case I915_PARAM_HAS_EXECBUF2:
++		/* depends on GEM */
++		value = dev_priv->has_gem;
++		break;
+ 	default:
+ 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
+-					param->param);
++				 param->param);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -962,7 +978,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
+ 	 * Some of the preallocated space is taken by the GTT
+ 	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
+ 	 */
+-	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
++	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
+ 		overhead = 4096;
+ 	else
+ 		overhead = (*aperture_size / 1024) + 4096;
+@@ -1048,7 +1064,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ 	int gtt_offset, gtt_size;
+ 
+ 	if (IS_I965G(dev)) {
+-		if (IS_G4X(dev) || IS_IGDNG(dev)) {
++		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ 			gtt_offset = 2*1024*1024;
+ 			gtt_size = 2*1024*1024;
+ 		} else {
+@@ -1070,7 +1086,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ 
+ 	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
+ 
+-	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
++	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+ 
+ 	/* Mask out these reserved bits on this hardware. */
+ 	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
+@@ -1096,7 +1112,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
+ 	phys =(entry & PTE_ADDRESS_MASK) |
+ 		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
+ 
+-	DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
++	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+ 
+ 	return phys;
+ }
+@@ -1195,14 +1211,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
+ 	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
+ 		0xff000000;
+ 
+-	if (IS_MOBILE(dev) || IS_I9XX(dev))
+-		dev_priv->cursor_needs_physical = true;
+-	else
+-		dev_priv->cursor_needs_physical = false;
+-
+-	if (IS_I965G(dev) || IS_G33(dev))
+-		dev_priv->cursor_needs_physical = false;
+-
+ 	/* Basic memrange allocator for stolen space (aka vram) */
+ 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
+ 	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
+@@ -1307,7 +1315,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	u32 tmp;
+ 
+-	if (!IS_IGD(dev))
++	if (!IS_PINEVIEW(dev))
+ 		return;
+ 
+ 	tmp = I915_READ(CLKCFG);
+@@ -1355,7 +1363,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	resource_size_t base, size;
+-	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
++	int ret = 0, mmio_bar;
+ 	uint32_t agp_size, prealloc_size, prealloc_start;
+ 
+ 	/* i915 has 4 more counters */
+@@ -1371,8 +1379,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 
+ 	dev->dev_private = (void *)dev_priv;
+ 	dev_priv->dev = dev;
++	dev_priv->info = (struct intel_device_info *) flags;
+ 
+ 	/* Add register map (needed for suspend/resume) */
++	mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ 	base = drm_get_resource_start(dev, mmio_bar);
+ 	size = drm_get_resource_len(dev, mmio_bar);
+ 
+@@ -1414,7 +1424,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	if (ret)
+ 		goto out_iomapfree;
+ 
+-	dev_priv->wq = create_workqueue("i915");
++	dev_priv->wq = create_singlethread_workqueue("i915");
+ 	if (dev_priv->wq == NULL) {
+ 		DRM_ERROR("Failed to create our workqueue.\n");
+ 		ret = -ENOMEM;
+@@ -1435,7 +1445,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 
+ 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+-	if (IS_G4X(dev) || IS_IGDNG(dev)) {
++	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ 	}
+@@ -1490,9 +1500,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
+ 	}
+ 
+ 	/* Must be done after probing outputs */
+-	/* FIXME: verify on IGDNG */
+-	if (!IS_IGDNG(dev))
+-		intel_opregion_init(dev, 0);
++	intel_opregion_init(dev, 0);
+ 
+ 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
+ 		    (unsigned long) dev);
+@@ -1526,6 +1534,15 @@ int i915_driver_unload(struct drm_device *dev)
+ 	}
+ 
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		/*
++		 * free the memory space allocated for the child device
++		 * config parsed from VBT
++		 */
++		if (dev_priv->child_dev && dev_priv->child_dev_num) {
++			kfree(dev_priv->child_dev);
++			dev_priv->child_dev = NULL;
++			dev_priv->child_dev_num = 0;
++		}
+ 		drm_irq_uninstall(dev);
+ 		vga_client_register(dev->pdev, NULL, NULL, NULL);
+ 	}
+@@ -1536,8 +1553,7 @@ int i915_driver_unload(struct drm_device *dev)
+ 	if (dev_priv->regs != NULL)
+ 		iounmap(dev_priv->regs);
+ 
+-	if (!IS_IGDNG(dev))
+-		intel_opregion_free(dev, 0);
++	intel_opregion_free(dev, 0);
+ 
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ 		intel_modeset_cleanup(dev);
+@@ -1549,6 +1565,8 @@ int i915_driver_unload(struct drm_device *dev)
+ 		mutex_unlock(&dev->struct_mutex);
+ 		drm_mm_takedown(&dev_priv->vram);
+ 		i915_gem_lastclose(dev);
++
++		intel_cleanup_overlay(dev);
+ 	}
+ 
+ 	pci_dev_put(dev_priv->bridge_dev);
+@@ -1639,6 +1657,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
+ 	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+@@ -1657,6 +1676,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
+ 	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
+ 	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
++	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
++	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
+ };
+ 
+ int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
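
The three new I915_PARAM_* cases pair with the ioctls appended at the bottom
of the table: the intent is that userspace probes I915_PARAM_HAS_OVERLAY,
I915_PARAM_HAS_PAGEFLIPPING and I915_PARAM_HAS_EXECBUF2 through GETPARAM
before issuing DRM_I915_OVERLAY_PUT_IMAGE, page flips or
DRM_I915_GEM_EXECBUFFER2, so that on older kernels the feature test fails
cleanly with -EINVAL from the parameter lookup instead of an unknown-ioctl
error mid-operation.
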
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index 7f436ec..cf4cb3e 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -33,7 +33,6 @@
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+ 
+-#include "drm_pciids.h"
+ #include <linux/console.h>
+ #include "drm_crtc_helper.h"
+ 
+@@ -46,36 +45,149 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+ unsigned int i915_powersave = 1;
+ module_param_named(powersave, i915_powersave, int, 0400);
+ 
++unsigned int i915_lvds_downclock = 0;
++module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
++
+ static struct drm_driver driver;
+ 
+-static struct pci_device_id pciidlist[] = {
+-	i915_PCI_IDS
++#define INTEL_VGA_DEVICE(id, info) {		\
++	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
++	.class_mask = 0xffff00,			\
++	.vendor = 0x8086,			\
++	.device = id,				\
++	.subvendor = PCI_ANY_ID,		\
++	.subdevice = PCI_ANY_ID,		\
++	.driver_data = (unsigned long) info }
++
++const static struct intel_device_info intel_i830_info = {
++	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
++};
++
++const static struct intel_device_info intel_845g_info = {
++	.is_i8xx = 1,
++};
++
++const static struct intel_device_info intel_i85x_info = {
++	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
++};
++
++const static struct intel_device_info intel_i865g_info = {
++	.is_i8xx = 1,
++};
++
++const static struct intel_device_info intel_i915g_info = {
++	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
++};
++const static struct intel_device_info intel_i915gm_info = {
++	.is_i9xx = 1,  .is_mobile = 1, .has_fbc = 1,
++	.cursor_needs_physical = 1,
++};
++const static struct intel_device_info intel_i945g_info = {
++	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
++};
++const static struct intel_device_info intel_i945gm_info = {
++	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
++	.has_hotplug = 1, .cursor_needs_physical = 1,
++};
++
++const static struct intel_device_info intel_i965g_info = {
++	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
++};
++
++const static struct intel_device_info intel_i965gm_info = {
++	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
++	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
++	.has_hotplug = 1,
++};
++
++const static struct intel_device_info intel_g33_info = {
++	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
++	.has_hotplug = 1,
++};
++
++const static struct intel_device_info intel_g45_info = {
++	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
++	.has_pipe_cxsr = 1,
++	.has_hotplug = 1,
++};
++
++const static struct intel_device_info intel_gm45_info = {
++	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
++	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
++	.has_pipe_cxsr = 1,
++	.has_hotplug = 1,
++};
++
++const static struct intel_device_info intel_pineview_info = {
++	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
++	.need_gfx_hws = 1,
++	.has_hotplug = 1,
++};
++
++const static struct intel_device_info intel_ironlake_d_info = {
++	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
++	.has_pipe_cxsr = 1,
++	.has_hotplug = 1,
++};
++
++const static struct intel_device_info intel_ironlake_m_info = {
++	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
++	.need_gfx_hws = 1, .has_rc6 = 1,
++	.has_hotplug = 1,
++};
++
++const static struct pci_device_id pciidlist[] = {
++	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
++	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
++	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
++	INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
++	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
++	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
++	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
++	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
++	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
++	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
++	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
++	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
++	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
++	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
++	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
++	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
++	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
++	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
++	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
++	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
++	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
++	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
++	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
++	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
++	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
++	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
++	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
++	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
++	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
++	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
++	{0, 0, 0}
+ };
+ 
+ #if defined(CONFIG_DRM_I915_KMS)
+ MODULE_DEVICE_TABLE(pci, pciidlist);
+ #endif
+ 
+-static int i915_suspend(struct drm_device *dev, pm_message_t state)
++static int i915_drm_freeze(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+-	if (!dev || !dev_priv) {
+-		DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
+-		DRM_ERROR("DRM not initialized, aborting suspend.\n");
+-		return -ENODEV;
+-	}
+-
+-	if (state.event == PM_EVENT_PRETHAW)
+-		return 0;
+-
+ 	pci_save_state(dev->pdev);
+ 
+ 	/* If KMS is active, we do the leavevt stuff here */
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+-		if (i915_gem_idle(dev))
++		int error = i915_gem_idle(dev);
++		if (error) {
+ 			dev_err(&dev->pdev->dev,
+-				"GEM idle failed, resume may fail\n");
++				"GEM idle failed, resume might fail\n");
++			return error;
++		}
+ 		drm_irq_uninstall(dev);
+ 	}
+ 
+@@ -83,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
+ 
+ 	intel_opregion_free(dev, 1);
+ 
++	/* Modeset on resume, not lid events */
++	dev_priv->modeset_on_lid = 0;
++
++	return 0;
++}
++
++static int i915_suspend(struct drm_device *dev, pm_message_t state)
++{
++	int error;
++
++	if (!dev || !dev->dev_private) {
++		DRM_ERROR("dev: %p\n", dev);
++		DRM_ERROR("DRM not initialized, aborting suspend.\n");
++		return -ENODEV;
++	}
++
++	if (state.event == PM_EVENT_PRETHAW)
++		return 0;
++
++	error = i915_drm_freeze(dev);
++	if (error)
++		return error;
++
+ 	if (state.event == PM_EVENT_SUSPEND) {
+ 		/* Shut down the device */
+ 		pci_disable_device(dev->pdev);
+ 		pci_set_power_state(dev->pdev, PCI_D3hot);
+ 	}
+ 
+-	/* Modeset on resume, not lid events */
+-	dev_priv->modeset_on_lid = 0;
+-
+ 	return 0;
+ }
+ 
+-static int i915_resume(struct drm_device *dev)
++static int i915_drm_thaw(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+-	int ret = 0;
+-
+-	if (pci_enable_device(dev->pdev))
+-		return -1;
+-	pci_set_master(dev->pdev);
++	int error = 0;
+ 
+ 	i915_restore_state(dev);
+ 
+@@ -113,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
+ 		mutex_lock(&dev->struct_mutex);
+ 		dev_priv->mm.suspended = 0;
+ 
+-		ret = i915_gem_init_ringbuffer(dev);
+-		if (ret != 0)
+-			ret = -1;
++		error = i915_gem_init_ringbuffer(dev);
+ 		mutex_unlock(&dev->struct_mutex);
+ 
+ 		drm_irq_install(dev);
+-	}
+-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++
+ 		/* Resume the modeset for every activated CRTC */
+ 		drm_helper_resume_force_mode(dev);
+ 	}
+ 
+ 	dev_priv->modeset_on_lid = 0;
+ 
+-	return ret;
++	return error;
++}
++
++static int i915_resume(struct drm_device *dev)
++{
++	if (pci_enable_device(dev->pdev))
++		return -EIO;
++
++	pci_set_master(dev->pdev);
++
++	return i915_drm_thaw(dev);
+ }
+ 
+ /**
+@@ -268,22 +403,73 @@ i915_pci_remove(struct pci_dev *pdev)
+ 	drm_put_dev(dev);
+ }
+ 
+-static int
+-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
++static int i915_pm_suspend(struct device *dev)
+ {
+-	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct drm_device *drm_dev = pci_get_drvdata(pdev);
++	int error;
+ 
+-	return i915_suspend(dev, state);
++	if (!drm_dev || !drm_dev->dev_private) {
++		dev_err(dev, "DRM not initialized, aborting suspend.\n");
++		return -ENODEV;
++	}
++
++	error = i915_drm_freeze(drm_dev);
++	if (error)
++		return error;
++
++	pci_disable_device(pdev);
++	pci_set_power_state(pdev, PCI_D3hot);
++
++	return 0;
+ }
+ 
+-static int
+-i915_pci_resume(struct pci_dev *pdev)
++static int i915_pm_resume(struct device *dev)
+ {
+-	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ 
+-	return i915_resume(dev);
++	return i915_resume(drm_dev);
+ }
+ 
++static int i915_pm_freeze(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct drm_device *drm_dev = pci_get_drvdata(pdev);
++
++	if (!drm_dev || !drm_dev->dev_private) {
++		dev_err(dev, "DRM not initialized, aborting suspend.\n");
++		return -ENODEV;
++	}
++
++	return i915_drm_freeze(drm_dev);
++}
++
++static int i915_pm_thaw(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct drm_device *drm_dev = pci_get_drvdata(pdev);
++
++	return i915_drm_thaw(drm_dev);
++}
++
++static int i915_pm_poweroff(struct device *dev)
++{
++	struct pci_dev *pdev = to_pci_dev(dev);
++	struct drm_device *drm_dev = pci_get_drvdata(pdev);
++
++	return i915_drm_freeze(drm_dev);
++}
++
++const struct dev_pm_ops i915_pm_ops = {
++     .suspend = i915_pm_suspend,
++     .resume = i915_pm_resume,
++     .freeze = i915_pm_freeze,
++     .thaw = i915_pm_thaw,
++     .poweroff = i915_pm_poweroff,
++     .restore = i915_pm_resume,
++};
++
+ static struct vm_operations_struct i915_gem_vm_ops = {
+ 	.fault = i915_gem_fault,
+ 	.open = drm_gem_vm_open,
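
The suspend/resume rework above splits the old monolithic handlers into
i915_drm_freeze()/i915_drm_thaw() and exposes them through struct
dev_pm_ops, the interface that supersedes the legacy pci_driver
.suspend/.resume pair: .suspend/.resume cover suspend-to-RAM, while
.freeze/.thaw/.poweroff/.restore are the hibernation phases, so the driver
can quiesce the GPU for the hibernation image without actually cutting power
at the freeze stage. The old-style entry points survive in struct drm_driver
purely for the non-KMS case, as the comment below notes.
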
+@@ -303,8 +489,11 @@ static struct drm_driver driver = {
+ 	.lastclose = i915_driver_lastclose,
+ 	.preclose = i915_driver_preclose,
+ 	.postclose = i915_driver_postclose,
++
++	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
+ 	.suspend = i915_suspend,
+ 	.resume = i915_resume,
++
+ 	.device_is_agp = i915_driver_device_is_agp,
+ 	.enable_vblank = i915_enable_vblank,
+ 	.disable_vblank = i915_disable_vblank,
+@@ -329,10 +518,11 @@ static struct drm_driver driver = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = drm_gem_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
++		 .read = drm_read,
+ #ifdef CONFIG_COMPAT
+ 		 .compat_ioctl = i915_compat_ioctl,
+ #endif
+@@ -343,10 +533,7 @@ static struct drm_driver driver = {
+ 		 .id_table = pciidlist,
+ 		 .probe = i915_pci_probe,
+ 		 .remove = i915_pci_remove,
+-#ifdef CONFIG_PM
+-		 .resume = i915_pci_resume,
+-		 .suspend = i915_pci_suspend,
+-#endif
++		 .driver.pm = &i915_pm_ops,
+ 	},
+ 
+ 	.name = DRIVER_NAME,
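
The pciidlist rework above is the structural heart of this file's changes:
each PCI ID now carries a pointer to a per-chipset struct intel_device_info
in its driver_data, and i915_driver_load() (see the i915_dma.c hunk earlier)
stores that pointer as dev_priv->info, which is what allows the long PCI-ID
comparison macros to be deleted from i915_drv.h below. A reduced userspace
model of the flow (the info struct and the GM45 entry mirror the hunks
above; mock_pci_id and main() stand in for the PCI/DRM core, which in
reality hands driver_data to the load hook as its 'flags' argument):

#include <stdio.h>

struct intel_device_info {
	unsigned char is_mobile : 1;
	unsigned char has_fbc : 1;
};

static const struct intel_device_info intel_gm45_info = {
	.is_mobile = 1, .has_fbc = 1,
};

struct mock_pci_id {
	unsigned int device;
	unsigned long driver_data;	/* pci_device_id.driver_data */
};

static const struct mock_pci_id pciidlist[] = {
	{ 0x2a42, (unsigned long) &intel_gm45_info },	/* GM45 */
};

int main(void)
{
	/* i915_driver_load():
	 *	dev_priv->info = (struct intel_device_info *) flags; */
	const struct intel_device_info *info =
		(const struct intel_device_info *) pciidlist[0].driver_data;

	/* capability tests become bitfield reads instead of PCI-ID lists */
	printf("0x%04x: mobile=%d fbc=%d\n", pciidlist[0].device,
	       info->is_mobile, info->has_fbc);
	return 0;
}
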
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index f5d49a7..b99b6a8 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -170,9 +170,33 @@ struct drm_i915_display_funcs {
+ 	/* clock gating init */
+ };
+ 
++struct intel_overlay;
++
++struct intel_device_info {
++	u8 is_mobile : 1;
++	u8 is_i8xx : 1;
++	u8 is_i915g : 1;
++	u8 is_i9xx : 1;
++	u8 is_i945gm : 1;
++	u8 is_i965g : 1;
++	u8 is_i965gm : 1;
++	u8 is_g33 : 1;
++	u8 need_gfx_hws : 1;
++	u8 is_g4x : 1;
++	u8 is_pineview : 1;
++	u8 is_ironlake : 1;
++	u8 has_fbc : 1;
++	u8 has_rc6 : 1;
++	u8 has_pipe_cxsr : 1;
++	u8 has_hotplug : 1;
++	u8 cursor_needs_physical : 1;
++};
++
+ typedef struct drm_i915_private {
+ 	struct drm_device *dev;
+ 
++	const struct intel_device_info *info;
++
+ 	int has_gem;
+ 
+ 	void __iomem *regs;
+@@ -187,6 +211,7 @@ typedef struct drm_i915_private {
+ 	unsigned int status_gfx_addr;
+ 	drm_local_map_t hws_map;
+ 	struct drm_gem_object *hws_obj;
++	struct drm_gem_object *pwrctx;
+ 
+ 	struct resource mch_res;
+ 
+@@ -206,11 +231,13 @@ typedef struct drm_i915_private {
+ 	/** Cached value of IMR to avoid reads in updating the bitfield */
+ 	u32 irq_mask_reg;
+ 	u32 pipestat[2];
+-	/** splitted irq regs for graphics and display engine on IGDNG,
++	/** splitted irq regs for graphics and display engine on Ironlake,
+ 	    irq_mask_reg is still used for display irq. */
+ 	u32 gt_irq_mask_reg;
+ 	u32 gt_irq_enable_reg;
+ 	u32 de_irq_enable_reg;
++	u32 pch_irq_mask_reg;
++	u32 pch_irq_enable_reg;
+ 
+ 	u32 hotplug_supported_mask;
+ 	struct work_struct hotplug_work;
+@@ -227,8 +254,6 @@ typedef struct drm_i915_private {
+ 	int hangcheck_count;
+ 	uint32_t last_acthd;
+ 
+-	bool cursor_needs_physical;
+-
+ 	struct drm_mm vram;
+ 
+ 	unsigned long cfb_size;
+@@ -240,6 +265,9 @@ typedef struct drm_i915_private {
+ 
+ 	struct intel_opregion opregion;
+ 
++	/* overlay */
++	struct intel_overlay *overlay;
++
+ 	/* LVDS info */
+ 	int backlight_duty_cycle;  /* restore backlight to this value */
+ 	bool panel_wants_dither;
+@@ -255,10 +283,11 @@ typedef struct drm_i915_private {
+ 	unsigned int lvds_use_ssc:1;
+ 	unsigned int edp_support:1;
+ 	int lvds_ssc_freq;
++	int edp_bpp;
+ 
+ 	struct notifier_block lid_notifier;
+ 
+-	int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
++	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+ 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+ 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+ 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+@@ -279,7 +308,6 @@ typedef struct drm_i915_private {
+ 	u32 saveDSPACNTR;
+ 	u32 saveDSPBCNTR;
+ 	u32 saveDSPARB;
+-	u32 saveRENDERSTANDBY;
+ 	u32 saveHWS;
+ 	u32 savePIPEACONF;
+ 	u32 savePIPEBCONF;
+@@ -374,8 +402,6 @@ typedef struct drm_i915_private {
+ 	u32 saveFDI_RXA_IMR;
+ 	u32 saveFDI_RXB_IMR;
+ 	u32 saveCACHE_MODE_0;
+-	u32 saveD_STATE;
+-	u32 saveDSPCLK_GATE_D;
+ 	u32 saveMI_ARB_STATE;
+ 	u32 saveSWF0[16];
+ 	u32 saveSWF1[16];
+@@ -548,13 +574,21 @@ typedef struct drm_i915_private {
+ 	/* indicate whether the LVDS_BORDER should be enabled or not */
+ 	unsigned int lvds_border_bits;
+ 
++	struct drm_crtc *plane_to_crtc_mapping[2];
++	struct drm_crtc *pipe_to_crtc_mapping[2];
++	wait_queue_head_t pending_flip_queue;
++
+ 	/* Reclocking support */
+ 	bool render_reclock_avail;
+ 	bool lvds_downclock_avail;
++	/* indicates the reduced downclock for LVDS*/
++	int lvds_downclock;
+ 	struct work_struct idle_work;
+ 	struct timer_list idle_timer;
+ 	bool busy;
+ 	u16 orig_clock;
++	int child_dev_num;
++	struct child_device_config *child_dev;
+ 	struct drm_connector *int_lvds_connector;
+ } drm_i915_private_t;
+ 
+@@ -650,6 +684,13 @@ struct drm_i915_gem_object {
+ 	 * Advice: are the backing pages purgeable?
+ 	 */
+ 	int madv;
++
++	/**
++	 * Number of crtcs where this object is currently the fb, but
++	 * will be page flipped away on the next vblank.  When it
++	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
++	 */
++	atomic_t pending_flip;
+ };
+ 
+ /**
+@@ -693,6 +734,7 @@ extern struct drm_ioctl_desc i915_ioctls[];
+ extern int i915_max_ioctl;
+ extern unsigned int i915_fbpercrtc;
+ extern unsigned int i915_powersave;
++extern unsigned int i915_lvds_downclock;
+ 
+ extern void i915_save_display(struct drm_device *dev);
+ extern void i915_restore_display(struct drm_device *dev);
+@@ -750,6 +792,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+ void
+ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+ 
++void intel_enable_asle (struct drm_device *dev);
++
+ 
+ /* i915_mem.c */
+ extern int i915_mem_alloc(struct drm_device *dev, void *data,
+@@ -782,6 +826,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+ 			     struct drm_file *file_priv);
+ int i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 			struct drm_file *file_priv);
++int i915_gem_execbuffer2(struct drm_device *dev, void *data,
++			 struct drm_file *file_priv);
+ int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+ 		       struct drm_file *file_priv);
+ int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+@@ -825,7 +871,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ 		     unsigned long end);
+ int i915_gem_idle(struct drm_device *dev);
+-int i915_lp_ring_sync(struct drm_device *dev);
++uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
++			  uint32_t flush_domains);
++int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ 				      int write);
+@@ -838,6 +886,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
+ int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+ void i915_gem_object_put_pages(struct drm_gem_object *obj);
+ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
++void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+ 
+ void i915_gem_shrinker_init(void);
+ void i915_gem_shrinker_exit(void);
+@@ -846,6 +895,9 @@ void i915_gem_shrinker_exit(void);
+ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+ void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
++bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
++		    int tiling_mode);
++bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+ 
+ /* i915_gem_debug.c */
+ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+@@ -877,11 +929,13 @@ extern int i915_restore_state(struct drm_device *dev);
+ extern int intel_opregion_init(struct drm_device *dev, int resume);
+ extern void intel_opregion_free(struct drm_device *dev, int suspend);
+ extern void opregion_asle_intr(struct drm_device *dev);
++extern void ironlake_opregion_gse_intr(struct drm_device *dev);
+ extern void opregion_enable_asle(struct drm_device *dev);
+ #else
+ static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
+ static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
+ static inline void opregion_asle_intr(struct drm_device *dev) { return; }
++static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
+ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+ #endif
+ 
+@@ -966,89 +1020,52 @@ extern void g4x_disable_fbc(struct drm_device *dev);
+ extern int i915_wrap_ring(struct drm_device * dev);
+ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+ 
+-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
+-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
+-
+-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
+-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
+-		        (dev)->pci_device == 0x27AE)
+-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+-		       (dev)->pci_device == 0x2982 || \
+-		       (dev)->pci_device == 0x2992 || \
+-		       (dev)->pci_device == 0x29A2 || \
+-		       (dev)->pci_device == 0x2A02 || \
+-		       (dev)->pci_device == 0x2A12 || \
+-		       (dev)->pci_device == 0x2A42 || \
+-		       (dev)->pci_device == 0x2E02 || \
+-		       (dev)->pci_device == 0x2E12 || \
+-		       (dev)->pci_device == 0x2E22 || \
+-		       (dev)->pci_device == 0x2E32 || \
+-		       (dev)->pci_device == 0x2E42 || \
+-		       (dev)->pci_device == 0x0042 || \
+-		       (dev)->pci_device == 0x0046)
+-
+-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
+-			(dev)->pci_device == 0x2A12)
+-
+-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+-
+-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
+-		     (dev)->pci_device == 0x2E12 || \
+-		     (dev)->pci_device == 0x2E22 || \
+-		     (dev)->pci_device == 0x2E32 || \
+-		     (dev)->pci_device == 0x2E42 || \
+-		     IS_GM45(dev))
+-
+-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
+-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
+-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+-
+-#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
+-			(dev)->pci_device == 0x29B2 ||	\
+-			(dev)->pci_device == 0x29D2 ||  \
+-			(IS_IGD(dev)))
+-
+-#define IS_IGDNG_D(dev)	((dev)->pci_device == 0x0042)
+-#define IS_IGDNG_M(dev)	((dev)->pci_device == 0x0046)
+-#define IS_IGDNG(dev)	(IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+-
+-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+-		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
+-		      IS_IGDNG(dev))
+-
+-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+-			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
+-			IS_IGD(dev) || IS_IGDNG_M(dev))
+-
+-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
+-				IS_IGDNG(dev))
++#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
++
++#define IS_I830(dev)		((dev)->pci_device == 0x3577)
++#define IS_845G(dev)		((dev)->pci_device == 0x2562)
++#define IS_I85X(dev)		((dev)->pci_device == 0x3582)
++#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
++#define IS_I8XX(dev)		(INTEL_INFO(dev)->is_i8xx)
++#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
++#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
++#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
++#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
++#define IS_I965G(dev)		(INTEL_INFO(dev)->is_i965g)
++#define IS_I965GM(dev)		(INTEL_INFO(dev)->is_i965gm)
++#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
++#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
++#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
++#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
++#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
++#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
++#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
++#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
++#define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
++#define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
++#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
++
++#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
++
+ /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+  * rows, which changed the alignment requirements and fence programming.
+  */
+ #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+ 						      IS_I915GM(dev)))
+-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_IGD(dev))
+-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
+-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
+-#define SUPPORTS_EDP(dev)		(IS_IGDNG_M(dev))
++#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
++#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
++#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
++#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+ #define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
+-					!IS_IGDNG(dev) && !IS_IGD(dev))
+-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
++					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
++#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
+ /* dsparb controlled by hw only */
+-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+-
+-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
+-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
+-			   (IS_I9XX(dev) || IS_GM45(dev)) && \
+-			   !IS_IGD(dev) && \
+-			   !IS_IGDNG(dev))
++#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
++
++#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
++#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
++#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
++#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
+ 
+ #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 04da731..ec8a0d7 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1294,7 +1294,7 @@ out_free_list:
+  * i915_gem_release_mmap - remove physical page mappings
+  * @obj: obj in question
+  *
+- * Preserve the reservation of the mmaping with the DRM core code, but
++ * Preserve the reservation of the mmapping with the DRM core code, but
+  * relinquish ownership of the pages back to the system.
+  *
+  * It is vital that we remove the page mapping if we have mapped a tiled
+@@ -1570,7 +1570,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+  *
+  * Returned sequence numbers are nonzero on success.
+  */
+-static uint32_t
++uint32_t
+ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ 		 uint32_t flush_domains)
+ {
+@@ -1604,7 +1604,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+ 	OUT_RING(MI_USER_INTERRUPT);
+ 	ADVANCE_LP_RING();
+ 
+-	DRM_DEBUG("%d\n", seqno);
++	DRM_DEBUG_DRIVER("%d\n", seqno);
+ 
+ 	request->seqno = seqno;
+ 	request->emitted_jiffies = jiffies;
+@@ -1809,7 +1809,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
+ 	mutex_unlock(&dev->struct_mutex);
+ }
+ 
+-static int
++int
+ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -1822,7 +1822,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+ 		return -EIO;
+ 
+ 	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+-		if (IS_IGDNG(dev))
++		if (IS_IRONLAKE(dev))
+ 			ier = I915_READ(DEIER) | I915_READ(GTIER);
+ 		else
+ 			ier = I915_READ(IER);
+@@ -1879,24 +1879,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
+ 	return i915_do_wait_request(dev, seqno, 1);
+ }
+ 
+-/**
+- * Waits for the ring to finish up to the latest request. Usefull for waiting
+- * for flip events, e.g for the overlay support. */
+-int i915_lp_ring_sync(struct drm_device *dev)
+-{
+-	uint32_t seqno;
+-	int ret;
+-
+-	seqno = i915_add_request(dev, NULL, 0);
+-
+-	if (seqno == 0)
+-		return -ENOMEM;
+-
+-	ret = i915_do_wait_request(dev, seqno, 0);
+-	BUG_ON(ret == -ERESTARTSYS);
+-	return ret;
+-}
+-
+ static void
+ i915_gem_flush(struct drm_device *dev,
+ 	       uint32_t invalidate_domains,
+@@ -2774,6 +2756,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+ 					    old_write_domain);
+ }
+ 
++void
++i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
++{
++	switch (obj->write_domain) {
++	case I915_GEM_DOMAIN_GTT:
++		i915_gem_object_flush_gtt_write_domain(obj);
++		break;
++	case I915_GEM_DOMAIN_CPU:
++		i915_gem_object_flush_cpu_write_domain(obj);
++		break;
++	default:
++		i915_gem_object_flush_gpu_write_domain(obj);
++		break;
++	}
++}
++
+ /**
+  * Moves a single object to the GTT read, and possibly write domain.
+  *
+@@ -3235,7 +3233,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+ static int
+ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ 				 struct drm_file *file_priv,
+-				 struct drm_i915_gem_exec_object *entry,
++				 struct drm_i915_gem_exec_object2 *entry,
+ 				 struct drm_i915_gem_relocation_entry *relocs)
+ {
+ 	struct drm_device *dev = obj->dev;
+@@ -3243,12 +3241,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+ 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ 	int i, ret;
+ 	void __iomem *reloc_page;
++	bool need_fence;
++
++	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
++	             obj_priv->tiling_mode != I915_TILING_NONE;
++
++	/* Check fence reg constraints and rebind if necessary */
++	if (need_fence && !i915_obj_fenceable(dev, obj))
++		i915_gem_object_unbind(obj);
+ 
+ 	/* Choose the GTT offset for our buffer and put it there. */
+ 	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * Pre-965 chips need a fence register set up in order to
++	 * properly handle blits to/from tiled surfaces.
++	 */
++	if (need_fence) {
++		ret = i915_gem_object_get_fence_reg(obj);
++		if (ret != 0) {
++			if (ret != -EBUSY && ret != -ERESTARTSYS)
++				DRM_ERROR("Failure to install fence: %d\n",
++					  ret);
++			i915_gem_object_unpin(obj);
++			return ret;
++		}
++	}
++
+ 	entry->offset = obj_priv->gtt_offset;
+ 
+ 	/* Apply the relocations, using the GTT aperture to avoid cache
+@@ -3410,7 +3431,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+  */
+ static int
+ i915_dispatch_gem_execbuffer(struct drm_device *dev,
+-			      struct drm_i915_gem_execbuffer *exec,
++			      struct drm_i915_gem_execbuffer2 *exec,
+ 			      struct drm_clip_rect *cliprects,
+ 			      uint64_t exec_offset)
+ {
+@@ -3500,7 +3521,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+ }
+ 
+ static int
+-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
++i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
+ 			      uint32_t buffer_count,
+ 			      struct drm_i915_gem_relocation_entry **relocs)
+ {
+@@ -3515,8 +3536,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+ 	}
+ 
+ 	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
+-	if (*relocs == NULL)
++	if (*relocs == NULL) {
++		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
+ 		return -ENOMEM;
++	}
+ 
+ 	for (i = 0; i < buffer_count; i++) {
+ 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+@@ -3540,13 +3563,16 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+ }
+ 
+ static int
+-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
++i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
+ 			    uint32_t buffer_count,
+ 			    struct drm_i915_gem_relocation_entry *relocs)
+ {
+ 	uint32_t reloc_count = 0, i;
+ 	int ret = 0;
+ 
++	if (relocs == NULL)
++	    return 0;
++
+ 	for (i = 0; i < buffer_count; i++) {
+ 		struct drm_i915_gem_relocation_entry __user *user_relocs;
+ 		int unwritten;
+@@ -3573,7 +3599,7 @@ err:
+ }
+ 
+ static int
+-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
++i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
+ 			   uint64_t exec_offset)
+ {
+ 	uint32_t exec_start, exec_len;
+@@ -3590,22 +3616,57 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+ 	return 0;
+ }
+ 
++static int
++i915_gem_wait_for_pending_flip(struct drm_device *dev,
++			       struct drm_gem_object **object_list,
++			       int count)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_i915_gem_object *obj_priv;
++	DEFINE_WAIT(wait);
++	int i, ret = 0;
++
++	for (;;) {
++		prepare_to_wait(&dev_priv->pending_flip_queue,
++				&wait, TASK_INTERRUPTIBLE);
++		for (i = 0; i < count; i++) {
++			obj_priv = object_list[i]->driver_private;
++			if (atomic_read(&obj_priv->pending_flip) > 0)
++				break;
++		}
++		if (i == count)
++			break;
++
++		if (!signal_pending(current)) {
++			mutex_unlock(&dev->struct_mutex);
++			schedule();
++			mutex_lock(&dev->struct_mutex);
++			continue;
++		}
++		ret = -ERESTARTSYS;
++		break;
++	}
++	finish_wait(&dev_priv->pending_flip_queue, &wait);
++
++	return ret;
++}
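
Stripped of the mutex juggling, i915_gem_wait_for_pending_flip() is an
instance of the kernel's standard open-coded wait idiom; a generic
sketch, assuming a wait queue head wq and a condition() helper:

	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
		if (condition())
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&wq, &wait);

The function above additionally drops dev->struct_mutex around
schedule(), presumably so the paths that retire flips and need the
mutex are not blocked while the caller sleeps.
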
++
+ int
+-i915_gem_execbuffer(struct drm_device *dev, void *data,
+-		    struct drm_file *file_priv)
++i915_gem_do_execbuffer(struct drm_device *dev, void *data,
++		       struct drm_file *file_priv,
++		       struct drm_i915_gem_execbuffer2 *args,
++		       struct drm_i915_gem_exec_object2 *exec_list)
+ {
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+-	struct drm_i915_gem_execbuffer *args = data;
+-	struct drm_i915_gem_exec_object *exec_list = NULL;
+ 	struct drm_gem_object **object_list = NULL;
+ 	struct drm_gem_object *batch_obj;
+ 	struct drm_i915_gem_object *obj_priv;
+ 	struct drm_clip_rect *cliprects = NULL;
+-	struct drm_i915_gem_relocation_entry *relocs;
+-	int ret, ret2, i, pinned = 0;
++	struct drm_i915_gem_relocation_entry *relocs = NULL;
++	int ret = 0, ret2, i, pinned = 0;
+ 	uint64_t exec_offset;
+ 	uint32_t seqno, flush_domains, reloc_index;
+-	int pin_tries;
++	int pin_tries, flips;
+ 
+ #if WATCH_EXEC
+ 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+@@ -3616,31 +3677,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ 		return -EINVAL;
+ 	}
+-	/* Copy in the exec list from userland */
+-	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
+-	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+-	if (exec_list == NULL || object_list == NULL) {
+-		DRM_ERROR("Failed to allocate exec or object list "
+-			  "for %d buffers\n",
++	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
++	if (object_list == NULL) {
++		DRM_ERROR("Failed to allocate object list for %d buffers\n",
+ 			  args->buffer_count);
+ 		ret = -ENOMEM;
+ 		goto pre_mutex_err;
+ 	}
+-	ret = copy_from_user(exec_list,
+-			     (struct drm_i915_relocation_entry __user *)
+-			     (uintptr_t) args->buffers_ptr,
+-			     sizeof(*exec_list) * args->buffer_count);
+-	if (ret != 0) {
+-		DRM_ERROR("copy %d exec entries failed %d\n",
+-			  args->buffer_count, ret);
+-		goto pre_mutex_err;
+-	}
+ 
+ 	if (args->num_cliprects != 0) {
+ 		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
+ 				    GFP_KERNEL);
+-		if (cliprects == NULL)
++		if (cliprects == NULL) {
++			ret = -ENOMEM;
+ 			goto pre_mutex_err;
++		}
+ 
+ 		ret = copy_from_user(cliprects,
+ 				     (struct drm_clip_rect __user *)
+@@ -3663,26 +3714,27 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 	i915_verify_inactive(dev, __FILE__, __LINE__);
+ 
+ 	if (atomic_read(&dev_priv->mm.wedged)) {
+-		DRM_ERROR("Execbuf while wedged\n");
+ 		mutex_unlock(&dev->struct_mutex);
+ 		ret = -EIO;
+ 		goto pre_mutex_err;
+ 	}
+ 
+ 	if (dev_priv->mm.suspended) {
+-		DRM_ERROR("Execbuf while VT-switched.\n");
+ 		mutex_unlock(&dev->struct_mutex);
+ 		ret = -EBUSY;
+ 		goto pre_mutex_err;
+ 	}
+ 
+ 	/* Look up object handles */
++	flips = 0;
+ 	for (i = 0; i < args->buffer_count; i++) {
+ 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
+ 						       exec_list[i].handle);
+ 		if (object_list[i] == NULL) {
+ 			DRM_ERROR("Invalid object handle %d at index %d\n",
+ 				   exec_list[i].handle, i);
++			/* prevent error path from reading uninitialized data */
++			args->buffer_count = i + 1;
+ 			ret = -EBADF;
+ 			goto err;
+ 		}
+@@ -3691,10 +3743,20 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
+ 		if (obj_priv->in_execbuffer) {
+ 			DRM_ERROR("Object %p appears more than once in object list\n",
+ 				   object_list[i]);
++			/* prevent error path from reading uninitialized data */
++			args->buffer_count = i + 1;
+ 			ret = -EBADF;
+ 			goto err;
+ 		}
+ 		obj_priv->in_execbuffer = true;
++		flips += atomic_read(&obj_priv->pending_flip);
++	}
++
++	if (flips > 0) {
++		ret = i915_gem_wait_for_pending_flip(dev, object_list,
++						     args->buffer_count);
++		if (ret)
++			goto err;
+ 	}
+ 
+ 	/* Pin and relocate */
+@@ -3886,8 +3948,101 @@ err:
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+ 
++pre_mutex_err:
++	/* Copy the updated relocations out regardless of current error
++	 * state.  Failure to update the relocs would mean that the next
++	 * time userland calls execbuf, it would do so with presumed offset
++	 * state that didn't match the actual object state.
++	 */
++	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
++					   relocs);
++	if (ret2 != 0) {
++		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
++
++		if (ret == 0)
++			ret = ret2;
++	}
++
++	drm_free_large(object_list);
++	kfree(cliprects);
++
++	return ret;
++}
++
++/*
++ * Legacy execbuffer just creates an exec2 list from the original exec object
++ * list array and passes it to the real function.
++ */
++int
++i915_gem_execbuffer(struct drm_device *dev, void *data,
++		    struct drm_file *file_priv)
++{
++	struct drm_i915_gem_execbuffer *args = data;
++	struct drm_i915_gem_execbuffer2 exec2;
++	struct drm_i915_gem_exec_object *exec_list = NULL;
++	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
++	int ret, i;
++
++#if WATCH_EXEC
++	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
++		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
++#endif
++
++	if (args->buffer_count < 1) {
++		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
++		return -EINVAL;
++	}
++
++	/* Copy in the exec list from userland */
++	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
++	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
++	if (exec_list == NULL || exec2_list == NULL) {
++		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
++			  args->buffer_count);
++		drm_free_large(exec_list);
++		drm_free_large(exec2_list);
++		return -ENOMEM;
++	}
++	ret = copy_from_user(exec_list,
++			     (struct drm_i915_relocation_entry __user *)
++			     (uintptr_t) args->buffers_ptr,
++			     sizeof(*exec_list) * args->buffer_count);
++	if (ret != 0) {
++		DRM_ERROR("copy %d exec entries failed %d\n",
++			  args->buffer_count, ret);
++		drm_free_large(exec_list);
++		drm_free_large(exec2_list);
++		return -EFAULT;
++	}
++
++	for (i = 0; i < args->buffer_count; i++) {
++		exec2_list[i].handle = exec_list[i].handle;
++		exec2_list[i].relocation_count = exec_list[i].relocation_count;
++		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
++		exec2_list[i].alignment = exec_list[i].alignment;
++		exec2_list[i].offset = exec_list[i].offset;
++		if (!IS_I965G(dev))
++			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
++		else
++			exec2_list[i].flags = 0;
++	}
++
++	exec2.buffers_ptr = args->buffers_ptr;
++	exec2.buffer_count = args->buffer_count;
++	exec2.batch_start_offset = args->batch_start_offset;
++	exec2.batch_len = args->batch_len;
++	exec2.DR1 = args->DR1;
++	exec2.DR4 = args->DR4;
++	exec2.num_cliprects = args->num_cliprects;
++	exec2.cliprects_ptr = args->cliprects_ptr;
++	exec2.flags = 0;
++
++	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+ 	if (!ret) {
+ 		/* Copy the new buffer offsets back to the user's exec list. */
++		for (i = 0; i < args->buffer_count; i++)
++			exec_list[i].offset = exec2_list[i].offset;
++		/* ... and back out to userspace */
+ 		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ 				   (uintptr_t) args->buffers_ptr,
+ 				   exec_list,
+@@ -3900,25 +4055,62 @@ err:
+ 		}
+ 	}
+ 
+-	/* Copy the updated relocations out regardless of current error
+-	 * state.  Failure to update the relocs would mean that the next
+-	 * time userland calls execbuf, it would do so with presumed offset
+-	 * state that didn't match the actual object state.
+-	 */
+-	ret2 = i915_gem_put_relocs_to_user(exec_list, args->buffer_count,
+-					   relocs);
+-	if (ret2 != 0) {
+-		DRM_ERROR("Failed to copy relocations back out: %d\n", ret2);
++	drm_free_large(exec_list);
++	drm_free_large(exec2_list);
++	return ret;
++}
+ 
+-		if (ret == 0)
+-			ret = ret2;
++int
++i915_gem_execbuffer2(struct drm_device *dev, void *data,
++		     struct drm_file *file_priv)
++{
++	struct drm_i915_gem_execbuffer2 *args = data;
++	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
++	int ret;
++
++#if WATCH_EXEC
++	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
++		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
++#endif
++
++	if (args->buffer_count < 1) {
++		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
++		return -EINVAL;
+ 	}
+ 
+-pre_mutex_err:
+-	drm_free_large(object_list);
+-	drm_free_large(exec_list);
+-	kfree(cliprects);
++	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
++	if (exec2_list == NULL) {
++		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
++			  args->buffer_count);
++		return -ENOMEM;
++	}
++	ret = copy_from_user(exec2_list,
++			     (struct drm_i915_relocation_entry __user *)
++			     (uintptr_t) args->buffers_ptr,
++			     sizeof(*exec2_list) * args->buffer_count);
++	if (ret != 0) {
++		DRM_ERROR("copy %d exec entries failed %d\n",
++			  args->buffer_count, ret);
++		drm_free_large(exec2_list);
++		return -EFAULT;
++	}
++
++	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
++	if (!ret) {
++		/* Copy the new buffer offsets back to the user's exec list. */
++		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
++				   (uintptr_t) args->buffers_ptr,
++				   exec2_list,
++				   sizeof(*exec2_list) * args->buffer_count);
++		if (ret) {
++			ret = -EFAULT;
++			DRM_ERROR("failed to copy %d exec entries "
++				  "back to user (%d)\n",
++				  args->buffer_count, ret);
++		}
++	}
+ 
++	drm_free_large(exec2_list);
+ 	return ret;
+ }
+ 
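
From userspace the new entry point is the DRM_IOCTL_I915_GEM_EXECBUFFER2
ioctl. A hedged sketch of a minimal caller (real clients go through
libdrm; fd is assumed to be an open DRM device node and batch_handle a
GEM handle holding a valid batch buffer):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	static int submit_batch(int fd, uint32_t batch_handle,
				uint32_t batch_len)
	{
		struct drm_i915_gem_exec_object2 obj;
		struct drm_i915_gem_execbuffer2 execbuf;

		memset(&obj, 0, sizeof(obj));
		obj.handle = batch_handle;
		/* set obj.flags = EXEC_OBJECT_NEEDS_FENCE here for
		 * tiled surfaces on pre-965 parts */

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t) &obj;
		execbuf.buffer_count = 1;
		execbuf.batch_len = batch_len;

		return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	}
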
+@@ -3935,19 +4127,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+ 		if (ret)
+ 			return ret;
+ 	}
+-	/*
+-	 * Pre-965 chips need a fence register set up in order to
+-	 * properly handle tiled surfaces.
+-	 */
+-	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
+-		ret = i915_gem_object_get_fence_reg(obj);
+-		if (ret != 0) {
+-			if (ret != -EBUSY && ret != -ERESTARTSYS)
+-				DRM_ERROR("Failure to install fence: %d\n",
+-					  ret);
+-			return ret;
+-		}
+-	}
++
+ 	obj_priv->pin_count++;
+ 
+ 	/* If the object is not active and not pending a flush,
+@@ -4429,7 +4609,7 @@ i915_gem_init_hws(struct drm_device *dev)
+ 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+ 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ 	I915_READ(HWS_PGA); /* posting read */
+-	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
++	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+ 
+ 	return 0;
+ }
+@@ -4688,8 +4868,8 @@ i915_gem_load(struct drm_device *dev)
+ 			for (i = 0; i < 8; i++)
+ 				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
+ 	}
+-
+ 	i915_gem_detect_bit_6_swizzle(dev);
++	init_waitqueue_head(&dev_priv->pending_flip_queue);
+ }
+ 
+ /*
+@@ -4864,7 +5044,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+ 	user_data = (char __user *) (uintptr_t) args->data_ptr;
+ 	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ 
+-	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
++	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
+ 	ret = copy_from_user(obj_addr, user_data, args->size);
+ 	if (ret)
+ 		return -EFAULT;
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 200e398..df278b2 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
+ 				     0,   pcibios_align_resource,
+ 				     dev_priv->bridge_dev);
+ 	if (ret) {
+-		DRM_DEBUG("failed bus alloc: %d\n", ret);
++		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ 		dev_priv->mch_res.start = 0;
+ 		goto out;
+ 	}
+@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ 	bool need_disable;
+ 
+-	if (IS_IGDNG(dev)) {
+-		/* On IGDNG whatever DRAM config, GPU always do
++	if (IS_IRONLAKE(dev)) {
++		/* On Ironlake, whatever the DRAM config, the GPU always does the
+ 		 * same swizzling setup.
+ 		 */
+ 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+ 
+ 
+ /**
+- * Returns the size of the fence for a tiled object of the given size.
++ * Returns whether an object is currently fenceable.  If not, it may need
++ * to be unbound and have its pitch adjusted.
+  */
+-static int
+-i915_get_fence_size(struct drm_device *dev, int size)
++bool
++i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
+ {
+-	int i;
+-	int start;
++	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ 
+ 	if (IS_I965G(dev)) {
+ 		/* The 965 can have fences at any page boundary. */
+-		return ALIGN(size, 4096);
++		if (obj->size & 4095)
++			return false;
++		return true;
++	} else if (IS_I9XX(dev)) {
++		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
++			return false;
+ 	} else {
+-		/* Align the size to a power of two greater than the smallest
+-		 * fence size.
+-		 */
+-		if (IS_I9XX(dev))
+-			start = 1024 * 1024;
+-		else
+-			start = 512 * 1024;
++		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
++			return false;
++	}
+ 
+-		for (i = start; i < size; i <<= 1)
+-			;
++	/* Power of two sized... */
++	if (obj->size & (obj->size - 1))
++		return false;
+ 
+-		return i;
+-	}
++	/* Objects must be size aligned as well */
++	if (obj_priv->gtt_offset & (obj->size - 1))
++		return false;
++	return true;
+ }
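
The last two checks rely on a standard bit trick: a power of two has
exactly one bit set, so size & (size - 1) is zero exactly for powers of
two, and gtt_offset & (size - 1) tests alignment to that size. In
isolation (illustrative helpers; the kernel also offers is_power_of_2()
in <linux/log2.h>):

	/* 0x100000 & 0x0fffff == 0  ->  1 MiB is a power of two
	 * 0x180000 & 0x17ffff != 0  ->  1.5 MiB is not */
	static inline bool size_is_pot(u32 size)
	{
		return size && !(size & (size - 1));
	}

	static inline bool aligned_to(u32 offset, u32 size)
	{
		return (offset & (size - 1)) == 0;
	}
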
+ 
+ /* Check pitch constriants for all chips & tiling formats */
+-static bool
++bool
+ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+ {
+ 	int tile_width;
+@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
+ 	if (stride & (stride - 1))
+ 		return false;
+ 
+-	/* We don't 0handle the aperture area covered by the fence being bigger
+-	 * than the object size.
+-	 */
+-	if (i915_get_fence_size(dev, size) != size)
+-		return false;
+-
+ 	return true;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
+index 1fe68a2..13b0289 100644
+--- a/drivers/gpu/drm/i915/i915_ioc32.c
++++ b/drivers/gpu/drm/i915/i915_ioc32.c
+@@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
+ 			  &batchbuffer->cliprects))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_I915_BATCHBUFFER,
++	return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
+ 			 (unsigned long)batchbuffer);
+ }
+ 
+@@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
+ 			  &cmdbuffer->cliprects))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
++	return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
++			 (unsigned long)cmdbuffer);
+ }
+ 
+ typedef struct drm_i915_irq_emit32 {
+@@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
+ 			  &request->irq_seq))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
++			 (unsigned long)request);
+ }
+ typedef struct drm_i915_getparam32 {
+ 	int param;
+@@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
+ 			  &request->value))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
++			 (unsigned long)request);
+ }
+ 
+ typedef struct drm_i915_mem_alloc32 {
+@@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
+ 			  &request->region_offset))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_I915_ALLOC, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
++			 (unsigned long)request);
+ }
+ 
+ drm_ioctl_compat_t *i915_compat_ioctls[] = {
+@@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+ 		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+ 
+-	lock_kernel();		/* XXX for now */
+ 	if (fn != NULL)
+ 		ret = (*fn) (filp, cmd, arg);
+ 	else
+-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+-	unlock_kernel();
++		ret = drm_ioctl(filp, cmd, arg);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 63f28ad..a17d6bd 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -43,10 +43,13 @@
+  * we leave them always unmasked in IMR and then control enabling them through
+  * PIPESTAT alone.
+  */
+-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |		 \
+-				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+-				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+-				   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
++#define I915_INTERRUPT_ENABLE_FIX			\
++	(I915_ASLE_INTERRUPT |				\
++	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
++	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
++	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
++	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
++	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+ 
+ /** Interrupts that we mask and unmask at runtime. */
+ #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+@@ -61,7 +64,7 @@
+ 					 DRM_I915_VBLANK_PIPE_B)
+ 
+ void
+-igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
++ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ {
+ 	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
+ 		dev_priv->gt_irq_mask_reg &= ~mask;
+@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ }
+ 
+ static inline void
+-igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
++ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ {
+ 	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
+ 		dev_priv->gt_irq_mask_reg |= mask;
+@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ 
+ /* For display hotplug interrupt */
+ void
+-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
++ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ {
+ 	if ((dev_priv->irq_mask_reg & mask) != 0) {
+ 		dev_priv->irq_mask_reg &= ~mask;
+@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ }
+ 
+ static inline void
+-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
++ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ {
+ 	if ((dev_priv->irq_mask_reg & mask) != mask) {
+ 		dev_priv->irq_mask_reg |= mask;
+@@ -157,6 +160,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
+ }
+ 
+ /**
++ * intel_enable_asle - enable ASLE interrupt for OpRegion
++ */
++void intel_enable_asle (struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
++
++	if (IS_IRONLAKE(dev))
++		ironlake_enable_display_irq(dev_priv, DE_GSE);
++	else
++		i915_enable_pipestat(dev_priv, 1,
++				     I915_LEGACY_BLC_EVENT_ENABLE);
++}
++
++/**
+  * i915_pipe_enabled - check if a pipe is enabled
+  * @dev: DRM device
+  * @pipe: pipe to check
+@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+ 	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ 
+ 	if (!i915_pipe_enabled(dev, pipe)) {
+-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
++		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
++				"pipe %d\n", pipe);
+ 		return 0;
+ 	}
+ 
+@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+ 	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+ 
+ 	if (!i915_pipe_enabled(dev, pipe)) {
+-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
++		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
++					"pipe %d\n", pipe);
+ 		return 0;
+ 	}
+ 
+@@ -250,11 +269,11 @@ static void i915_hotplug_work_func(struct work_struct *work)
+ 	drm_sysfs_hotplug_event(dev);
+ }
+ 
+-irqreturn_t igdng_irq_handler(struct drm_device *dev)
++irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	int ret = IRQ_NONE;
+-	u32 de_iir, gt_iir, de_ier;
++	u32 de_iir, gt_iir, de_ier, pch_iir;
+ 	struct drm_i915_master_private *master_priv;
+ 
+ 	/* disable master interrupt before clearing iir  */
+@@ -264,8 +283,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
+ 
+ 	de_iir = I915_READ(DEIIR);
+ 	gt_iir = I915_READ(GTIIR);
++	pch_iir = I915_READ(SDEIIR);
+ 
+-	if (de_iir == 0 && gt_iir == 0)
++	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ 		goto done;
+ 
+ 	ret = IRQ_HANDLED;
+@@ -286,6 +306,33 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
+ 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ 	}
+ 
++	if (de_iir & DE_GSE)
++		ironlake_opregion_gse_intr(dev);
++
++	if (de_iir & DE_PLANEA_FLIP_DONE) {
++		intel_prepare_page_flip(dev, 0);
++		intel_finish_page_flip(dev, 0);
++	}
++
++	if (de_iir & DE_PLANEB_FLIP_DONE) {
++		intel_prepare_page_flip(dev, 1);
++		intel_finish_page_flip(dev, 1);
++	}
++
++	if (de_iir & DE_PIPEA_VBLANK)
++		drm_handle_vblank(dev, 0);
++
++	if (de_iir & DE_PIPEB_VBLANK)
++		drm_handle_vblank(dev, 1);
++
++	/* check event from PCH */
++	if ((de_iir & DE_PCH_EVENT) &&
++	    (pch_iir & SDE_HOTPLUG_MASK)) {
++		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
++	}
++
++	/* should clear PCH hotplug event before clearing the CPU irq */
++	I915_WRITE(SDEIIR, pch_iir);
+ 	I915_WRITE(GTIIR, gt_iir);
+ 	I915_WRITE(DEIIR, de_iir);
+ 
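
The handler follows the usual three-register handshake (register names
per i915_reg.h): *IER gates which events may raise the interrupt, *IMR
masks them, and *IIR latches what actually fired and is
write-one-to-clear. Reduced to a skeleton over the accessors used in
this file:

	u32 de_iir = I915_READ(DEIIR);	/* what fired */

	if (de_iir & DE_PIPEA_VBLANK)	/* handle it */
		drm_handle_vblank(dev, 0);

	I915_WRITE(DEIIR, de_iir);	/* ack: write 1s back to clear */

Hence the ordering note in the code above: SDEIIR (the PCH side) has to
be acked before DEIIR, because the PCH event reaches the CPU as the
cascaded DE_PCH_EVENT bit.
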
+@@ -312,19 +359,19 @@ static void i915_error_work_func(struct work_struct *work)
+ 	char *reset_event[] = { "RESET=1", NULL };
+ 	char *reset_done_event[] = { "ERROR=0", NULL };
+ 
+-	DRM_DEBUG("generating error event\n");
++	DRM_DEBUG_DRIVER("generating error event\n");
+ 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+ 
+ 	if (atomic_read(&dev_priv->mm.wedged)) {
+ 		if (IS_I965G(dev)) {
+-			DRM_DEBUG("resetting chip\n");
++			DRM_DEBUG_DRIVER("resetting chip\n");
+ 			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+ 			if (!i965_reset(dev, GDRST_RENDER)) {
+ 				atomic_set(&dev_priv->mm.wedged, 0);
+ 				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
+ 			}
+ 		} else {
+-			printk("reboot required\n");
++			DRM_DEBUG_DRIVER("reboot required\n");
+ 		}
+ 	}
+ }
+@@ -350,7 +397,7 @@ static void i915_capture_error_state(struct drm_device *dev)
+ 
+ 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ 	if (!error) {
+-		DRM_DEBUG("out ot memory, not capturing error state\n");
++		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ 		goto out;
+ 	}
+ 
+@@ -507,7 +554,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
+ 		/*
+ 		 * Wakeup waiting processes so they don't hang
+ 		 */
+-		printk("i915: Waking up sleeping processes\n");
+ 		DRM_WAKEUP(&dev_priv->irq_queue);
+ 	}
+ 
+@@ -530,8 +576,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 
+ 	atomic_inc(&dev_priv->irq_received);
+ 
+-	if (IS_IGDNG(dev))
+-		return igdng_irq_handler(dev);
++	if (IS_IRONLAKE(dev))
++		return ironlake_irq_handler(dev);
+ 
+ 	iir = I915_READ(IIR);
+ 
+@@ -563,14 +609,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 		 */
+ 		if (pipea_stats & 0x8000ffff) {
+ 			if (pipea_stats &  PIPE_FIFO_UNDERRUN_STATUS)
+-				DRM_DEBUG("pipe a underrun\n");
++				DRM_DEBUG_DRIVER("pipe a underrun\n");
+ 			I915_WRITE(PIPEASTAT, pipea_stats);
+ 			irq_received = 1;
+ 		}
+ 
+ 		if (pipeb_stats & 0x8000ffff) {
+ 			if (pipeb_stats &  PIPE_FIFO_UNDERRUN_STATUS)
+-				DRM_DEBUG("pipe b underrun\n");
++				DRM_DEBUG_DRIVER("pipe b underrun\n");
+ 			I915_WRITE(PIPEBSTAT, pipeb_stats);
+ 			irq_received = 1;
+ 		}
+@@ -586,7 +632,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
+ 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ 
+-			DRM_DEBUG("hotplug event received, stat 0x%08x\n",
++			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
+ 				  hotplug_status);
+ 			if (hotplug_status & dev_priv->hotplug_supported_mask)
+ 				queue_work(dev_priv->wq,
+@@ -594,27 +640,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 
+ 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ 			I915_READ(PORT_HOTPLUG_STAT);
+-
+-			/* EOS interrupts occurs */
+-			if (IS_IGD(dev) &&
+-				(hotplug_status & CRT_EOS_INT_STATUS)) {
+-				u32 temp;
+-
+-				DRM_DEBUG("EOS interrupt occurs\n");
+-				/* status is already cleared */
+-				temp = I915_READ(ADPA);
+-				temp &= ~ADPA_DAC_ENABLE;
+-				I915_WRITE(ADPA, temp);
+-
+-				temp = I915_READ(PORT_HOTPLUG_EN);
+-				temp &= ~CRT_EOS_INT_EN;
+-				I915_WRITE(PORT_HOTPLUG_EN, temp);
+-
+-				temp = I915_READ(PORT_HOTPLUG_STAT);
+-				if (temp & CRT_EOS_INT_STATUS)
+-					I915_WRITE(PORT_HOTPLUG_STAT,
+-						CRT_EOS_INT_STATUS);
+-			}
+ 		}
+ 
+ 		I915_WRITE(IIR, iir);
+@@ -636,14 +661,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+ 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+ 		}
+ 
++		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
++			intel_prepare_page_flip(dev, 0);
++
++		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
++			intel_prepare_page_flip(dev, 1);
++
+ 		if (pipea_stats & vblank_status) {
+ 			vblank++;
+ 			drm_handle_vblank(dev, 0);
++			intel_finish_page_flip(dev, 0);
+ 		}
+ 
+ 		if (pipeb_stats & vblank_status) {
+ 			vblank++;
+ 			drm_handle_vblank(dev, 1);
++			intel_finish_page_flip(dev, 1);
+ 		}
+ 
+ 		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+@@ -679,7 +712,7 @@ static int i915_emit_irq(struct drm_device * dev)
+ 
+ 	i915_kernel_lost_context(dev);
+ 
+-	DRM_DEBUG("\n");
++	DRM_DEBUG_DRIVER("\n");
+ 
+ 	dev_priv->counter++;
+ 	if (dev_priv->counter > 0x7FFFFFFFUL)
+@@ -704,8 +737,8 @@ void i915_user_irq_get(struct drm_device *dev)
+ 
+ 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
+-		if (IS_IGDNG(dev))
+-			igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
++		if (IS_IRONLAKE(dev))
++			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ 		else
+ 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ 	}
+@@ -720,8 +753,8 @@ void i915_user_irq_put(struct drm_device *dev)
+ 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
+ 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
+-		if (IS_IGDNG(dev))
+-			igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
++		if (IS_IRONLAKE(dev))
++			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+ 		else
+ 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ 	}
+@@ -744,7 +777,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+ 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ 	int ret = 0;
+ 
+-	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
++	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
+ 		  READ_BREADCRUMB(dev_priv));
+ 
+ 	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+@@ -827,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
+ 	if (!(pipeconf & PIPEACONF_ENABLE))
+ 		return -EINVAL;
+ 
+-	if (IS_IGDNG(dev))
+-		return 0;
+-
+ 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+-	if (IS_I965G(dev))
++	if (IS_IRONLAKE(dev))
++		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
++					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
++	else if (IS_I965G(dev))
+ 		i915_enable_pipestat(dev_priv, pipe,
+ 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ 	else
+@@ -849,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	unsigned long irqflags;
+ 
+-	if (IS_IGDNG(dev))
+-		return;
+-
+ 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+-	i915_disable_pipestat(dev_priv, pipe,
+-			      PIPE_VBLANK_INTERRUPT_ENABLE |
+-			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
++	if (IS_IRONLAKE(dev))
++		ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
++					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
++	else
++		i915_disable_pipestat(dev_priv, pipe,
++				      PIPE_VBLANK_INTERRUPT_ENABLE |
++				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ }
+ 
+@@ -863,7 +897,7 @@ void i915_enable_interrupt (struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+-	if (!IS_IGDNG(dev))
++	if (!IS_IRONLAKE(dev))
+ 		opregion_enable_asle(dev);
+ 	dev_priv->irq_enabled = 1;
+ }
+@@ -971,7 +1005,7 @@ void i915_hangcheck_elapsed(unsigned long data)
+ 
+ /* drm_dma.h hooks
+ */
+-static void igdng_irq_preinstall(struct drm_device *dev)
++static void ironlake_irq_preinstall(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 
+@@ -987,17 +1021,25 @@ static void igdng_irq_preinstall(struct drm_device *dev)
+ 	I915_WRITE(GTIMR, 0xffffffff);
+ 	I915_WRITE(GTIER, 0x0);
+ 	(void) I915_READ(GTIER);
++
++	/* south display irq */
++	I915_WRITE(SDEIMR, 0xffffffff);
++	I915_WRITE(SDEIER, 0x0);
++	(void) I915_READ(SDEIER);
+ }
+ 
+-static int igdng_irq_postinstall(struct drm_device *dev)
++static int ironlake_irq_postinstall(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	/* enable kind of interrupts always enabled */
+-	u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
++	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
++			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
+ 	u32 render_mask = GT_USER_INTERRUPT;
++	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
++			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+ 
+ 	dev_priv->irq_mask_reg = ~display_mask;
+-	dev_priv->de_irq_enable_reg = display_mask;
++	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+ 
+ 	/* should always can generate irq */
+ 	I915_WRITE(DEIIR, I915_READ(DEIIR));
+@@ -1014,6 +1056,14 @@ static int igdng_irq_postinstall(struct drm_device *dev)
+ 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
+ 	(void) I915_READ(GTIER);
+ 
++	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
++	dev_priv->pch_irq_enable_reg = hotplug_mask;
++
++	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
++	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
++	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
++	(void) I915_READ(SDEIER);
++
+ 	return 0;
+ }
+ 
+@@ -1026,8 +1076,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
+ 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
+ 
+-	if (IS_IGDNG(dev)) {
+-		igdng_irq_preinstall(dev);
++	if (IS_IRONLAKE(dev)) {
++		ironlake_irq_preinstall(dev);
+ 		return;
+ 	}
+ 
+@@ -1058,8 +1108,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
+ 
+ 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+ 
+-	if (IS_IGDNG(dev))
+-		return igdng_irq_postinstall(dev);
++	if (IS_IRONLAKE(dev))
++		return ironlake_irq_postinstall(dev);
+ 
+ 	/* Unmask the interrupts that we always want on. */
+ 	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+@@ -1123,7 +1173,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
+ 	return 0;
+ }
+ 
+-static void igdng_irq_uninstall(struct drm_device *dev)
++static void ironlake_irq_uninstall(struct drm_device *dev)
+ {
+ 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ 	I915_WRITE(HWSTAM, 0xffffffff);
+@@ -1146,8 +1196,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
+ 
+ 	dev_priv->vblank_pipe = 0;
+ 
+-	if (IS_IGDNG(dev)) {
+-		igdng_irq_uninstall(dev);
++	if (IS_IRONLAKE(dev)) {
++		ironlake_irq_uninstall(dev);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
+index 2d51935..7cc8410 100644
+--- a/drivers/gpu/drm/i915/i915_opregion.c
++++ b/drivers/gpu/drm/i915/i915_opregion.c
+@@ -118,6 +118,10 @@ struct opregion_asle {
+ #define ASLE_BACKLIGHT_FAIL    (2<<12)
+ #define ASLE_PFIT_FAIL         (2<<14)
+ #define ASLE_PWM_FREQ_FAIL     (2<<16)
++#define ASLE_ALS_ILLUM_FAILED	(1<<10)
++#define ASLE_BACKLIGHT_FAILED	(1<<12)
++#define ASLE_PFIT_FAILED	(1<<14)
++#define ASLE_PWM_FREQ_FAILED	(1<<16)
+ 
+ /* ASLE backlight brightness to set */
+ #define ASLE_BCLP_VALID                (1<<31)
+@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+ 	if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
+ 		pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
+ 	else {
+-		if (IS_IGD(dev)) {
++		if (IS_PINEVIEW(dev)) {
+ 			blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ 			max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
+ 					BACKLIGHT_MODULATION_FREQ_SHIFT;
+@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
+ 	asle_req = asle->aslc & ASLE_REQ_MSK;
+ 
+ 	if (!asle_req) {
+-		DRM_DEBUG("non asle set request??\n");
++		DRM_DEBUG_DRIVER("non asle set request??\n");
+ 		return;
+ 	}
+ 
+@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
+ 	asle->aslc = asle_stat;
+ }
+ 
++static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct opregion_asle *asle = dev_priv->opregion.asle;
++	u32 cpu_pwm_ctl, pch_pwm_ctl2;
++	u32 max_backlight, level;
++
++	if (!(bclp & ASLE_BCLP_VALID))
++		return ASLE_BACKLIGHT_FAILED;
++
++	bclp &= ASLE_BCLP_MSK;
++	if (bclp < 0 || bclp > 255)
++		return ASLE_BACKLIGHT_FAILED;
++
++	cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
++	pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
++	/* get the max PWM frequency */
++	max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
++	/* calculate the expected PWM frequency */
++	level = (bclp * max_backlight) / 255;
++	/* preserve the high 16 bits */
++	cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
++	/* write the updated PWM frequency */
++	I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
++
++	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
++
++	return 0;
++}
++
++void ironlake_opregion_gse_intr(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct opregion_asle *asle = dev_priv->opregion.asle;
++	u32 asle_stat = 0;
++	u32 asle_req;
++
++	if (!asle)
++		return;
++
++	asle_req = asle->aslc & ASLE_REQ_MSK;
++
++	if (!asle_req) {
++		DRM_DEBUG_DRIVER("non asle set request??\n");
++		return;
++	}
++
++	if (asle_req & ASLE_SET_ALS_ILLUM) {
++		DRM_DEBUG_DRIVER("Illum is not supported\n");
++		asle_stat |= ASLE_ALS_ILLUM_FAILED;
++	}
++
++	if (asle_req & ASLE_SET_BACKLIGHT)
++		asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
++
++	if (asle_req & ASLE_SET_PFIT) {
++		DRM_DEBUG_DRIVER("Pfit is not supported\n");
++		asle_stat |= ASLE_PFIT_FAILED;
++	}
++
++	if (asle_req & ASLE_SET_PWM_FREQ) {
++		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
++		asle_stat |= ASLE_PWM_FREQ_FAILED;
++	}
++
++	asle->aslc = asle_stat;
++}
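
The scaling in asle_set_backlight_ironlake() maps the firmware's 0-255
request onto the duty-cycle range read back from BLC_PWM_PCH_CTL2. A
worked example with illustrative values:

	/* bclp = 128, max_backlight = 0x1000:
	 *   level = 128 * 0x1000 / 255 = 0x808  (about 50% duty cycle)
	 * and the value reported back to the firmware is
	 *   cblv  = 128 * 0x64 / 0xff  = 50     (percent, VALID bit set)
	 */
	u32 level = bclp * max_backlight / 255;
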
+ #define ASLE_ALS_EN    (1<<0)
+ #define ASLE_BLC_EN    (1<<1)
+ #define ASLE_PFIT_EN   (1<<2)
+@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
+ 			unsigned long irqflags;
+ 
+ 			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+-			i915_enable_pipestat(dev_priv, 1,
+-					     I915_LEGACY_BLC_EVENT_ENABLE);
++			intel_enable_asle(dev);
+ 			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
+ 					       irqflags);
+ 		}
+@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
+ 	int err = 0;
+ 
+ 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+-	DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
++	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+ 	if (asls == 0) {
+-		DRM_DEBUG("ACPI OpRegion not supported!\n");
++		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+ 		return -ENOTSUPP;
+ 	}
+ 
+@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
+ 
+ 	opregion->header = base;
+ 	if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+-		DRM_DEBUG("opregion signature mismatch\n");
++		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ 		err = -EINVAL;
+ 		goto err_out;
+ 	}
+ 
+ 	mboxes = opregion->header->mboxes;
+ 	if (mboxes & MBOX_ACPI) {
+-		DRM_DEBUG("Public ACPI methods supported\n");
++		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
+ 		if (drm_core_check_feature(dev, DRIVER_MODESET))
+ 			intel_didl_outputs(dev);
+ 	} else {
+-		DRM_DEBUG("Public ACPI methods not supported\n");
++		DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
+ 		err = -ENOTSUPP;
+ 		goto err_out;
+ 	}
+ 	opregion->enabled = 1;
+ 
+ 	if (mboxes & MBOX_SWSCI) {
+-		DRM_DEBUG("SWSCI supported\n");
++		DRM_DEBUG_DRIVER("SWSCI supported\n");
+ 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+ 	}
+ 	if (mboxes & MBOX_ASLE) {
+-		DRM_DEBUG("ASLE supported\n");
++		DRM_DEBUG_DRIVER("ASLE supported\n");
+ 		opregion->asle = base + OPREGION_ASLE_OFFSET;
+ 		opregion_enable_asle(dev);
+ 	}
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index cc9b49a..ab1bd2d 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -140,6 +140,7 @@
+ #define MI_NOOP			MI_INSTR(0, 0)
+ #define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
+ #define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
++#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
+ #define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+ #define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+ #define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+@@ -151,7 +152,13 @@
+ #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
+ #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+ #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
++#define MI_OVERLAY_FLIP		MI_INSTR(0x11,0)
++#define   MI_OVERLAY_CONTINUE	(0x0<<21)
++#define   MI_OVERLAY_ON		(0x1<<21)
++#define   MI_OVERLAY_OFF	(0x2<<21)
+ #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
++#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
++#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+ #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+ #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
+ #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
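
MI_DISPLAY_FLIP is the packet the new page-flip path emits on the
legacy ring; the length field of 2 in MI_INSTR(0x14, 2) makes it a
four-dword packet. A sketch of the emission sequence, assuming the
2.6.33 intel_display.c flip code and its ring macros (plane, fb and
obj_priv stand in for the flip's plane number, framebuffer and backing
object):

	BEGIN_LP_RING(4);
	OUT_RING(MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(plane));
	OUT_RING(fb->pitch);
	OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
	OUT_RING((fb->height << 16) | fb->width);
	ADVANCE_LP_RING();
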
+@@ -260,6 +267,8 @@
+ #define HWS_PGA		0x02080
+ #define HWS_ADDRESS_MASK	0xfffff000
+ #define HWS_START_ADDRESS_SHIFT	4
++#define PWRCTXA		0x2088 /* 965GM+ only */
++#define   PWRCTX_EN	(1<<0)
+ #define IPEIR		0x02088
+ #define IPEHR		0x0208c
+ #define INSTDONE	0x02090
+@@ -443,7 +452,7 @@
+ #define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
+ #define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
+ #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
+-#define   DPLL_FPA01_P1_POST_DIV_MASK_IGD	0x00ff8000 /* IGD */
++#define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
+ 
+ #define I915_FIFO_UNDERRUN_STATUS		(1UL<<31)
+ #define I915_CRC_ERROR_ENABLE			(1UL<<29)
+@@ -520,7 +529,7 @@
+  */
+ #define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+ #define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
+-#define   DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
++#define   DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
+ /* i830, required in DVO non-gang */
+ #define   PLL_P2_DIVIDE_BY_4		(1 << 23)
+ #define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
+@@ -530,7 +539,7 @@
+ #define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+ #define   PLL_REF_INPUT_MASK		(3 << 13)
+ #define   PLL_LOAD_PULSE_PHASE_SHIFT		9
+-/* IGDNG */
++/* Ironlake */
+ # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
+ # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
+ # define PLL_REF_SDVO_HDMI_MULTIPLIER(x)	(((x)-1) << 9)
+@@ -594,12 +603,12 @@
+ #define FPB0	0x06048
+ #define FPB1	0x0604c
+ #define   FP_N_DIV_MASK		0x003f0000
+-#define   FP_N_IGD_DIV_MASK	0x00ff0000
++#define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
+ #define   FP_N_DIV_SHIFT		16
+ #define   FP_M1_DIV_MASK	0x00003f00
+ #define   FP_M1_DIV_SHIFT		 8
+ #define   FP_M2_DIV_MASK	0x0000003f
+-#define   FP_M2_IGD_DIV_MASK	0x000000ff
++#define   FP_M2_PINEVIEW_DIV_MASK	0x000000ff
+ #define   FP_M2_DIV_SHIFT		 0
+ #define DPLL_TEST	0x606c
+ #define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
+@@ -777,7 +786,8 @@
+ 
+ /** GM965 GM45 render standby register */
+ #define MCHBAR_RENDER_STANDBY	0x111B8
+-
++#define   RCX_SW_EXIT		(1<<23)
++#define   RSX_STATUS_MASK	0x00700000
+ #define PEG_BAND_GAP_DATA	0x14d68
+ 
+ /*
+@@ -852,7 +862,6 @@
+ #define   SDVOB_HOTPLUG_INT_EN			(1 << 26)
+ #define   SDVOC_HOTPLUG_INT_EN			(1 << 25)
+ #define   TV_HOTPLUG_INT_EN			(1 << 18)
+-#define   CRT_EOS_INT_EN			(1 << 10)
+ #define   CRT_HOTPLUG_INT_EN			(1 << 9)
+ #define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+ #define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
+@@ -879,7 +888,6 @@
+ #define   DPC_HOTPLUG_INT_STATUS		(1 << 28)
+ #define   HDMID_HOTPLUG_INT_STATUS		(1 << 27)
+ #define   DPD_HOTPLUG_INT_STATUS		(1 << 27)
+-#define   CRT_EOS_INT_STATUS			(1 << 12)
+ #define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
+ #define   TV_HOTPLUG_INT_STATUS			(1 << 10)
+ #define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+@@ -1622,7 +1630,7 @@
+ #define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
+ 
+ #define   DP_SCRAMBLING_DISABLE		(1 << 12)
+-#define   DP_SCRAMBLING_DISABLE_IGDNG	(1 << 7)
++#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
+ 
+ /** limit RGB values to avoid confusing TVs */
+ #define   DP_COLOR_RANGE_16_235		(1 << 8)
+@@ -1808,11 +1816,11 @@
+ #define   DSPFW_PLANEB_SHIFT	8
+ #define DSPFW2			0x70038
+ #define   DSPFW_CURSORA_MASK	0x00003f00
+-#define   DSPFW_CURSORA_SHIFT	16
++#define   DSPFW_CURSORA_SHIFT	8
+ #define DSPFW3			0x7003c
+ #define   DSPFW_HPLL_SR_EN	(1<<31)
+ #define   DSPFW_CURSOR_SR_SHIFT	24
+-#define   IGD_SELF_REFRESH_EN	(1<<30)
++#define   PINEVIEW_SELF_REFRESH_EN	(1<<30)
+ 
+ /* FIFO watermark sizes etc */
+ #define G4X_FIFO_LINE_SIZE	64
+@@ -1828,16 +1836,16 @@
+ #define G4X_MAX_WM		0x3f
+ #define I915_MAX_WM		0x3f
+ 
+-#define IGD_DISPLAY_FIFO	512 /* in 64byte unit */
+-#define IGD_FIFO_LINE_SIZE	64
+-#define IGD_MAX_WM		0x1ff
+-#define IGD_DFT_WM		0x3f
+-#define IGD_DFT_HPLLOFF_WM	0
+-#define IGD_GUARD_WM		10
+-#define IGD_CURSOR_FIFO		64
+-#define IGD_CURSOR_MAX_WM	0x3f
+-#define IGD_CURSOR_DFT_WM	0
+-#define IGD_CURSOR_GUARD_WM	5
++#define PINEVIEW_DISPLAY_FIFO	512 /* in 64byte unit */
++#define PINEVIEW_FIFO_LINE_SIZE	64
++#define PINEVIEW_MAX_WM		0x1ff
++#define PINEVIEW_DFT_WM		0x3f
++#define PINEVIEW_DFT_HPLLOFF_WM	0
++#define PINEVIEW_GUARD_WM		10
++#define PINEVIEW_CURSOR_FIFO		64
++#define PINEVIEW_CURSOR_MAX_WM	0x3f
++#define PINEVIEW_CURSOR_DFT_WM	0
++#define PINEVIEW_CURSOR_GUARD_WM	5
+ 
+ /*
+  * The two pipe frame counter registers are not synchronized, so
+@@ -1911,6 +1919,7 @@
+ #define   DISPPLANE_16BPP			(0x5<<26)
+ #define   DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
+ #define   DISPPLANE_32BPP			(0x7<<26)
++#define   DISPPLANE_32BPP_30BIT_NO_ALPHA	(0xa<<26)
+ #define   DISPPLANE_STEREO_ENABLE		(1<<25)
+ #define   DISPPLANE_STEREO_DISABLE		0
+ #define   DISPPLANE_SEL_PIPE_MASK		(1<<24)
+@@ -1922,7 +1931,7 @@
+ #define   DISPPLANE_NO_LINE_DOUBLE		0
+ #define   DISPPLANE_STEREO_POLARITY_FIRST	0
+ #define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+-#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* IGDNG */
++#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* Ironlake */
+ #define   DISPPLANE_TILED			(1<<10)
+ #define DSPAADDR		0x70184
+ #define DSPASTRIDE		0x70188
+@@ -1975,7 +1984,7 @@
+ # define VGA_2X_MODE				(1 << 30)
+ # define VGA_PIPE_B_SELECT			(1 << 29)
+ 
+-/* IGDNG */
++/* Ironlake */
+ 
+ #define CPU_VGACNTRL	0x41000
+ 
+@@ -2121,6 +2130,7 @@
+ #define SDE_PORTC_HOTPLUG       (1 << 9)
+ #define SDE_PORTB_HOTPLUG       (1 << 8)
+ #define SDE_SDVOB_HOTPLUG       (1 << 6)
++#define SDE_HOTPLUG_MASK	(0xf << 8)
+ 
+ #define SDEISR  0xc4000
+ #define SDEIMR  0xc4004
+@@ -2303,7 +2313,7 @@
+ #define  FDI_DP_PORT_WIDTH_X3           (2<<19)
+ #define  FDI_DP_PORT_WIDTH_X4           (3<<19)
+ #define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
+-/* IGDNG: hardwired to 1 */
++/* Ironlake: hardwired to 1 */
+ #define  FDI_TX_PLL_ENABLE              (1<<14)
+ /* both Tx and Rx */
+ #define  FDI_SCRAMBLING_ENABLE          (0<<7)
+diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
+index 7ad742f..a3b90c9 100644
+--- a/drivers/gpu/drm/i915/i915_suspend.c
++++ b/drivers/gpu/drm/i915/i915_suspend.c
+@@ -34,7 +34,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32	dpll_reg;
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
+ 	} else {
+ 		dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
+@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+ 	if (!i915_pipe_enabled(dev, pipe))
+ 		return;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ 
+ 	if (pipe == PIPE_A)
+@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+ 	if (!i915_pipe_enabled(dev, pipe))
+ 		return;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ 
+ 	if (pipe == PIPE_A)
+@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return;
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+ 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+ 	}
+@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 	/* Pipe & plane A info */
+ 	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+ 	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
+ 		dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
+ 		dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 		dev_priv->saveFPA1 = I915_READ(FPA1);
+ 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ 	}
+-	if (IS_I965G(dev) && !IS_IGDNG(dev))
++	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+ 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+ 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+ 	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+ 	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+-	if (!IS_IGDNG(dev))
++	if (!IS_IRONLAKE(dev))
+ 		dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
+ 		dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
+ 		dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
+@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 	/* Pipe & plane B info */
+ 	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+ 	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
+ 		dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
+ 		dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 		dev_priv->saveFPB1 = I915_READ(FPB1);
+ 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ 	}
+-	if (IS_I965G(dev) && !IS_IGDNG(dev))
++	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+ 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+ 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
+ 	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+ 	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+ 	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+-	if (!IS_IGDNG(dev))
++	if (!IS_IRONLAKE(dev))
+ 		dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
+ 		dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
+ 		dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
+@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	if (drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return;
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dpll_a_reg = PCH_DPLL_A;
+ 		dpll_b_reg = PCH_DPLL_B;
+ 		fpa0_reg = PCH_FPA0;
+@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 		fpb1_reg = FPB1;
+ 	}
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
+ 		I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+ 	}
+@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	/* Actually enable it */
+ 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
+ 	DRM_UDELAY(150);
+-	if (IS_I965G(dev) && !IS_IGDNG(dev))
++	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ 	DRM_UDELAY(150);
+ 
+@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+ 	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+ 	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+-	if (!IS_IGDNG(dev))
++	if (!IS_IRONLAKE(dev))
+ 		I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+ 		I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+ 		I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	/* Actually enable it */
+ 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
+ 	DRM_UDELAY(150);
+-	if (IS_I965G(dev) && !IS_IGDNG(dev))
++	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ 	DRM_UDELAY(150);
+ 
+@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
+ 	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+ 	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+ 	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+-	if (!IS_IGDNG(dev))
++	if (!IS_IRONLAKE(dev))
+ 		I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+ 		I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+ 		I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
+ 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+ 
+ 	/* CRT state */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->saveADPA = I915_READ(PCH_ADPA);
+ 	} else {
+ 		dev_priv->saveADPA = I915_READ(ADPA);
+ 	}
+ 
+ 	/* LVDS state */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
+ 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
+ 		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
+@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
+ 			dev_priv->saveLVDS = I915_READ(LVDS);
+ 	}
+ 
+-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
++	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+ 		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
+ 		dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
+ 		dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
+@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
+ 	dev_priv->saveVGA0 = I915_READ(VGA0);
+ 	dev_priv->saveVGA1 = I915_READ(VGA1);
+ 	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
+ 	else
+ 		dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
+ 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+ 
+ 	/* CRT state */
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
+ 	else
+ 		I915_WRITE(ADPA, dev_priv->saveADPA);
+ 
+ 	/* LVDS state */
+-	if (IS_I965G(dev) && !IS_IGDNG(dev))
++	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
+ 	} else if (IS_MOBILE(dev) && !IS_I830(dev))
+ 		I915_WRITE(LVDS, dev_priv->saveLVDS);
+ 
+-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
++	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
+ 		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
+ 		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ 		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev)
+ 	}
+ 
+ 	/* VGA state */
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
+ 	else
+ 		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+@@ -732,17 +732,13 @@ int i915_save_state(struct drm_device *dev)
+ 
+ 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ 
+-	/* Render Standby */
+-	if (IS_I965G(dev) && IS_MOBILE(dev))
+-		dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+-
+ 	/* Hardware status page */
+ 	dev_priv->saveHWS = I915_READ(HWS_PGA);
+ 
+ 	i915_save_display(dev);
+ 
+ 	/* Interrupt state */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		dev_priv->saveDEIER = I915_READ(DEIER);
+ 		dev_priv->saveDEIMR = I915_READ(DEIMR);
+ 		dev_priv->saveGTIER = I915_READ(GTIER);
+@@ -754,10 +750,6 @@ int i915_save_state(struct drm_device *dev)
+ 		dev_priv->saveIMR = I915_READ(IMR);
+ 	}
+ 
+-	/* Clock gating state */
+-	dev_priv->saveD_STATE = I915_READ(D_STATE);
+-	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
+-
+ 	/* Cache mode state */
+ 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+ 
+@@ -795,10 +787,6 @@ int i915_restore_state(struct drm_device *dev)
+ 
+ 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ 
+-	/* Render Standby */
+-	if (IS_I965G(dev) && IS_MOBILE(dev))
+-		I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+-
+ 	/* Hardware status page */
+ 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+ 
+@@ -817,7 +805,7 @@ int i915_restore_state(struct drm_device *dev)
+ 	i915_restore_display(dev);
+ 
+ 	/* Interrupt state */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(DEIER, dev_priv->saveDEIER);
+ 		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
+ 		I915_WRITE(GTIER, dev_priv->saveGTIER);
+@@ -830,8 +818,7 @@ int i915_restore_state(struct drm_device *dev)
+ 	}
+ 
+ 	/* Clock gating state */
+-	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
+-	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
++	intel_init_clock_gating(dev);
+ 
+ 	/* Cache mode state */
+ 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index 96cd256..15fbc1b 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -33,6 +33,8 @@
+ #define	SLAVE_ADDR1	0x70
+ #define	SLAVE_ADDR2	0x72
+ 
++static int panel_type;
++
+ static void *
+ find_section(struct bdb_header *bdb, int section_id)
+ {
+@@ -114,6 +116,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ 	struct lvds_dvo_timing *dvo_timing;
+ 	struct drm_display_mode *panel_fixed_mode;
+ 	int lfp_data_size, dvo_timing_offset;
++	int i, temp_downclock;
++	struct drm_display_mode *temp_mode;
+ 
+ 	/* Defaults if we can't find VBT info */
+ 	dev_priv->lvds_dither = 0;
+@@ -126,6 +130,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ 	dev_priv->lvds_dither = lvds_options->pixel_dither;
+ 	if (lvds_options->panel_type == 0xff)
+ 		return;
++	panel_type = lvds_options->panel_type;
+ 
+ 	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
+ 	if (!lvds_lfp_data)
+@@ -159,9 +164,50 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
+ 
+ 	dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
+ 
+-	DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
++	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
+ 	drm_mode_debug_printmodeline(panel_fixed_mode);
+ 
++	temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
++	temp_downclock = panel_fixed_mode->clock;
++	/*
++	 * Enumerate the LVDS panel timing info entries in the VBT to check
++	 * whether an LVDS downclock is available.
++	 */
++	for (i = 0; i < 16; i++) {
++		entry = (struct bdb_lvds_lfp_data_entry *)
++			((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
++		dvo_timing = (struct lvds_dvo_timing *)
++			((unsigned char *)entry + dvo_timing_offset);
++
++		fill_detail_timing_data(temp_mode, dvo_timing);
++
++		if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
++		temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
++		temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
++		temp_mode->htotal == panel_fixed_mode->htotal &&
++		temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
++		temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
++		temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
++		temp_mode->vtotal == panel_fixed_mode->vtotal &&
++		temp_mode->clock < temp_downclock) {
++			/*
++			 * A downclock was already found, but keep
++			 * looking for an even lower one.
++			 */
++			temp_downclock = temp_mode->clock;
++		}
++		/* clear it to zero */
++		memset(temp_mode, 0, sizeof(*temp_mode));
++	}
++	kfree(temp_mode);
++	if (temp_downclock < panel_fixed_mode->clock &&
++	    i915_lvds_downclock) {
++		dev_priv->lvds_downclock_avail = 1;
++		dev_priv->lvds_downclock = temp_downclock;
++		DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
++				"Normal Clock %dKHz, downclock %dKHz\n",
++				panel_fixed_mode->clock, temp_downclock);
++	}
+ 	return;
+ }
+ 
+@@ -217,7 +263,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
+ 			if (IS_I85X(dev_priv->dev))
+ 				dev_priv->lvds_ssc_freq =
+ 					general->ssc_freq ? 66 : 48;
+-			else if (IS_IGDNG(dev_priv->dev))
++			else if (IS_IRONLAKE(dev_priv->dev))
+ 				dev_priv->lvds_ssc_freq =
+ 					general->ssc_freq ? 100 : 120;
+ 			else
+@@ -241,22 +287,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
+ 		GPIOF,
+ 	};
+ 
+-	/* Set sensible defaults in case we can't find the general block
+-	   or it is the wrong chipset */
+-	dev_priv->crt_ddc_bus = -1;
+-
+ 	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ 	if (general) {
+ 		u16 block_size = get_blocksize(general);
+ 		if (block_size >= sizeof(*general)) {
+ 			int bus_pin = general->crt_ddc_gmbus_pin;
+-			DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
++			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ 			if ((bus_pin >= 1) && (bus_pin <= 6)) {
+ 				dev_priv->crt_ddc_bus =
+ 					crt_bus_map_table[bus_pin-1];
+ 			}
+ 		} else {
+-			DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
++			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
+ 				  block_size);
+ 		}
+ 	}
+@@ -274,7 +316,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ 
+ 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+ 	if (!p_defs) {
+-		DRM_DEBUG("No general definition block is found\n");
++		DRM_DEBUG_KMS("No general definition block is found\n");
+ 		return;
+ 	}
+ 	/* judge whether the size of child device meets the requirements.
+@@ -284,7 +326,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ 	 */
+ 	if (p_defs->child_dev_size != sizeof(*p_child)) {
+ 		/* different child dev size . Ignore it */
+-		DRM_DEBUG("different child size is found. Invalid.\n");
++		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+ 		return;
+ 	}
+ 	/* get the block size of general definitions */
+@@ -310,11 +352,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ 		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
+ 			p_child->dvo_port != DEVICE_PORT_DVOC) {
+ 			/* skip the incorrect SDVO port */
+-			DRM_DEBUG("Incorrect SDVO port. Skip it \n");
++			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
+ 			continue;
+ 		}
+-		DRM_DEBUG("the SDVO device with slave addr %2x is found on "
+-				"%s port\n",
++		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
++				" %s port\n",
+ 				p_child->slave_addr,
+ 				(p_child->dvo_port == DEVICE_PORT_DVOB) ?
+ 					"SDVOB" : "SDVOC");
+@@ -325,21 +367,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
+ 			p_mapping->dvo_wiring = p_child->dvo_wiring;
+ 			p_mapping->initialized = 1;
+ 		} else {
+-			DRM_DEBUG("Maybe one SDVO port is shared by "
++			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
+ 					 "two SDVO device.\n");
+ 		}
+ 		if (p_child->slave2_addr) {
+ 			/* Maybe this is a SDVO device with multiple inputs */
+ 			/* And the mapping info is not added */
+-			DRM_DEBUG("there exists the slave2_addr. Maybe this "
+-				"is a SDVO device with multiple inputs.\n");
++			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
++				" is a SDVO device with multiple inputs.\n");
+ 		}
+ 		count++;
+ 	}
+ 
+ 	if (!count) {
+ 		/* No SDVO device info is found */
+-		DRM_DEBUG("No SDVO device info is found in VBT\n");
++		DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ 	}
+ 	return;
+ }
+@@ -366,6 +408,98 @@ parse_driver_features(struct drm_i915_private *dev_priv,
+ 		dev_priv->render_reclock_avail = true;
+ }
+ 
++static void
++parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
++{
++	struct bdb_edp *edp;
++
++	edp = find_section(bdb, BDB_EDP);
++	if (!edp) {
++		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
++			DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported, "
++				      "assume 18bpp panel color depth.\n");
++			dev_priv->edp_bpp = 18;
++		}
++		return;
++	}
++
++	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
++	case EDP_18BPP:
++		dev_priv->edp_bpp = 18;
++		break;
++	case EDP_24BPP:
++		dev_priv->edp_bpp = 24;
++		break;
++	case EDP_30BPP:
++		dev_priv->edp_bpp = 30;
++		break;
++	}
++}
++
++static void
++parse_device_mapping(struct drm_i915_private *dev_priv,
++		       struct bdb_header *bdb)
++{
++	struct bdb_general_definitions *p_defs;
++	struct child_device_config *p_child, *child_dev_ptr;
++	int i, child_device_num, count;
++	u16	block_size;
++
++	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
++	if (!p_defs) {
++		DRM_DEBUG_KMS("No general definition block is found\n");
++		return;
++	}
++	/* judge whether the size of child device meets the requirements.
++	 * If the child device size obtained from general definition block
++	 * is different with sizeof(struct child_device_config), skip the
++	 * parsing of sdvo device info
++	 */
++	if (p_defs->child_dev_size != sizeof(*p_child)) {
++		/* different child dev size . Ignore it */
++		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
++		return;
++	}
++	/* get the block size of general definitions */
++	block_size = get_blocksize(p_defs);
++	/* get the number of child device */
++	child_device_num = (block_size - sizeof(*p_defs)) /
++				sizeof(*p_child);
++	count = 0;
++	/* get the number of child device that is present */
++	for (i = 0; i < child_device_num; i++) {
++		p_child = &(p_defs->devices[i]);
++		if (!p_child->device_type) {
++			/* skip the device block if device type is invalid */
++			continue;
++		}
++		count++;
++	}
++	if (!count) {
++		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
++		return;
++	}
++	dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
++	if (!dev_priv->child_dev) {
++		DRM_DEBUG_KMS("No memory space for child device\n");
++		return;
++	}
++
++	dev_priv->child_dev_num = count;
++	count = 0;
++	for (i = 0; i < child_device_num; i++) {
++		p_child = &(p_defs->devices[i]);
++		if (!p_child->device_type) {
++			/* skip the device block if device type is invalid */
++			continue;
++		}
++		child_dev_ptr = dev_priv->child_dev + count;
++		count++;
++		memcpy((void *)child_dev_ptr, (void *)p_child,
++					sizeof(*p_child));
++	}
++	return;
++}
+ /**
+  * intel_init_bios - initialize VBIOS settings & find VBT
+  * @dev: DRM device
+@@ -417,7 +551,9 @@ intel_init_bios(struct drm_device *dev)
+ 	parse_lfp_panel_data(dev_priv, bdb);
+ 	parse_sdvo_panel_data(dev_priv, bdb);
+ 	parse_sdvo_device_mapping(dev_priv, bdb);
++	parse_device_mapping(dev_priv, bdb);
+ 	parse_driver_features(dev_priv, bdb);
++	parse_edp(dev_priv, bdb);
+ 
+ 	pci_unmap_rom(pdev, bios);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
+index 0f8e5f6..4c18514 100644
+--- a/drivers/gpu/drm/i915/intel_bios.h
++++ b/drivers/gpu/drm/i915/intel_bios.h
+@@ -98,6 +98,7 @@ struct vbios_data {
+ #define BDB_SDVO_LVDS_PNP_IDS	 24
+ #define BDB_SDVO_LVDS_POWER_SEQ	 25
+ #define BDB_TV_OPTIONS		 26
++#define BDB_EDP			 27
+ #define BDB_LVDS_OPTIONS	 40
+ #define BDB_LVDS_LFP_DATA_PTRS	 41
+ #define BDB_LVDS_LFP_DATA	 42
+@@ -426,6 +427,45 @@ struct bdb_driver_features {
+ 	u8 custom_vbt_version;
+ } __attribute__((packed));
+ 
++#define EDP_18BPP	0
++#define EDP_24BPP	1
++#define EDP_30BPP	2
++#define EDP_RATE_1_62	0
++#define EDP_RATE_2_7	1
++#define EDP_LANE_1	0
++#define EDP_LANE_2	1
++#define EDP_LANE_4	3
++#define EDP_PREEMPHASIS_NONE	0
++#define EDP_PREEMPHASIS_3_5dB	1
++#define EDP_PREEMPHASIS_6dB	2
++#define EDP_PREEMPHASIS_9_5dB	3
++#define EDP_VSWING_0_4V		0
++#define EDP_VSWING_0_6V		1
++#define EDP_VSWING_0_8V		2
++#define EDP_VSWING_1_2V		3
++
++struct edp_power_seq {
++	u16 t3;
++	u16 t7;
++	u16 t9;
++	u16 t10;
++	u16 t12;
++} __attribute__ ((packed));
++
++struct edp_link_params {
++	u8 rate:4;
++	u8 lanes:4;
++	u8 preemphasis:4;
++	u8 vswing:4;
++} __attribute__ ((packed));
++
++struct bdb_edp {
++	struct edp_power_seq power_seqs[16];
++	u32 color_depth;
++	u32 sdrrs_msa_timing_delay;
++	struct edp_link_params link_params[16];
++} __attribute__ ((packed));
++
+ bool intel_init_bios(struct drm_device *dev);
+ 
+ /*
+@@ -549,4 +589,21 @@ bool intel_init_bios(struct drm_device *dev);
+ #define   SWF14_APM_STANDBY	0x1
+ #define   SWF14_APM_RESTORE	0x0
+ 
++/* Add the device class for LFP, TV, HDMI */
++#define	 DEVICE_TYPE_INT_LFP	0x1022
++#define	 DEVICE_TYPE_INT_TV	0x1009
++#define	 DEVICE_TYPE_HDMI	0x60D2
++#define	 DEVICE_TYPE_DP		0x68C6
++#define	 DEVICE_TYPE_eDP	0x78C6
++
++/* define the DVO port for HDMI output type */
++#define		DVO_B		1
++#define		DVO_C		2
++#define		DVO_D		3
++
++/* define the PORT for DP output type */
++#define		PORT_IDPB	7
++#define		PORT_IDPC	8
++#define		PORT_IDPD	9
++
+ #endif /* _I830_BIOS_H_ */
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 5e730e6..79dd402 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 temp, reg;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		reg = PCH_ADPA;
+ 	else
+ 		reg = ADPA;
+@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+ 	}
+ 
+ 	I915_WRITE(reg, temp);
+-
+-	if (IS_IGD(dev)) {
+-		if (mode == DRM_MODE_DPMS_OFF) {
+-			/* turn off DAC */
+-			temp = I915_READ(PORT_HOTPLUG_EN);
+-			temp &= ~CRT_EOS_INT_EN;
+-			I915_WRITE(PORT_HOTPLUG_EN, temp);
+-
+-			temp = I915_READ(PORT_HOTPLUG_STAT);
+-			if (temp & CRT_EOS_INT_STATUS)
+-				I915_WRITE(PORT_HOTPLUG_STAT,
+-					CRT_EOS_INT_STATUS);
+-		} else {
+-			/* turn on DAC. EOS interrupt must be enabled after DAC
+-			 * is enabled, so it sounds not good to enable it in
+-			 * i915_driver_irq_postinstall()
+-			 * wait 12.5ms after DAC is enabled
+-			 */
+-			msleep(13);
+-			temp = I915_READ(PORT_HOTPLUG_STAT);
+-			if (temp & CRT_EOS_INT_STATUS)
+-				I915_WRITE(PORT_HOTPLUG_STAT,
+-					CRT_EOS_INT_STATUS);
+-			temp = I915_READ(PORT_HOTPLUG_EN);
+-			temp |= CRT_EOS_INT_EN;
+-			I915_WRITE(PORT_HOTPLUG_EN, temp);
+-		}
+-	}
+ }
+ 
+ static int intel_crt_mode_valid(struct drm_connector *connector,
+@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
+ 	else
+ 		dpll_md_reg = DPLL_B_MD;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		adpa_reg = PCH_ADPA;
+ 	else
+ 		adpa_reg = ADPA;
+@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
+ 	 * Disable separate mode multiplier used when cloning SDVO to CRT
+ 	 * XXX this needs to be adjusted when we really are cloning
+ 	 */
+-	if (IS_I965G(dev) && !IS_IGDNG(dev)) {
++	if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ 		dpll_md = I915_READ(dpll_md_reg);
+ 		I915_WRITE(dpll_md_reg,
+ 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
+@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
+ 
+ 	if (intel_crtc->pipe == 0) {
+ 		adpa |= ADPA_PIPE_A_SELECT;
+-		if (!IS_IGDNG(dev))
++		if (!IS_IRONLAKE(dev))
+ 			I915_WRITE(BCLRPAT_A, 0);
+ 	} else {
+ 		adpa |= ADPA_PIPE_B_SELECT;
+-		if (!IS_IGDNG(dev))
++		if (!IS_IRONLAKE(dev))
+ 			I915_WRITE(BCLRPAT_B, 0);
+ 	}
+ 
+ 	I915_WRITE(adpa_reg, adpa);
+ }
+ 
+-static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
++static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
+ {
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -197,7 +169,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
+ 			ADPA_CRT_HOTPLUG_ENABLE |
+ 			ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
+ 
+-	DRM_DEBUG("pch crt adpa 0x%x", adpa);
++	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
+ 	I915_WRITE(PCH_ADPA, adpa);
+ 
+ 	while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
+@@ -230,8 +202,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
+ 	u32 hotplug_en;
+ 	int i, tries = 0;
+ 
+-	if (IS_IGDNG(dev))
+-		return intel_igdng_crt_detect_hotplug(connector);
++	if (IS_IRONLAKE(dev))
++		return intel_ironlake_crt_detect_hotplug(connector);
+ 
+ 	/*
+ 	 * On 4 series desktop, CRT detect sequence need to be done twice
+@@ -552,12 +524,12 @@ void intel_crt_init(struct drm_device *dev)
+ 					  &intel_output->enc);
+ 
+ 	/* Set up the DDC bus. */
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		i2c_reg = PCH_GPIOA;
+ 	else {
+ 		i2c_reg = GPIOA;
+ 		/* Use VBT information for CRT DDC if available */
+-		if (dev_priv->crt_ddc_bus != -1)
++		if (dev_priv->crt_ddc_bus != 0)
+ 			i2c_reg = dev_priv->crt_ddc_bus;
+ 	}
+ 	intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index b00a1aa..b27202d 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -32,7 +32,7 @@
+ #include "intel_drv.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+-#include "intel_dp.h"
++#include "drm_dp_helper.h"
+ 
+ #include "drm_crtc_helper.h"
+ 
+@@ -70,8 +70,6 @@ struct intel_limit {
+     intel_p2_t	    p2;
+     bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
+ 		      int, int, intel_clock_t *);
+-    bool (* find_reduced_pll)(const intel_limit_t *, struct drm_crtc *,
+-			      int, int, intel_clock_t *);
+ };
+ 
+ #define I8XX_DOT_MIN		  25000
+@@ -102,32 +100,32 @@ struct intel_limit {
+ #define I9XX_DOT_MAX		 400000
+ #define I9XX_VCO_MIN		1400000
+ #define I9XX_VCO_MAX		2800000
+-#define IGD_VCO_MIN		1700000
+-#define IGD_VCO_MAX		3500000
++#define PINEVIEW_VCO_MIN		1700000
++#define PINEVIEW_VCO_MAX		3500000
+ #define I9XX_N_MIN		      1
+ #define I9XX_N_MAX		      6
+-/* IGD's Ncounter is a ring counter */
+-#define IGD_N_MIN		      3
+-#define IGD_N_MAX		      6
++/* Pineview's Ncounter is a ring counter */
++#define PINEVIEW_N_MIN		      3
++#define PINEVIEW_N_MAX		      6
+ #define I9XX_M_MIN		     70
+ #define I9XX_M_MAX		    120
+-#define IGD_M_MIN		      2
+-#define IGD_M_MAX		    256
++#define PINEVIEW_M_MIN		      2
++#define PINEVIEW_M_MAX		    256
+ #define I9XX_M1_MIN		     10
+ #define I9XX_M1_MAX		     22
+ #define I9XX_M2_MIN		      5
+ #define I9XX_M2_MAX		      9
+-/* IGD M1 is reserved, and must be 0 */
+-#define IGD_M1_MIN		      0
+-#define IGD_M1_MAX		      0
+-#define IGD_M2_MIN		      0
+-#define IGD_M2_MAX		      254
++/* Pineview M1 is reserved, and must be 0 */
++#define PINEVIEW_M1_MIN		      0
++#define PINEVIEW_M1_MAX		      0
++#define PINEVIEW_M2_MIN		      0
++#define PINEVIEW_M2_MAX		      254
+ #define I9XX_P_SDVO_DAC_MIN	      5
+ #define I9XX_P_SDVO_DAC_MAX	     80
+ #define I9XX_P_LVDS_MIN		      7
+ #define I9XX_P_LVDS_MAX		     98
+-#define IGD_P_LVDS_MIN		      7
+-#define IGD_P_LVDS_MAX		     112
++#define PINEVIEW_P_LVDS_MIN		      7
++#define PINEVIEW_P_LVDS_MAX		     112
+ #define I9XX_P1_MIN		      1
+ #define I9XX_P1_MAX		      8
+ #define I9XX_P2_SDVO_DAC_SLOW		     10
+@@ -234,53 +232,108 @@ struct intel_limit {
+ #define G4X_P2_DISPLAY_PORT_FAST           10
+ #define G4X_P2_DISPLAY_PORT_LIMIT          0
+ 
+-/* IGDNG */
++/* Ironlake */
+ /* as we calculate clock using (register_value + 2) for
+    N/M1/M2, so here the range value for them is (actual_value-2).
+  */
+-#define IGDNG_DOT_MIN         25000
+-#define IGDNG_DOT_MAX         350000
+-#define IGDNG_VCO_MIN         1760000
+-#define IGDNG_VCO_MAX         3510000
+-#define IGDNG_N_MIN           1
+-#define IGDNG_N_MAX           5
+-#define IGDNG_M_MIN           79
+-#define IGDNG_M_MAX           118
+-#define IGDNG_M1_MIN          12
+-#define IGDNG_M1_MAX          23
+-#define IGDNG_M2_MIN          5
+-#define IGDNG_M2_MAX          9
+-#define IGDNG_P_SDVO_DAC_MIN  5
+-#define IGDNG_P_SDVO_DAC_MAX  80
+-#define IGDNG_P_LVDS_MIN      28
+-#define IGDNG_P_LVDS_MAX      112
+-#define IGDNG_P1_MIN          1
+-#define IGDNG_P1_MAX          8
+-#define IGDNG_P2_SDVO_DAC_SLOW 10
+-#define IGDNG_P2_SDVO_DAC_FAST 5
+-#define IGDNG_P2_LVDS_SLOW    14 /* single channel */
+-#define IGDNG_P2_LVDS_FAST    7  /* double channel */
+-#define IGDNG_P2_DOT_LIMIT    225000 /* 225Mhz */
++#define IRONLAKE_DOT_MIN         25000
++#define IRONLAKE_DOT_MAX         350000
++#define IRONLAKE_VCO_MIN         1760000
++#define IRONLAKE_VCO_MAX         3510000
++#define IRONLAKE_M1_MIN          12
++#define IRONLAKE_M1_MAX          22
++#define IRONLAKE_M2_MIN          5
++#define IRONLAKE_M2_MAX          9
++#define IRONLAKE_P2_DOT_LIMIT    225000 /* 225MHz */
++
++/* We have parameter ranges for different types of output. */
++
++/* DAC & HDMI Refclk 120MHz */
++#define IRONLAKE_DAC_N_MIN	1
++#define IRONLAKE_DAC_N_MAX	5
++#define IRONLAKE_DAC_M_MIN	79
++#define IRONLAKE_DAC_M_MAX	127
++#define IRONLAKE_DAC_P_MIN	5
++#define IRONLAKE_DAC_P_MAX	80
++#define IRONLAKE_DAC_P1_MIN	1
++#define IRONLAKE_DAC_P1_MAX	8
++#define IRONLAKE_DAC_P2_SLOW	10
++#define IRONLAKE_DAC_P2_FAST	5
++
++/* LVDS single-channel 120MHz refclk */
++#define IRONLAKE_LVDS_S_N_MIN	1
++#define IRONLAKE_LVDS_S_N_MAX	3
++#define IRONLAKE_LVDS_S_M_MIN	79
++#define IRONLAKE_LVDS_S_M_MAX	118
++#define IRONLAKE_LVDS_S_P_MIN	28
++#define IRONLAKE_LVDS_S_P_MAX	112
++#define IRONLAKE_LVDS_S_P1_MIN	2
++#define IRONLAKE_LVDS_S_P1_MAX	8
++#define IRONLAKE_LVDS_S_P2_SLOW	14
++#define IRONLAKE_LVDS_S_P2_FAST	14
++
++/* LVDS dual-channel 120MHz refclk */
++#define IRONLAKE_LVDS_D_N_MIN	1
++#define IRONLAKE_LVDS_D_N_MAX	3
++#define IRONLAKE_LVDS_D_M_MIN	79
++#define IRONLAKE_LVDS_D_M_MAX	127
++#define IRONLAKE_LVDS_D_P_MIN	14
++#define IRONLAKE_LVDS_D_P_MAX	56
++#define IRONLAKE_LVDS_D_P1_MIN	2
++#define IRONLAKE_LVDS_D_P1_MAX	8
++#define IRONLAKE_LVDS_D_P2_SLOW	7
++#define IRONLAKE_LVDS_D_P2_FAST	7
++
++/* LVDS single-channel 100MHz refclk */
++#define IRONLAKE_LVDS_S_SSC_N_MIN	1
++#define IRONLAKE_LVDS_S_SSC_N_MAX	2
++#define IRONLAKE_LVDS_S_SSC_M_MIN	79
++#define IRONLAKE_LVDS_S_SSC_M_MAX	126
++#define IRONLAKE_LVDS_S_SSC_P_MIN	28
++#define IRONLAKE_LVDS_S_SSC_P_MAX	112
++#define IRONLAKE_LVDS_S_SSC_P1_MIN	2
++#define IRONLAKE_LVDS_S_SSC_P1_MAX	8
++#define IRONLAKE_LVDS_S_SSC_P2_SLOW	14
++#define IRONLAKE_LVDS_S_SSC_P2_FAST	14
++
++/* LVDS dual-channel 100MHz refclk */
++#define IRONLAKE_LVDS_D_SSC_N_MIN	1
++#define IRONLAKE_LVDS_D_SSC_N_MAX	3
++#define IRONLAKE_LVDS_D_SSC_M_MIN	79
++#define IRONLAKE_LVDS_D_SSC_M_MAX	126
++#define IRONLAKE_LVDS_D_SSC_P_MIN	14
++#define IRONLAKE_LVDS_D_SSC_P_MAX	42
++#define IRONLAKE_LVDS_D_SSC_P1_MIN	2
++#define IRONLAKE_LVDS_D_SSC_P1_MAX	6
++#define IRONLAKE_LVDS_D_SSC_P2_SLOW	7
++#define IRONLAKE_LVDS_D_SSC_P2_FAST	7
++
++/* DisplayPort */
++#define IRONLAKE_DP_N_MIN		1
++#define IRONLAKE_DP_N_MAX		2
++#define IRONLAKE_DP_M_MIN		81
++#define IRONLAKE_DP_M_MAX		90
++#define IRONLAKE_DP_P_MIN		10
++#define IRONLAKE_DP_P_MAX		20
++#define IRONLAKE_DP_P2_FAST		10
++#define IRONLAKE_DP_P2_SLOW		10
++#define IRONLAKE_DP_P2_LIMIT		0
++#define IRONLAKE_DP_P1_MIN		1
++#define IRONLAKE_DP_P1_MAX		2
+ 
+ static bool
+ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 		    int target, int refclk, intel_clock_t *best_clock);
+ static bool
+-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-			    int target, int refclk, intel_clock_t *best_clock);
+-static bool
+ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 			int target, int refclk, intel_clock_t *best_clock);
+-static bool
+-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-			int target, int refclk, intel_clock_t *best_clock);
+ 
+ static bool
+ intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
+ 		      int target, int refclk, intel_clock_t *best_clock);
+ static bool
+-intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc,
+-		      int target, int refclk, intel_clock_t *best_clock);
++intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
++			   int target, int refclk, intel_clock_t *best_clock);
+ 
+ static const intel_limit_t intel_limits_i8xx_dvo = {
+         .dot = { .min = I8XX_DOT_MIN,		.max = I8XX_DOT_MAX },
+@@ -294,7 +347,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
+ 	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+ 		 .p2_slow = I8XX_P2_SLOW,	.p2_fast = I8XX_P2_FAST },
+ 	.find_pll = intel_find_best_PLL,
+-	.find_reduced_pll = intel_find_best_reduced_PLL,
+ };
+ 
+ static const intel_limit_t intel_limits_i8xx_lvds = {
+@@ -309,7 +361,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
+ 	.p2  = { .dot_limit = I8XX_P2_SLOW_LIMIT,
+ 		 .p2_slow = I8XX_P2_LVDS_SLOW,	.p2_fast = I8XX_P2_LVDS_FAST },
+ 	.find_pll = intel_find_best_PLL,
+-	.find_reduced_pll = intel_find_best_reduced_PLL,
+ };
+ 	
+ static const intel_limit_t intel_limits_i9xx_sdvo = {
+@@ -324,7 +375,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
+ 	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+ 		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ 	.find_pll = intel_find_best_PLL,
+-	.find_reduced_pll = intel_find_best_reduced_PLL,
+ };
+ 
+ static const intel_limit_t intel_limits_i9xx_lvds = {
+@@ -342,7 +392,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
+ 	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ 		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_FAST },
+ 	.find_pll = intel_find_best_PLL,
+-	.find_reduced_pll = intel_find_best_reduced_PLL,
+ };
+ 
+     /* below parameter and function is for G4X Chipset Family*/
+@@ -360,7 +409,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
+ 		 .p2_fast = G4X_P2_SDVO_FAST
+ 	},
+ 	.find_pll = intel_g4x_find_best_PLL,
+-	.find_reduced_pll = intel_g4x_find_best_PLL,
+ };
+ 
+ static const intel_limit_t intel_limits_g4x_hdmi = {
+@@ -377,7 +425,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
+ 		 .p2_fast = G4X_P2_HDMI_DAC_FAST
+ 	},
+ 	.find_pll = intel_g4x_find_best_PLL,
+-	.find_reduced_pll = intel_g4x_find_best_PLL,
+ };
+ 
+ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+@@ -402,7 +449,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
+ 		 .p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
+ 	},
+ 	.find_pll = intel_g4x_find_best_PLL,
+-	.find_reduced_pll = intel_g4x_find_best_PLL,
+ };
+ 
+ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+@@ -427,7 +473,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
+ 		 .p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
+ 	},
+ 	.find_pll = intel_g4x_find_best_PLL,
+-	.find_reduced_pll = intel_g4x_find_best_PLL,
+ };
+ 
+ static const intel_limit_t intel_limits_g4x_display_port = {
+@@ -453,74 +498,162 @@ static const intel_limit_t intel_limits_g4x_display_port = {
+         .find_pll = intel_find_pll_g4x_dp,
+ };
+ 
+-static const intel_limit_t intel_limits_igd_sdvo = {
++static const intel_limit_t intel_limits_pineview_sdvo = {
+         .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX},
+-        .vco = { .min = IGD_VCO_MIN,		.max = IGD_VCO_MAX },
+-        .n   = { .min = IGD_N_MIN,		.max = IGD_N_MAX },
+-        .m   = { .min = IGD_M_MIN,		.max = IGD_M_MAX },
+-        .m1  = { .min = IGD_M1_MIN,		.max = IGD_M1_MAX },
+-        .m2  = { .min = IGD_M2_MIN,		.max = IGD_M2_MAX },
++        .vco = { .min = PINEVIEW_VCO_MIN,		.max = PINEVIEW_VCO_MAX },
++        .n   = { .min = PINEVIEW_N_MIN,		.max = PINEVIEW_N_MAX },
++        .m   = { .min = PINEVIEW_M_MIN,		.max = PINEVIEW_M_MAX },
++        .m1  = { .min = PINEVIEW_M1_MIN,		.max = PINEVIEW_M1_MAX },
++        .m2  = { .min = PINEVIEW_M2_MIN,		.max = PINEVIEW_M2_MAX },
+         .p   = { .min = I9XX_P_SDVO_DAC_MIN,    .max = I9XX_P_SDVO_DAC_MAX },
+         .p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
+ 	.p2  = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
+ 		 .p2_slow = I9XX_P2_SDVO_DAC_SLOW,	.p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ 	.find_pll = intel_find_best_PLL,
+-	.find_reduced_pll = intel_find_best_reduced_PLL,
+ };
+ 
+-static const intel_limit_t intel_limits_igd_lvds = {
++static const intel_limit_t intel_limits_pineview_lvds = {
+         .dot = { .min = I9XX_DOT_MIN,		.max = I9XX_DOT_MAX },
+-        .vco = { .min = IGD_VCO_MIN,		.max = IGD_VCO_MAX },
+-        .n   = { .min = IGD_N_MIN,		.max = IGD_N_MAX },
+-        .m   = { .min = IGD_M_MIN,		.max = IGD_M_MAX },
+-        .m1  = { .min = IGD_M1_MIN,		.max = IGD_M1_MAX },
+-        .m2  = { .min = IGD_M2_MIN,		.max = IGD_M2_MAX },
+-        .p   = { .min = IGD_P_LVDS_MIN,	.max = IGD_P_LVDS_MAX },
++        .vco = { .min = PINEVIEW_VCO_MIN,		.max = PINEVIEW_VCO_MAX },
++        .n   = { .min = PINEVIEW_N_MIN,		.max = PINEVIEW_N_MAX },
++        .m   = { .min = PINEVIEW_M_MIN,		.max = PINEVIEW_M_MAX },
++        .m1  = { .min = PINEVIEW_M1_MIN,		.max = PINEVIEW_M1_MAX },
++        .m2  = { .min = PINEVIEW_M2_MIN,		.max = PINEVIEW_M2_MAX },
++        .p   = { .min = PINEVIEW_P_LVDS_MIN,	.max = PINEVIEW_P_LVDS_MAX },
+         .p1  = { .min = I9XX_P1_MIN,		.max = I9XX_P1_MAX },
+-	/* IGD only supports single-channel mode. */
++	/* Pineview only supports single-channel mode. */
+ 	.p2  = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
+ 		 .p2_slow = I9XX_P2_LVDS_SLOW,	.p2_fast = I9XX_P2_LVDS_SLOW },
+ 	.find_pll = intel_find_best_PLL,
+-	.find_reduced_pll = intel_find_best_reduced_PLL,
+ };
+ 
+-static const intel_limit_t intel_limits_igdng_sdvo = {
+-	.dot = { .min = IGDNG_DOT_MIN,          .max = IGDNG_DOT_MAX },
+-	.vco = { .min = IGDNG_VCO_MIN,          .max = IGDNG_VCO_MAX },
+-	.n   = { .min = IGDNG_N_MIN,            .max = IGDNG_N_MAX },
+-	.m   = { .min = IGDNG_M_MIN,            .max = IGDNG_M_MAX },
+-	.m1  = { .min = IGDNG_M1_MIN,           .max = IGDNG_M1_MAX },
+-	.m2  = { .min = IGDNG_M2_MIN,           .max = IGDNG_M2_MAX },
+-	.p   = { .min = IGDNG_P_SDVO_DAC_MIN,   .max = IGDNG_P_SDVO_DAC_MAX },
+-	.p1  = { .min = IGDNG_P1_MIN,           .max = IGDNG_P1_MAX },
+-	.p2  = { .dot_limit = IGDNG_P2_DOT_LIMIT,
+-		 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
+-		 .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
+-	.find_pll = intel_igdng_find_best_PLL,
++static const intel_limit_t intel_limits_ironlake_dac = {
++	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
++	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
++	.n   = { .min = IRONLAKE_DAC_N_MIN,        .max = IRONLAKE_DAC_N_MAX },
++	.m   = { .min = IRONLAKE_DAC_M_MIN,        .max = IRONLAKE_DAC_M_MAX },
++	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
++	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
++	.p   = { .min = IRONLAKE_DAC_P_MIN,	   .max = IRONLAKE_DAC_P_MAX },
++	.p1  = { .min = IRONLAKE_DAC_P1_MIN,       .max = IRONLAKE_DAC_P1_MAX },
++	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
++		 .p2_slow = IRONLAKE_DAC_P2_SLOW,
++		 .p2_fast = IRONLAKE_DAC_P2_FAST },
++	.find_pll = intel_g4x_find_best_PLL,
++};
++
++static const intel_limit_t intel_limits_ironlake_single_lvds = {
++	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
++	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
++	.n   = { .min = IRONLAKE_LVDS_S_N_MIN,     .max = IRONLAKE_LVDS_S_N_MAX },
++	.m   = { .min = IRONLAKE_LVDS_S_M_MIN,     .max = IRONLAKE_LVDS_S_M_MAX },
++	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
++	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
++	.p   = { .min = IRONLAKE_LVDS_S_P_MIN,     .max = IRONLAKE_LVDS_S_P_MAX },
++	.p1  = { .min = IRONLAKE_LVDS_S_P1_MIN,    .max = IRONLAKE_LVDS_S_P1_MAX },
++	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
++		 .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
++		 .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
++	.find_pll = intel_g4x_find_best_PLL,
+ };
+ 
+-static const intel_limit_t intel_limits_igdng_lvds = {
+-	.dot = { .min = IGDNG_DOT_MIN,          .max = IGDNG_DOT_MAX },
+-	.vco = { .min = IGDNG_VCO_MIN,          .max = IGDNG_VCO_MAX },
+-	.n   = { .min = IGDNG_N_MIN,            .max = IGDNG_N_MAX },
+-	.m   = { .min = IGDNG_M_MIN,            .max = IGDNG_M_MAX },
+-	.m1  = { .min = IGDNG_M1_MIN,           .max = IGDNG_M1_MAX },
+-	.m2  = { .min = IGDNG_M2_MIN,           .max = IGDNG_M2_MAX },
+-	.p   = { .min = IGDNG_P_LVDS_MIN,       .max = IGDNG_P_LVDS_MAX },
+-	.p1  = { .min = IGDNG_P1_MIN,           .max = IGDNG_P1_MAX },
+-	.p2  = { .dot_limit = IGDNG_P2_DOT_LIMIT,
+-		 .p2_slow = IGDNG_P2_LVDS_SLOW,
+-		 .p2_fast = IGDNG_P2_LVDS_FAST },
+-	.find_pll = intel_igdng_find_best_PLL,
++static const intel_limit_t intel_limits_ironlake_dual_lvds = {
++	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
++	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
++	.n   = { .min = IRONLAKE_LVDS_D_N_MIN,     .max = IRONLAKE_LVDS_D_N_MAX },
++	.m   = { .min = IRONLAKE_LVDS_D_M_MIN,     .max = IRONLAKE_LVDS_D_M_MAX },
++	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
++	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
++	.p   = { .min = IRONLAKE_LVDS_D_P_MIN,     .max = IRONLAKE_LVDS_D_P_MAX },
++	.p1  = { .min = IRONLAKE_LVDS_D_P1_MIN,    .max = IRONLAKE_LVDS_D_P1_MAX },
++	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
++		 .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
++		 .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
++	.find_pll = intel_g4x_find_best_PLL,
+ };
+ 
+-static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
++static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
++	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
++	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
++	.n   = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
++	.m   = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
++	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
++	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
++	.p   = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
++	.p1  = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
++	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
++		 .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
++		 .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
++	.find_pll = intel_g4x_find_best_PLL,
++};
++
++static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
++	.dot = { .min = IRONLAKE_DOT_MIN,          .max = IRONLAKE_DOT_MAX },
++	.vco = { .min = IRONLAKE_VCO_MIN,          .max = IRONLAKE_VCO_MAX },
++	.n   = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
++	.m   = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
++	.m1  = { .min = IRONLAKE_M1_MIN,           .max = IRONLAKE_M1_MAX },
++	.m2  = { .min = IRONLAKE_M2_MIN,           .max = IRONLAKE_M2_MAX },
++	.p   = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
++	.p1  = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
++	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
++		 .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
++		 .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
++	.find_pll = intel_g4x_find_best_PLL,
++};
++
++static const intel_limit_t intel_limits_ironlake_display_port = {
++        .dot = { .min = IRONLAKE_DOT_MIN,
++                 .max = IRONLAKE_DOT_MAX },
++        .vco = { .min = IRONLAKE_VCO_MIN,
++                 .max = IRONLAKE_VCO_MAX},
++        .n   = { .min = IRONLAKE_DP_N_MIN,
++                 .max = IRONLAKE_DP_N_MAX },
++        .m   = { .min = IRONLAKE_DP_M_MIN,
++                 .max = IRONLAKE_DP_M_MAX },
++        .m1  = { .min = IRONLAKE_M1_MIN,
++                 .max = IRONLAKE_M1_MAX },
++        .m2  = { .min = IRONLAKE_M2_MIN,
++                 .max = IRONLAKE_M2_MAX },
++        .p   = { .min = IRONLAKE_DP_P_MIN,
++                 .max = IRONLAKE_DP_P_MAX },
++        .p1  = { .min = IRONLAKE_DP_P1_MIN,
++                 .max = IRONLAKE_DP_P1_MAX},
++        .p2  = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
++                 .p2_slow = IRONLAKE_DP_P2_SLOW,
++                 .p2_fast = IRONLAKE_DP_P2_FAST },
++        .find_pll = intel_find_pll_ironlake_dp,
++};
++
++static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+ {
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	const intel_limit_t *limit;
+-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+-		limit = &intel_limits_igdng_lvds;
++	int refclk = 120;
++
++	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
++		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
++			refclk = 100;
++
++		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
++		    LVDS_CLKB_POWER_UP) {
++			/* LVDS dual channel */
++			if (refclk == 100)
++				limit = &intel_limits_ironlake_dual_lvds_100m;
++			else
++				limit = &intel_limits_ironlake_dual_lvds;
++		} else {
++			if (refclk == 100)
++				limit = &intel_limits_ironlake_single_lvds_100m;
++			else
++				limit = &intel_limits_ironlake_single_lvds;
++		}
++	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
++			HAS_eDP)
++		limit = &intel_limits_ironlake_display_port;
+ 	else
+-		limit = &intel_limits_igdng_sdvo;
++		limit = &intel_limits_ironlake_dac;
+ 
+ 	return limit;
+ }
+@@ -557,20 +690,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+ 	struct drm_device *dev = crtc->dev;
+ 	const intel_limit_t *limit;
+ 
+-	if (IS_IGDNG(dev))
+-		limit = intel_igdng_limit(crtc);
++	if (IS_IRONLAKE(dev))
++		limit = intel_ironlake_limit(crtc);
+ 	else if (IS_G4X(dev)) {
+ 		limit = intel_g4x_limit(crtc);
+-	} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
++	} else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
+ 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ 			limit = &intel_limits_i9xx_lvds;
+ 		else
+ 			limit = &intel_limits_i9xx_sdvo;
+-	} else if (IS_IGD(dev)) {
++	} else if (IS_PINEVIEW(dev)) {
+ 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+-			limit = &intel_limits_igd_lvds;
++			limit = &intel_limits_pineview_lvds;
+ 		else
+-			limit = &intel_limits_igd_sdvo;
++			limit = &intel_limits_pineview_sdvo;
+ 	} else {
+ 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ 			limit = &intel_limits_i8xx_lvds;
+@@ -580,8 +713,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+ 	return limit;
+ }
+ 
+-/* m1 is reserved as 0 in IGD, n is a ring counter */
+-static void igd_clock(int refclk, intel_clock_t *clock)
++/* m1 is reserved as 0 in Pineview, n is a ring counter */
++static void pineview_clock(int refclk, intel_clock_t *clock)
+ {
+ 	clock->m = clock->m2 + 2;
+ 	clock->p = clock->p1 * clock->p2;
+@@ -591,8 +724,8 @@ static void igd_clock(int refclk, intel_clock_t *clock)
+ 
+ static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
+ {
+-	if (IS_IGD(dev)) {
+-		igd_clock(refclk, clock);
++	if (IS_PINEVIEW(dev)) {
++		pineview_clock(refclk, clock);
+ 		return;
+ 	}
+ 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
+@@ -657,7 +790,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+ 		INTELPllInvalid ("m2 out of range\n");
+ 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
+ 		INTELPllInvalid ("m1 out of range\n");
+-	if (clock->m1 <= clock->m2 && !IS_IGD(dev))
++	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
+ 		INTELPllInvalid ("m1 <= m2\n");
+ 	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
+ 		INTELPllInvalid ("m out of range\n");
+@@ -706,16 +839,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 
+ 	memset (best_clock, 0, sizeof (*best_clock));
+ 
+-	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+-		for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
+-		     clock.m1++) {
+-			for (clock.m2 = limit->m2.min;
+-			     clock.m2 <= limit->m2.max; clock.m2++) {
+-				/* m1 is always 0 in IGD */
+-				if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+-					break;
+-				for (clock.n = limit->n.min;
+-				     clock.n <= limit->n.max; clock.n++) {
++	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
++	     clock.m1++) {
++		for (clock.m2 = limit->m2.min;
++		     clock.m2 <= limit->m2.max; clock.m2++) {
++			/* m1 is always 0 in Pineview */
++			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
++				break;
++			for (clock.n = limit->n.min;
++			     clock.n <= limit->n.max; clock.n++) {
++				for (clock.p1 = limit->p1.min;
++					clock.p1 <= limit->p1.max; clock.p1++) {
+ 					int this_err;
+ 
+ 					intel_clock(dev, refclk, &clock);
+@@ -736,46 +870,6 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 	return (err != target);
+ }
+ 
+-
+-static bool
+-intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-			    int target, int refclk, intel_clock_t *best_clock)
+-
+-{
+-	struct drm_device *dev = crtc->dev;
+-	intel_clock_t clock;
+-	int err = target;
+-	bool found = false;
+-
+-	memcpy(&clock, best_clock, sizeof(intel_clock_t));
+-
+-	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+-		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
+-			/* m1 is always 0 in IGD */
+-			if (clock.m2 >= clock.m1 && !IS_IGD(dev))
+-				break;
+-			for (clock.n = limit->n.min; clock.n <= limit->n.max;
+-			     clock.n++) {
+-				int this_err;
+-
+-				intel_clock(dev, refclk, &clock);
+-
+-				if (!intel_PLL_is_valid(crtc, &clock))
+-					continue;
+-
+-				this_err = abs(clock.dot - target);
+-				if (this_err < err) {
+-					*best_clock = clock;
+-					err = this_err;
+-					found = true;
+-				}
+-			}
+-		}
+-	}
+-
+-	return found;
+-}
+-
+ static bool
+ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 			int target, int refclk, intel_clock_t *best_clock)
+@@ -790,7 +884,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 	found = false;
+ 
+ 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+-		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
++		int lvds_reg;
++
++		if (IS_IRONLAKE(dev))
++			lvds_reg = PCH_LVDS;
++		else
++			lvds_reg = LVDS;
++		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
+ 		    LVDS_CLKB_POWER_UP)
+ 			clock.p2 = limit->p2.p2_fast;
+ 		else
+@@ -833,11 +933,16 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+ }
+ 
+ static bool
+-intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+-		      int target, int refclk, intel_clock_t *best_clock)
++intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
++			   int target, int refclk, intel_clock_t *best_clock)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	intel_clock_t clock;
++
++	/* return directly when it is eDP */
++	if (HAS_eDP)
++		return true;
++
+ 	if (target < 200000) {
+ 		clock.n = 1;
+ 		clock.p1 = 2;
+@@ -856,68 +961,6 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+ 	return true;
+ }
+ 
+-static bool
+-intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
+-			int target, int refclk, intel_clock_t *best_clock)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+-	intel_clock_t clock;
+-	int err_most = 47;
+-	int err_min = 10000;
+-
+-	/* eDP has only 2 clock choice, no n/m/p setting */
+-	if (HAS_eDP)
+-		return true;
+-
+-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+-		return intel_find_pll_igdng_dp(limit, crtc, target,
+-					       refclk, best_clock);
+-
+-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+-		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+-		    LVDS_CLKB_POWER_UP)
+-			clock.p2 = limit->p2.p2_fast;
+-		else
+-			clock.p2 = limit->p2.p2_slow;
+-	} else {
+-		if (target < limit->p2.dot_limit)
+-			clock.p2 = limit->p2.p2_slow;
+-		else
+-			clock.p2 = limit->p2.p2_fast;
+-	}
+-
+-	memset(best_clock, 0, sizeof(*best_clock));
+-	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+-		/* based on hardware requriment prefer smaller n to precision */
+-		for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
+-			/* based on hardware requirment prefere larger m1,m2 */
+-			for (clock.m1 = limit->m1.max;
+-			     clock.m1 >= limit->m1.min; clock.m1--) {
+-				for (clock.m2 = limit->m2.max;
+-				     clock.m2 >= limit->m2.min; clock.m2--) {
+-					int this_err;
+-
+-					intel_clock(dev, refclk, &clock);
+-					if (!intel_PLL_is_valid(crtc, &clock))
+-						continue;
+-					this_err = abs((10000 - (target*10000/clock.dot)));
+-					if (this_err < err_most) {
+-						*best_clock = clock;
+-						/* found on first matching */
+-						goto out;
+-					} else if (this_err < err_min) {
+-						*best_clock = clock;
+-						err_min = this_err;
+-					}
+-				}
+-			}
+-		}
+-	}
+-out:
+-	return true;
+-}
+-
+ /* DisplayPort has only two frequencies, 162MHz and 270MHz */
+ static bool
+ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
+@@ -949,7 +992,7 @@ void
+ intel_wait_for_vblank(struct drm_device *dev)
+ {
+ 	/* Wait for 20ms, i.e. one cycle at 50hz. */
+-	mdelay(20);
++	msleep(20);
+ }
+ 
+ /* Parameters have changed, update FBC info */
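
One note on the mdelay() -> msleep() change in intel_wait_for_vblank() above: mdelay() busy-spins the CPU for the full delay, while msleep() puts the task to sleep, which is the right tool here since this wait only happens in process context. A minimal sketch of the distinction (illustrative only; not part of the patch):

#include <linux/delay.h>

static void wait_one_vblank_period(void)
{
	/*
	 * mdelay(20) would busy-spin for the whole period (only justified
	 * in atomic context); msleep(20) yields the CPU and may sleep a
	 * little longer than requested, which is harmless for this wait.
	 */
	msleep(20);
}
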
+@@ -996,7 +1039,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 		fbc_ctl |= dev_priv->cfb_fence;
+ 	I915_WRITE(FBC_CONTROL, fbc_ctl);
+ 
+-	DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ",
++	DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
+ 		  dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
+ }
+ 
+@@ -1019,7 +1062,7 @@ void i8xx_disable_fbc(struct drm_device *dev)
+ 
+ 	intel_wait_for_vblank(dev);
+ 
+-	DRM_DEBUG("disabled FBC\n");
++	DRM_DEBUG_KMS("disabled FBC\n");
+ }
+ 
+ static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
+@@ -1064,7 +1107,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+ 	/* enable it... */
+ 	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
+ 
+-	DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane);
++	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+ }
+ 
+ void g4x_disable_fbc(struct drm_device *dev)
+@@ -1078,7 +1121,7 @@ void g4x_disable_fbc(struct drm_device *dev)
+ 	I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ 	intel_wait_for_vblank(dev);
+ 
+-	DRM_DEBUG("disabled FBC\n");
++	DRM_DEBUG_KMS("disabled FBC\n");
+ }
+ 
+ static bool g4x_fbc_enabled(struct drm_crtc *crtc)
+@@ -1143,25 +1186,27 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
+ 	 */
+ 	if (intel_fb->obj->size > dev_priv->cfb_size) {
+-		DRM_DEBUG("framebuffer too large, disabling compression\n");
++		DRM_DEBUG_KMS("framebuffer too large, disabling "
++				"compression\n");
+ 		goto out_disable;
+ 	}
+ 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ 	    (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
+-		DRM_DEBUG("mode incompatible with compression, disabling\n");
++		DRM_DEBUG_KMS("mode incompatible with compression, "
++				"disabling\n");
+ 		goto out_disable;
+ 	}
+ 	if ((mode->hdisplay > 2048) ||
+ 	    (mode->vdisplay > 1536)) {
+-		DRM_DEBUG("mode too large for compression, disabling\n");
++		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ 		goto out_disable;
+ 	}
+ 	if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
+-		DRM_DEBUG("plane not 0, disabling compression\n");
++		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ 		goto out_disable;
+ 	}
+ 	if (obj_priv->tiling_mode != I915_TILING_X) {
+-		DRM_DEBUG("framebuffer not tiled, disabling compression\n");
++		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
+ 		goto out_disable;
+ 	}
+ 
+@@ -1183,13 +1228,57 @@ static void intel_update_fbc(struct drm_crtc *crtc,
+ 	return;
+ 
+ out_disable:
+-	DRM_DEBUG("unsupported config, disabling FBC\n");
++	DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
+ 	/* Multiple disables should be harmless */
+ 	if (dev_priv->display.fbc_enabled(crtc))
+ 		dev_priv->display.disable_fbc(dev);
+ }
+ 
+ static int
++intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
++{
++	struct drm_i915_gem_object *obj_priv = obj->driver_private;
++	u32 alignment;
++	int ret;
++
++	switch (obj_priv->tiling_mode) {
++	case I915_TILING_NONE:
++		alignment = 64 * 1024;
++		break;
++	case I915_TILING_X:
++		/* pin() will align the object as required by fence */
++		alignment = 0;
++		break;
++	case I915_TILING_Y:
++		/* FIXME: Is this true? */
++		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
++		return -EINVAL;
++	default:
++		BUG();
++	}
++
++	ret = i915_gem_object_pin(obj, alignment);
++	if (ret != 0)
++		return ret;
++
++	/* Install a fence for tiled scan-out. Pre-i965 always needs a
++	 * fence, whereas 965+ only requires a fence if using
++	 * framebuffer compression.  For simplicity, we always install
++	 * a fence as the cost is not that onerous.
++	 */
++	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
++	    obj_priv->tiling_mode != I915_TILING_NONE) {
++		ret = i915_gem_object_get_fence_reg(obj);
++		if (ret != 0) {
++			i915_gem_object_unpin(obj);
++			return ret;
++		}
++	}
++
++	return 0;
++}
++
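
The factored-out helper above (reused later by the new page-flip path) owns the pin + fence pairing; callers hold struct_mutex and must unpin themselves if a later step fails. A sketch of that calling contract (illustrative only; setup_scanout is a hypothetical wrapper, not a driver function):

/* Illustrative calling contract -- not part of the patch. */
static int setup_scanout(struct drm_device *dev, struct drm_gem_object *obj)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev, obj);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	/* ... program the scan-out registers; on any failure here the  */
	/* caller must i915_gem_object_unpin(obj) before unlocking ...  */
	mutex_unlock(&dev->struct_mutex);
	return 0;
}
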
++static int
+ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 		    struct drm_framebuffer *old_fb)
+ {
+@@ -1208,12 +1297,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 	int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ 	int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ 	int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+-	u32 dspcntr, alignment;
++	u32 dspcntr;
+ 	int ret;
+ 
+ 	/* no fb bound */
+ 	if (!crtc->fb) {
+-		DRM_DEBUG("No FB bound\n");
++		DRM_DEBUG_KMS("No FB bound\n");
+ 		return 0;
+ 	}
+ 
+@@ -1230,24 +1319,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 	obj = intel_fb->obj;
+ 	obj_priv = obj->driver_private;
+ 
+-	switch (obj_priv->tiling_mode) {
+-	case I915_TILING_NONE:
+-		alignment = 64 * 1024;
+-		break;
+-	case I915_TILING_X:
+-		/* pin() will align the object as required by fence */
+-		alignment = 0;
+-		break;
+-	case I915_TILING_Y:
+-		/* FIXME: Is this true? */
+-		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
+-		return -EINVAL;
+-	default:
+-		BUG();
+-	}
+-
+ 	mutex_lock(&dev->struct_mutex);
+-	ret = i915_gem_object_pin(obj, alignment);
++	ret = intel_pin_and_fence_fb_obj(dev, obj);
+ 	if (ret != 0) {
+ 		mutex_unlock(&dev->struct_mutex);
+ 		return ret;
+@@ -1260,20 +1333,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 		return ret;
+ 	}
+ 
+-	/* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
+-	 * whereas 965+ only requires a fence if using framebuffer compression.
+-	 * For simplicity, we always install a fence as the cost is not that onerous.
+-	 */
+-	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+-	    obj_priv->tiling_mode != I915_TILING_NONE) {
+-		ret = i915_gem_object_get_fence_reg(obj);
+-		if (ret != 0) {
+-			i915_gem_object_unpin(obj);
+-			mutex_unlock(&dev->struct_mutex);
+-			return ret;
+-		}
+-	}
+-
+ 	dspcntr = I915_READ(dspcntr_reg);
+ 	/* Mask out pixel format bits in case we change it */
+ 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+@@ -1289,7 +1348,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 		break;
+ 	case 24:
+ 	case 32:
+-		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
++		if (crtc->fb->depth == 30)
++			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
++		else
++			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ 		break;
+ 	default:
+ 		DRM_ERROR("Unknown color depth\n");
+@@ -1304,7 +1366,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 			dspcntr &= ~DISPPLANE_TILED;
+ 	}
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		/* must disable */
+ 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ 
+@@ -1313,7 +1375,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ 	Start = obj_priv->gtt_offset;
+ 	Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ 
+-	DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
++	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ 	I915_WRITE(dspstride, crtc->fb->pitch);
+ 	if (IS_I965G(dev)) {
+ 		I915_WRITE(dspbase, Offset);
+@@ -1365,7 +1427,7 @@ static void i915_disable_vga (struct drm_device *dev)
+ 	u8 sr1;
+ 	u32 vga_reg;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		vga_reg = CPU_VGACNTRL;
+ 	else
+ 		vga_reg = VGACNTRL;
+@@ -1381,19 +1443,19 @@ static void i915_disable_vga (struct drm_device *dev)
+ 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
+ }
+ 
+-static void igdng_disable_pll_edp (struct drm_crtc *crtc)
++static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 dpa_ctl;
+ 
+-	DRM_DEBUG("\n");
++	DRM_DEBUG_KMS("\n");
+ 	dpa_ctl = I915_READ(DP_A);
+ 	dpa_ctl &= ~DP_PLL_ENABLE;
+ 	I915_WRITE(DP_A, dpa_ctl);
+ }
+ 
+-static void igdng_enable_pll_edp (struct drm_crtc *crtc)
++static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -1406,13 +1468,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
+ }
+ 
+ 
+-static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
++static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 dpa_ctl;
+ 
+-	DRM_DEBUG("eDP PLL enable for clock %d\n", clock);
++	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
+ 	dpa_ctl = I915_READ(DP_A);
+ 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
+ 
+@@ -1442,7 +1504,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
+ 	udelay(500);
+ }
+ 
+-static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
++static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -1487,7 +1549,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 	case DRM_MODE_DPMS_ON:
+ 	case DRM_MODE_DPMS_STANDBY:
+ 	case DRM_MODE_DPMS_SUSPEND:
+-		DRM_DEBUG("crtc %d dpms on\n", pipe);
++		DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
+ 
+ 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ 			temp = I915_READ(PCH_LVDS);
+@@ -1499,7 +1561,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 
+ 		if (HAS_eDP) {
+ 			/* enable eDP PLL */
+-			igdng_enable_pll_edp(crtc);
++			ironlake_enable_pll_edp(crtc);
+ 		} else {
+ 			/* enable PCH DPLL */
+ 			temp = I915_READ(pch_dpll_reg);
+@@ -1522,7 +1584,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 			I915_READ(fdi_rx_reg);
+ 			udelay(200);
+ 
+-			/* Enable CPU FDI TX PLL, always on for IGDNG */
++			/* Enable CPU FDI TX PLL, always on for Ironlake */
+ 			temp = I915_READ(fdi_tx_reg);
+ 			if ((temp & FDI_TX_PLL_ENABLE) == 0) {
+ 				I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
+@@ -1589,12 +1651,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 			udelay(150);
+ 
+ 			temp = I915_READ(fdi_rx_iir_reg);
+-			DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
++			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ 
+ 			if ((temp & FDI_RX_BIT_LOCK) == 0) {
+ 				for (j = 0; j < tries; j++) {
+ 					temp = I915_READ(fdi_rx_iir_reg);
+-					DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
++					DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
++								temp);
+ 					if (temp & FDI_RX_BIT_LOCK)
+ 						break;
+ 					udelay(200);
+@@ -1603,11 +1666,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 					I915_WRITE(fdi_rx_iir_reg,
+ 							temp | FDI_RX_BIT_LOCK);
+ 				else
+-					DRM_DEBUG("train 1 fail\n");
++					DRM_DEBUG_KMS("train 1 fail\n");
+ 			} else {
+ 				I915_WRITE(fdi_rx_iir_reg,
+ 						temp | FDI_RX_BIT_LOCK);
+-				DRM_DEBUG("train 1 ok 2!\n");
++				DRM_DEBUG_KMS("train 1 ok 2!\n");
+ 			}
+ 			temp = I915_READ(fdi_tx_reg);
+ 			temp &= ~FDI_LINK_TRAIN_NONE;
+@@ -1622,12 +1685,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 			udelay(150);
+ 
+ 			temp = I915_READ(fdi_rx_iir_reg);
+-			DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
++			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ 
+ 			if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
+ 				for (j = 0; j < tries; j++) {
+ 					temp = I915_READ(fdi_rx_iir_reg);
+-					DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
++					DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
++								temp);
+ 					if (temp & FDI_RX_SYMBOL_LOCK)
+ 						break;
+ 					udelay(200);
+@@ -1635,15 +1699,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 				if (j != tries) {
+ 					I915_WRITE(fdi_rx_iir_reg,
+ 							temp | FDI_RX_SYMBOL_LOCK);
+-					DRM_DEBUG("train 2 ok 1!\n");
++					DRM_DEBUG_KMS("train 2 ok 1!\n");
+ 				} else
+-					DRM_DEBUG("train 2 fail\n");
++					DRM_DEBUG_KMS("train 2 fail\n");
+ 			} else {
+ 				I915_WRITE(fdi_rx_iir_reg,
+ 						temp | FDI_RX_SYMBOL_LOCK);
+-				DRM_DEBUG("train 2 ok 2!\n");
++				DRM_DEBUG_KMS("train 2 ok 2!\n");
+ 			}
+-			DRM_DEBUG("train done\n");
++			DRM_DEBUG_KMS("train done\n");
+ 
+ 			/* set transcoder timing */
+ 			I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
+@@ -1691,8 +1755,9 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 
+ 	break;
+ 	case DRM_MODE_DPMS_OFF:
+-		DRM_DEBUG("crtc %d dpms off\n", pipe);
++		DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ 
++		drm_vblank_off(dev, pipe);
+ 		/* Disable display plane */
+ 		temp = I915_READ(dspcntr_reg);
+ 		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+@@ -1717,12 +1782,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 					udelay(500);
+ 					continue;
+ 				} else {
+-					DRM_DEBUG("pipe %d off delay\n", pipe);
++					DRM_DEBUG_KMS("pipe %d off delay\n",
++								pipe);
+ 					break;
+ 				}
+ 			}
+ 		} else
+-			DRM_DEBUG("crtc %d is disabled\n", pipe);
++			DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+ 
+ 		udelay(100);
+ 
+@@ -1781,7 +1847,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 					udelay(500);
+ 					continue;
+ 				} else {
+-					DRM_DEBUG("transcoder %d off delay\n", pipe);
++					DRM_DEBUG_KMS("transcoder %d off "
++							"delay\n", pipe);
+ 					break;
+ 				}
+ 			}
+@@ -1802,7 +1869,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 		}
+ 
+ 		if (HAS_eDP) {
+-			igdng_disable_pll_edp(crtc);
++			ironlake_disable_pll_edp(crtc);
+ 		}
+ 
+ 		temp = I915_READ(fdi_rx_reg);
+@@ -1829,6 +1896,37 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 	}
+ }
+ 
++static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
++{
++	struct intel_overlay *overlay;
++	int ret;
++
++	if (!enable && intel_crtc->overlay) {
++		overlay = intel_crtc->overlay;
++		mutex_lock(&overlay->dev->struct_mutex);
++		for (;;) {
++			ret = intel_overlay_switch_off(overlay);
++			if (ret == 0)
++				break;
++
++			ret = intel_overlay_recover_from_interrupt(overlay, 0);
++			if (ret != 0) {
++				/* overlay doesn't react anymore. Usually
++				 * results in a black screen and an unkillable
++				 * X server. */
++				BUG();
++				overlay->hw_wedged = HW_WEDGED;
++				break;
++			}
++		}
++		mutex_unlock(&overlay->dev->struct_mutex);
++	}
++	/* Let userspace switch the overlay on again. In most cases userspace
++	 * has to recompute where to put it anyway. */
++
++	return;
++}
++
+ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+ {
+ 	struct drm_device *dev = crtc->dev;
+@@ -1887,12 +1985,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 			intel_update_fbc(crtc, &crtc->mode);
+ 
+ 		/* Give the overlay scaler a chance to enable if it's on this pipe */
+-		//intel_crtc_dpms_video(crtc, true); TODO
++		intel_crtc_dpms_overlay(intel_crtc, true);
+ 	break;
+ 	case DRM_MODE_DPMS_OFF:
+ 		intel_update_watermarks(dev);
++
+ 		/* Give the overlay scaler a chance to disable if it's on this pipe */
+-		//intel_crtc_dpms_video(crtc, FALSE); TODO
++		intel_crtc_dpms_overlay(intel_crtc, false);
+ 		drm_vblank_off(dev, pipe);
+ 
+ 		if (dev_priv->cfb_plane == plane &&
+@@ -2012,7 +2111,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
+ 				  struct drm_display_mode *adjusted_mode)
+ {
+ 	struct drm_device *dev = crtc->dev;
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		/* FDI link clock is fixed at 2.7G */
+ 		if (mode->clock * 3 > 27000 * 4)
+ 			return MODE_CLOCK_HIGH;
+@@ -2088,7 +2187,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
+  * Return the pipe currently connected to the panel fitter,
+  * or -1 if the panel fitter is not present or not in use
+  */
+-static int intel_panel_fitter_pipe (struct drm_device *dev)
++int intel_panel_fitter_pipe (struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32  pfit_control;
+@@ -2132,9 +2231,8 @@ fdi_reduce_ratio(u32 *num, u32 *den)
+ #define LINK_N 0x80000
+ 
+ static void
+-igdng_compute_m_n(int bits_per_pixel, int nlanes,
+-		int pixel_clock, int link_clock,
+-		struct fdi_m_n *m_n)
++ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
++		     int link_clock, struct fdi_m_n *m_n)
+ {
+ 	u64 temp;
+ 
+@@ -2162,34 +2260,34 @@ struct intel_watermark_params {
+ 	unsigned long cacheline_size;
+ };
+ 
+-/* IGD has different values for various configs */
+-static struct intel_watermark_params igd_display_wm = {
+-	IGD_DISPLAY_FIFO,
+-	IGD_MAX_WM,
+-	IGD_DFT_WM,
+-	IGD_GUARD_WM,
+-	IGD_FIFO_LINE_SIZE
++/* Pineview has different values for various configs */
++static struct intel_watermark_params pineview_display_wm = {
++	PINEVIEW_DISPLAY_FIFO,
++	PINEVIEW_MAX_WM,
++	PINEVIEW_DFT_WM,
++	PINEVIEW_GUARD_WM,
++	PINEVIEW_FIFO_LINE_SIZE
+ };
+-static struct intel_watermark_params igd_display_hplloff_wm = {
+-	IGD_DISPLAY_FIFO,
+-	IGD_MAX_WM,
+-	IGD_DFT_HPLLOFF_WM,
+-	IGD_GUARD_WM,
+-	IGD_FIFO_LINE_SIZE
++static struct intel_watermark_params pineview_display_hplloff_wm = {
++	PINEVIEW_DISPLAY_FIFO,
++	PINEVIEW_MAX_WM,
++	PINEVIEW_DFT_HPLLOFF_WM,
++	PINEVIEW_GUARD_WM,
++	PINEVIEW_FIFO_LINE_SIZE
+ };
+-static struct intel_watermark_params igd_cursor_wm = {
+-	IGD_CURSOR_FIFO,
+-	IGD_CURSOR_MAX_WM,
+-	IGD_CURSOR_DFT_WM,
+-	IGD_CURSOR_GUARD_WM,
+-	IGD_FIFO_LINE_SIZE,
++static struct intel_watermark_params pineview_cursor_wm = {
++	PINEVIEW_CURSOR_FIFO,
++	PINEVIEW_CURSOR_MAX_WM,
++	PINEVIEW_CURSOR_DFT_WM,
++	PINEVIEW_CURSOR_GUARD_WM,
++	PINEVIEW_FIFO_LINE_SIZE,
+ };
+-static struct intel_watermark_params igd_cursor_hplloff_wm = {
+-	IGD_CURSOR_FIFO,
+-	IGD_CURSOR_MAX_WM,
+-	IGD_CURSOR_DFT_WM,
+-	IGD_CURSOR_GUARD_WM,
+-	IGD_FIFO_LINE_SIZE
++static struct intel_watermark_params pineview_cursor_hplloff_wm = {
++	PINEVIEW_CURSOR_FIFO,
++	PINEVIEW_CURSOR_MAX_WM,
++	PINEVIEW_CURSOR_DFT_WM,
++	PINEVIEW_CURSOR_GUARD_WM,
++	PINEVIEW_FIFO_LINE_SIZE
+ };
+ static struct intel_watermark_params g4x_wm_info = {
+ 	G4X_FIFO_SIZE,
+@@ -2262,11 +2360,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
+ 		1000;
+ 	entries_required /= wm->cacheline_size;
+ 
+-	DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required);
++	DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
+ 
+ 	wm_size = wm->fifo_size - (entries_required + wm->guard_size);
+ 
+-	DRM_DEBUG("FIFO watermark level: %d\n", wm_size);
++	DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+ 
+ 	/* Don't promote wm_size to unsigned... */
+ 	if (wm_size > (long)wm->max_wm)
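
To make the arithmetic in intel_calculate_wm() above concrete, a small standalone sketch with made-up parameters (the FIFO, guard and cacheline values are placeholders, not real hardware limits):

#include <stdio.h>

/* Mirrors the watermark math with hypothetical example numbers. */
int main(void)
{
	long clock_in_khz = 108000;	/* ~108 MHz pixel clock          */
	long pixel_size = 4;		/* bytes per pixel at 32 bpp     */
	long latency_ns = 5000;		/* the driver's default latency  */
	long cacheline = 64, fifo = 96, guard = 2;

	/* bytes fetched during the latency window, then in cachelines */
	long entries = clock_in_khz / 1000 * pixel_size * latency_ns / 1000;
	entries /= cacheline;

	/* FIFO entries left over after covering latency plus the guard */
	long wm_size = fifo - (entries + guard);

	printf("entries=%ld wm=%ld\n", entries, wm_size);	/* 33 and 61 */
	return 0;
}
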
+@@ -2328,50 +2426,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
+ 			return latency;
+ 	}
+ 
+-	DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
++	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+ 
+ 	return NULL;
+ }
+ 
+-static void igd_disable_cxsr(struct drm_device *dev)
++static void pineview_disable_cxsr(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 reg;
+ 
+ 	/* deactivate cxsr */
+ 	reg = I915_READ(DSPFW3);
+-	reg &= ~(IGD_SELF_REFRESH_EN);
++	reg &= ~(PINEVIEW_SELF_REFRESH_EN);
+ 	I915_WRITE(DSPFW3, reg);
+ 	DRM_INFO("Big FIFO is disabled\n");
+ }
+ 
+-static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
+-			    int pixel_size)
++static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
++				 int pixel_size)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 reg;
+ 	unsigned long wm;
+ 	struct cxsr_latency *latency;
+ 
+-	latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq,
++	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
+ 		dev_priv->mem_freq);
+ 	if (!latency) {
+-		DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n");
+-		igd_disable_cxsr(dev);
++		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
++		pineview_disable_cxsr(dev);
+ 		return;
+ 	}
+ 
+ 	/* Display SR */
+-	wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size,
++	wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
+ 				latency->display_sr);
+ 	reg = I915_READ(DSPFW1);
+ 	reg &= 0x7fffff;
+ 	reg |= wm << 23;
+ 	I915_WRITE(DSPFW1, reg);
+-	DRM_DEBUG("DSPFW1 register is %x\n", reg);
++	DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
+ 
+ 	/* cursor SR */
+-	wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size,
++	wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
+ 				latency->cursor_sr);
+ 	reg = I915_READ(DSPFW3);
+ 	reg &= ~(0x3f << 24);
+@@ -2379,7 +2477,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
+ 	I915_WRITE(DSPFW3, reg);
+ 
+ 	/* Display HPLL off SR */
+-	wm = intel_calculate_wm(clock, &igd_display_hplloff_wm,
++	wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ 		latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
+ 	reg = I915_READ(DSPFW3);
+ 	reg &= 0xfffffe00;
+@@ -2387,17 +2485,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
+ 	I915_WRITE(DSPFW3, reg);
+ 
+ 	/* cursor HPLL off SR */
+-	wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size,
++	wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
+ 				latency->cursor_hpll_disable);
+ 	reg = I915_READ(DSPFW3);
+ 	reg &= ~(0x3f << 16);
+ 	reg |= (wm & 0x3f) << 16;
+ 	I915_WRITE(DSPFW3, reg);
+-	DRM_DEBUG("DSPFW3 register is %x\n", reg);
++	DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
+ 
+ 	/* activate cxsr */
+ 	reg = I915_READ(DSPFW3);
+-	reg |= IGD_SELF_REFRESH_EN;
++	reg |= PINEVIEW_SELF_REFRESH_EN;
+ 	I915_WRITE(DSPFW3, reg);
+ 
+ 	DRM_INFO("Big FIFO is enabled\n");
+@@ -2419,7 +2517,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
+  * A value of 5us seems to be a good balance; safe for very low end
+  * platforms but not overly aggressive on lower latency configs.
+  */
+-const static int latency_ns = 5000;
++static const int latency_ns = 5000;
+ 
+ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+ {
+@@ -2433,8 +2531,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
+ 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
+ 			(dsparb & 0x7f);
+ 
+-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+-		  size);
++	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
++			plane ? "B" : "A", size);
+ 
+ 	return size;
+ }
+@@ -2452,8 +2550,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
+ 			(dsparb & 0x1ff);
+ 	size >>= 1; /* Convert to cachelines */
+ 
+-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+-		  size);
++	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
++			plane ? "B" : "A", size);
+ 
+ 	return size;
+ }
+@@ -2467,7 +2565,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
+ 	size = dsparb & 0x7f;
+ 	size >>= 2; /* Convert to cachelines */
+ 
+-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
++	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
++			plane ? "B" : "A",
+ 		  size);
+ 
+ 	return size;
+@@ -2482,8 +2581,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
+ 	size = dsparb & 0x7f;
+ 	size >>= 1; /* Convert to cachelines */
+ 
+-	DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A",
+-		  size);
++	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
++			plane ? "B" : "A", size);
+ 
+ 	return size;
+ }
+@@ -2529,7 +2628,7 @@ static void g4x_update_wm(struct drm_device *dev,  int planea_clock,
+ 	/* Calc sr entries for one plane configs */
+ 	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ 		/* self-refresh has much higher latency */
+-		const static int sr_latency_ns = 12000;
++		static const int sr_latency_ns = 12000;
+ 
+ 		sr_clock = planea_clock ? planea_clock : planeb_clock;
+ 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+@@ -2572,7 +2671,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
+ 	/* Calc sr entries for one plane configs */
+ 	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ 		/* self-refresh has much higher latency */
+-		const static int sr_latency_ns = 12000;
++		static const int sr_latency_ns = 12000;
+ 
+ 		sr_clock = planea_clock ? planea_clock : planeb_clock;
+ 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+@@ -2634,7 +2733,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ 				       pixel_size, latency_ns);
+ 	planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
+ 				       pixel_size, latency_ns);
+-	DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
++	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+ 
+ 	/*
+ 	 * Overlay gets an aggressive default since video jitter is bad.
+@@ -2645,7 +2744,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ 	if (HAS_FW_BLC(dev) && sr_hdisplay &&
+ 	    (!planea_clock || !planeb_clock)) {
+ 		/* self-refresh has much higher latency */
+-		const static int sr_latency_ns = 6000;
++		static const int sr_latency_ns = 6000;
+ 
+ 		sr_clock = planea_clock ? planea_clock : planeb_clock;
+ 		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+@@ -2654,7 +2753,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ 		sr_entries = (((sr_latency_ns / line_time_us) + 1) *
+ 			      pixel_size * sr_hdisplay) / 1000;
+ 		sr_entries = roundup(sr_entries / cacheline_size, 1);
+-		DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
++		DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
+ 		srwm = total_size - sr_entries;
+ 		if (srwm < 0)
+ 			srwm = 1;
+@@ -2665,7 +2764,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
+ 					& ~FW_BLC_SELF_EN);
+ 	}
+ 
+-	DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
++	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
+ 		  planea_wm, planeb_wm, cwm, srwm);
+ 
+ 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
+@@ -2692,7 +2791,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
+ 				       pixel_size, latency_ns);
+ 	fwater_lo |= (3<<8) | planea_wm;
+ 
+-	DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm);
++	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
+ 
+ 	I915_WRITE(FW_BLC, fwater_lo);
+ }
+@@ -2746,11 +2845,11 @@ static void intel_update_watermarks(struct drm_device *dev)
+ 		if (crtc->enabled) {
+ 			enabled++;
+ 			if (intel_crtc->plane == 0) {
+-				DRM_DEBUG("plane A (pipe %d) clock: %d\n",
++				DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
+ 					  intel_crtc->pipe, crtc->mode.clock);
+ 				planea_clock = crtc->mode.clock;
+ 			} else {
+-				DRM_DEBUG("plane B (pipe %d) clock: %d\n",
++				DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
+ 					  intel_crtc->pipe, crtc->mode.clock);
+ 				planeb_clock = crtc->mode.clock;
+ 			}
+@@ -2767,10 +2866,10 @@ static void intel_update_watermarks(struct drm_device *dev)
+ 		return;
+ 
+ 	/* Single plane configs can enable self refresh */
+-	if (enabled == 1 && IS_IGD(dev))
+-		igd_enable_cxsr(dev, sr_clock, pixel_size);
+-	else if (IS_IGD(dev))
+-		igd_disable_cxsr(dev);
++	if (enabled == 1 && IS_PINEVIEW(dev))
++		pineview_enable_cxsr(dev, sr_clock, pixel_size);
++	else if (IS_PINEVIEW(dev))
++		pineview_disable_cxsr(dev);
+ 
+ 	dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
+ 				    sr_hdisplay, pixel_size);
+@@ -2864,10 +2963,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 
+ 	if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
+ 		refclk = dev_priv->lvds_ssc_freq * 1000;
+-		DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
++		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
++					refclk / 1000);
+ 	} else if (IS_I9XX(dev)) {
+ 		refclk = 96000;
+-		if (IS_IGDNG(dev))
++		if (IS_IRONLAKE(dev))
+ 			refclk = 120000; /* 120Mhz refclk */
+ 	} else {
+ 		refclk = 48000;
+@@ -2887,14 +2987,23 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) {
+-		memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
+-		has_reduced_clock = limit->find_reduced_pll(limit, crtc,
+-							    (adjusted_mode->clock*3/4),
++	if (is_lvds && dev_priv->lvds_downclock_avail) {
++		has_reduced_clock = limit->find_pll(limit, crtc,
++							    dev_priv->lvds_downclock,
+ 							    refclk,
+ 							    &reduced_clock);
++		if (has_reduced_clock && (clock.p != reduced_clock.p)) {
++			/*
++			 * If a different P divisor is found, we cannot switch
++			 * the display clock using FP0/FP1 alone, so disable
++			 * the LVDS downclock feature in that case.
++			 */
++			DRM_DEBUG_KMS("Different P is found for "
++						"LVDS clock/downclock\n");
++			has_reduced_clock = 0;
++		}
+ 	}
+-
+ 	/* SDVO TV has fixed PLL values depend on its clock range,
+ 	   this mirrors vbios setting. */
+ 	if (is_sdvo && is_tv) {
+@@ -2916,7 +3025,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	}
+ 
+ 	/* FDI link */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		int lane, link_bw, bpp;
+ 		/* eDP doesn't require FDI link, so just set DP M/N
+ 		   according to current link config */
+@@ -2947,6 +3056,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 				temp |= PIPE_8BPC;
+ 			else
+ 				temp |= PIPE_6BPC;
++		} else if (is_edp) {
++			switch (dev_priv->edp_bpp/3) {
++			case 8:
++				temp |= PIPE_8BPC;
++				break;
++			case 10:
++				temp |= PIPE_10BPC;
++				break;
++			case 6:
++				temp |= PIPE_6BPC;
++				break;
++			case 12:
++				temp |= PIPE_12BPC;
++				break;
++			}
+ 		} else
+ 			temp |= PIPE_8BPC;
+ 		I915_WRITE(pipeconf_reg, temp);
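
The new is_edp branch above derives a per-component depth from dev_priv->edp_bpp by dividing the total bits per pixel by the three colour components (24 bpp -> 8 bpc -> PIPE_8BPC, and so on). A minimal sketch of the mapping (illustrative only; the helper name is hypothetical):

/* Illustrative only -- not part of the patch. */
static int edp_bits_per_component(int edp_bpp)
{
	switch (edp_bpp / 3) {
	case 6:  return 6;
	case 8:  return 8;	/* e.g. edp_bpp == 24 */
	case 10: return 10;
	case 12: return 12;
	default: return 0;	/* the driver leaves PIPE_*BPC unset */
	}
}
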
+@@ -2970,8 +3094,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			bpp = 24;
+ 		}
+ 
+-		igdng_compute_m_n(bpp, lane, target_clock,
+-				  link_bw, &m_n);
++		ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
+ 	}
+ 
+ 	/* Ironlake: try to setup display ref clock before DPLL
+@@ -2979,7 +3102,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	 * PCH B stepping, previous chipset stepping should be
+ 	 * ignoring this setting.
+ 	 */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		temp = I915_READ(PCH_DREF_CONTROL);
+ 		/* Always enable nonspread source */
+ 		temp &= ~DREF_NONSPREAD_SOURCE_MASK;
+@@ -3014,7 +3137,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		}
+ 	}
+ 
+-	if (IS_IGD(dev)) {
++	if (IS_PINEVIEW(dev)) {
+ 		fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
+ 		if (has_reduced_clock)
+ 			fp2 = (1 << reduced_clock.n) << 16 |
+@@ -3026,7 +3149,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 				reduced_clock.m2;
+ 	}
+ 
+-	if (!IS_IGDNG(dev))
++	if (!IS_IRONLAKE(dev))
+ 		dpll = DPLL_VGA_MODE_DIS;
+ 
+ 	if (IS_I9XX(dev)) {
+@@ -3039,19 +3162,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ 			if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ 				dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
+-			else if (IS_IGDNG(dev))
++			else if (IS_IRONLAKE(dev))
+ 				dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
+ 		}
+ 		if (is_dp)
+ 			dpll |= DPLL_DVO_HIGH_SPEED;
+ 
+ 		/* compute bitmask from p1 value */
+-		if (IS_IGD(dev))
+-			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
++		if (IS_PINEVIEW(dev))
++			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
+ 		else {
+ 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ 			/* also FPA1 */
+-			if (IS_IGDNG(dev))
++			if (IS_IRONLAKE(dev))
+ 				dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ 			if (IS_G4X(dev) && has_reduced_clock)
+ 				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+@@ -3070,7 +3193,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
+ 			break;
+ 		}
+-		if (IS_I965G(dev) && !IS_IGDNG(dev))
++		if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
+ 	} else {
+ 		if (is_lvds) {
+@@ -3102,9 +3225,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	/* Set up the display plane register */
+ 	dspcntr = DISPPLANE_GAMMA_ENABLE;
+ 
+-	/* IGDNG's plane is forced to pipe, bit 24 is to
++	/* Ironlake's plane is forced to pipe, bit 24 is to
+ 	   enable color space conversion */
+-	if (!IS_IGDNG(dev)) {
++	if (!IS_IRONLAKE(dev)) {
+ 		if (pipe == 0)
+ 			dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
+ 		else
+@@ -3131,20 +3254,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 
+ 
+ 	/* Disable the panel fitter if it was on our pipe */
+-	if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
++	if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
+ 		I915_WRITE(PFIT_CONTROL, 0);
+ 
+-	DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
++	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
+ 	drm_mode_debug_printmodeline(mode);
+ 
+-	/* assign to IGDNG registers */
+-	if (IS_IGDNG(dev)) {
++	/* assign to Ironlake registers */
++	if (IS_IRONLAKE(dev)) {
+ 		fp_reg = pch_fp_reg;
+ 		dpll_reg = pch_dpll_reg;
+ 	}
+ 
+ 	if (is_edp) {
+-		igdng_disable_pll_edp(crtc);
++		ironlake_disable_pll_edp(crtc);
+ 	} else if ((dpll & DPLL_VCO_ENABLE)) {
+ 		I915_WRITE(fp_reg, fp);
+ 		I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
+@@ -3159,7 +3282,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	if (is_lvds) {
+ 		u32 lvds;
+ 
+-		if (IS_IGDNG(dev))
++		if (IS_IRONLAKE(dev))
+ 			lvds_reg = PCH_LVDS;
+ 
+ 		lvds = I915_READ(lvds_reg);
+@@ -3181,12 +3304,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		/* set the dithering flag */
+ 		if (IS_I965G(dev)) {
+ 			if (dev_priv->lvds_dither) {
+-				if (IS_IGDNG(dev))
++				if (IS_IRONLAKE(dev))
+ 					pipeconf |= PIPE_ENABLE_DITHER;
+ 				else
+ 					lvds |= LVDS_ENABLE_DITHER;
+ 			} else {
+-				if (IS_IGDNG(dev))
++				if (IS_IRONLAKE(dev))
+ 					pipeconf &= ~PIPE_ENABLE_DITHER;
+ 				else
+ 					lvds &= ~LVDS_ENABLE_DITHER;
+@@ -3205,7 +3328,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		/* Wait for the clocks to stabilize. */
+ 		udelay(150);
+ 
+-		if (IS_I965G(dev) && !IS_IGDNG(dev)) {
++		if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ 			if (is_sdvo) {
+ 				sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
+ 				I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
+@@ -3225,14 +3348,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 		I915_WRITE(fp_reg + 4, fp2);
+ 		intel_crtc->lowfreq_avail = true;
+ 		if (HAS_PIPE_CXSR(dev)) {
+-			DRM_DEBUG("enabling CxSR downclocking\n");
++			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
+ 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
+ 		}
+ 	} else {
+ 		I915_WRITE(fp_reg + 4, fp);
+ 		intel_crtc->lowfreq_avail = false;
+ 		if (HAS_PIPE_CXSR(dev)) {
+-			DRM_DEBUG("disabling CxSR downclocking\n");
++			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
+ 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
+ 		}
+ 	}
+@@ -3252,21 +3375,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 	/* pipesrc and dspsize control the size that is scaled from, which should
+ 	 * always be the user's requested size.
+ 	 */
+-	if (!IS_IGDNG(dev)) {
++	if (!IS_IRONLAKE(dev)) {
+ 		I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
+ 				(mode->hdisplay - 1));
+ 		I915_WRITE(dsppos_reg, 0);
+ 	}
+ 	I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
+ 		I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
+ 		I915_WRITE(link_m1_reg, m_n.link_m);
+ 		I915_WRITE(link_n1_reg, m_n.link_n);
+ 
+ 		if (is_edp) {
+-			igdng_set_pll_edp(crtc, adjusted_mode->clock);
++			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+ 		} else {
+ 			/* enable FDI RX PLL too */
+ 			temp = I915_READ(fdi_rx_reg);
+@@ -3280,7 +3403,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
+ 
+ 	intel_wait_for_vblank(dev);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		/* enable address swizzle for tiling buffer */
+ 		temp = I915_READ(DISP_ARB_CTL);
+ 		I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
+@@ -3314,8 +3437,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
+ 	if (!crtc->enabled)
+ 		return;
+ 
+-	/* use legacy palette for IGDNG */
+-	if (IS_IGDNG(dev))
++	/* use legacy palette for Ironlake */
++	if (IS_IRONLAKE(dev))
+ 		palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
+ 						   LGC_PALETTE_B;
+ 
+@@ -3344,11 +3467,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 	size_t addr;
+ 	int ret;
+ 
+-	DRM_DEBUG("\n");
++	DRM_DEBUG_KMS("\n");
+ 
+ 	/* if we want to turn off the cursor ignore width and height */
+ 	if (!handle) {
+-		DRM_DEBUG("cursor off\n");
++		DRM_DEBUG_KMS("cursor off\n");
+ 		if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ 			temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ 			temp |= CURSOR_MODE_DISABLE;
+@@ -3381,7 +3504,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 
+ 	/* we only need to pin inside GTT if cursor is non-phy */
+ 	mutex_lock(&dev->struct_mutex);
+-	if (!dev_priv->cursor_needs_physical) {
++	if (!dev_priv->info->cursor_needs_physical) {
+ 		ret = i915_gem_object_pin(bo, PAGE_SIZE);
+ 		if (ret) {
+ 			DRM_ERROR("failed to pin cursor bo\n");
+@@ -3416,7 +3539,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
+ 	I915_WRITE(base, addr);
+ 
+ 	if (intel_crtc->cursor_bo) {
+-		if (dev_priv->cursor_needs_physical) {
++		if (dev_priv->info->cursor_needs_physical) {
+ 			if (intel_crtc->cursor_bo != bo)
+ 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+ 		} else
+@@ -3656,18 +3779,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ 		fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+ 
+ 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+-	if (IS_IGD(dev)) {
+-		clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+-		clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
++	if (IS_PINEVIEW(dev)) {
++		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
++		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ 	} else {
+ 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ 	}
+ 
+ 	if (IS_I9XX(dev)) {
+-		if (IS_IGD(dev))
+-			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
+-				DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
++		if (IS_PINEVIEW(dev))
++			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
++				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ 		else
+ 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
+@@ -3682,7 +3805,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+ 				7 : 14;
+ 			break;
+ 		default:
+-			DRM_DEBUG("Unknown DPLL mode %08x in programmed "
++			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
+ 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ 			return 0;
+ 		}
+@@ -3768,7 +3891,7 @@ static void intel_gpu_idle_timer(unsigned long arg)
+ 	struct drm_device *dev = (struct drm_device *)arg;
+ 	drm_i915_private_t *dev_priv = dev->dev_private;
+ 
+-	DRM_DEBUG("idle timer fired, downclocking\n");
++	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
+ 
+ 	dev_priv->busy = false;
+ 
+@@ -3783,7 +3906,7 @@ static void intel_crtc_idle_timer(unsigned long arg)
+ 	struct drm_crtc *crtc = &intel_crtc->base;
+ 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+ 
+-	DRM_DEBUG("idle timer fired, downclocking\n");
++	DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
+ 
+ 	intel_crtc->busy = false;
+ 
+@@ -3799,14 +3922,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+ 	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ 	int dpll = I915_READ(dpll_reg);
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		return;
+ 
+ 	if (!dev_priv->lvds_downclock_avail)
+ 		return;
+ 
+ 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
+-		DRM_DEBUG("upclocking LVDS\n");
++		DRM_DEBUG_DRIVER("upclocking LVDS\n");
+ 
+ 		/* Unlock panel regs */
+ 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+@@ -3817,7 +3940,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
+ 		intel_wait_for_vblank(dev);
+ 		dpll = I915_READ(dpll_reg);
+ 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
+-			DRM_DEBUG("failed to upclock LVDS!\n");
++			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
+ 
+ 		/* ...and lock them again */
+ 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+@@ -3838,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+ 	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ 	int dpll = I915_READ(dpll_reg);
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		return;
+ 
+ 	if (!dev_priv->lvds_downclock_avail)
+@@ -3849,7 +3972,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+ 	 * the manual case.
+ 	 */
+ 	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
+-		DRM_DEBUG("downclocking LVDS\n");
++		DRM_DEBUG_DRIVER("downclocking LVDS\n");
+ 
+ 		/* Unlock panel regs */
+ 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
+@@ -3860,7 +3983,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
+ 		intel_wait_for_vblank(dev);
+ 		dpll = I915_READ(dpll_reg);
+ 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
+-			DRM_DEBUG("failed to downclock LVDS!\n");
++			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
+ 
+ 		/* ...and lock them again */
+ 		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
+@@ -3921,7 +4044,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return;
+ 
+-	dev_priv->busy = true;
++	if (!dev_priv->busy)
++		dev_priv->busy = true;
++	else
++		mod_timer(&dev_priv->idle_timer, jiffies +
++			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ 
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 		if (!crtc->fb)
+@@ -3951,6 +4078,180 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
+ 	kfree(intel_crtc);
+ }
+ 
++struct intel_unpin_work {
++	struct work_struct work;
++	struct drm_device *dev;
++	struct drm_gem_object *old_fb_obj;
++	struct drm_gem_object *pending_flip_obj;
++	struct drm_pending_vblank_event *event;
++	int pending;
++};
++
++static void intel_unpin_work_fn(struct work_struct *__work)
++{
++	struct intel_unpin_work *work =
++		container_of(__work, struct intel_unpin_work, work);
++
++	mutex_lock(&work->dev->struct_mutex);
++	i915_gem_object_unpin(work->old_fb_obj);
++	drm_gem_object_unreference(work->pending_flip_obj);
++	drm_gem_object_unreference(work->old_fb_obj);
++	mutex_unlock(&work->dev->struct_mutex);
++	kfree(work);
++}
++
++void intel_finish_page_flip(struct drm_device *dev, int pipe)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	struct intel_unpin_work *work;
++	struct drm_i915_gem_object *obj_priv;
++	struct drm_pending_vblank_event *e;
++	struct timeval now;
++	unsigned long flags;
++
++	/* Ignore early vblank irqs */
++	if (intel_crtc == NULL)
++		return;
++
++	spin_lock_irqsave(&dev->event_lock, flags);
++	work = intel_crtc->unpin_work;
++	if (work == NULL || !work->pending) {
++		if (work && !work->pending) {
++			obj_priv = work->pending_flip_obj->driver_private;
++			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
++					 obj_priv,
++					 atomic_read(&obj_priv->pending_flip));
++		}
++		spin_unlock_irqrestore(&dev->event_lock, flags);
++		return;
++	}
++
++	intel_crtc->unpin_work = NULL;
++	drm_vblank_put(dev, intel_crtc->pipe);
++
++	if (work->event) {
++		e = work->event;
++		do_gettimeofday(&now);
++		e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
++		e->event.tv_sec = now.tv_sec;
++		e->event.tv_usec = now.tv_usec;
++		list_add_tail(&e->base.link,
++			      &e->base.file_priv->event_list);
++		wake_up_interruptible(&e->base.file_priv->event_wait);
++	}
++
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++
++	obj_priv = work->pending_flip_obj->driver_private;
++
++	/* Initial scanout buffer will have a 0 pending flip count */
++	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
++	    atomic_dec_and_test(&obj_priv->pending_flip))
++		DRM_WAKEUP(&dev_priv->pending_flip_queue);
++	schedule_work(&work->work);
++}
++
++void intel_prepare_page_flip(struct drm_device *dev, int plane)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_crtc *intel_crtc =
++		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
++	unsigned long flags;
++
++	spin_lock_irqsave(&dev->event_lock, flags);
++	if (intel_crtc->unpin_work) {
++		intel_crtc->unpin_work->pending = 1;
++	} else {
++		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
++	}
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++}
++
++static int intel_crtc_page_flip(struct drm_crtc *crtc,
++				struct drm_framebuffer *fb,
++				struct drm_pending_vblank_event *event)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct intel_framebuffer *intel_fb;
++	struct drm_i915_gem_object *obj_priv;
++	struct drm_gem_object *obj;
++	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
++	struct intel_unpin_work *work;
++	unsigned long flags;
++	int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
++	int ret, pipesrc;
++	RING_LOCALS;
++
++	work = kzalloc(sizeof *work, GFP_KERNEL);
++	if (work == NULL)
++		return -ENOMEM;
++
++	mutex_lock(&dev->struct_mutex);
++
++	work->event = event;
++	work->dev = crtc->dev;
++	intel_fb = to_intel_framebuffer(crtc->fb);
++	work->old_fb_obj = intel_fb->obj;
++	INIT_WORK(&work->work, intel_unpin_work_fn);
++
++	/* We borrow the event spin lock for protecting unpin_work */
++	spin_lock_irqsave(&dev->event_lock, flags);
++	if (intel_crtc->unpin_work) {
++		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
++		spin_unlock_irqrestore(&dev->event_lock, flags);
++		kfree(work);
++		mutex_unlock(&dev->struct_mutex);
++		return -EBUSY;
++	}
++	intel_crtc->unpin_work = work;
++	spin_unlock_irqrestore(&dev->event_lock, flags);
++
++	intel_fb = to_intel_framebuffer(fb);
++	obj = intel_fb->obj;
++
++	ret = intel_pin_and_fence_fb_obj(dev, obj);
++	if (ret != 0) {
++		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
++			  obj->driver_private);
++		kfree(work);
++		intel_crtc->unpin_work = NULL;
++		mutex_unlock(&dev->struct_mutex);
++		return ret;
++	}
++
++	/* Reference the objects for the scheduled work. */
++	drm_gem_object_reference(work->old_fb_obj);
++	drm_gem_object_reference(obj);
++
++	crtc->fb = fb;
++	i915_gem_object_flush_write_domain(obj);
++	drm_vblank_get(dev, intel_crtc->pipe);
++	obj_priv = obj->driver_private;
++	atomic_inc(&obj_priv->pending_flip);
++	work->pending_flip_obj = obj;
++
++	BEGIN_LP_RING(4);
++	OUT_RING(MI_DISPLAY_FLIP |
++		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
++	OUT_RING(fb->pitch);
++	if (IS_I965G(dev)) {
++		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
++		pipesrc = I915_READ(pipesrc_reg);
++		OUT_RING(pipesrc & 0x0fff0fff);
++	} else {
++		OUT_RING(obj_priv->gtt_offset);
++		OUT_RING(MI_NOOP);
++	}
++	ADVANCE_LP_RING();
++
++	mutex_unlock(&dev->struct_mutex);
++
++	return 0;
++}
++
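
With the .page_flip hook wired into intel_crtc_funcs below, userspace can exercise this path through the generic page-flip ioctl. A minimal libdrm sketch of the round trip (illustrative only; error handling omitted, and crtc_id/fb_id are assumed to come from earlier modesetting calls):

#include <poll.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void on_flip(int fd, unsigned int sequence, unsigned int tv_sec,
		    unsigned int tv_usec, void *user_data)
{
	/* runs once the flip completed on vblank, with the kernel's
	 * sequence number and timestamp from the completion event */
}

static void flip_once(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = on_flip,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* queue the flip; completion is delivered as a DRM event */
	drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, NULL);

	poll(&pfd, 1, -1);		/* wait for the event ...           */
	drmHandleEvent(fd, &evctx);	/* ... and dispatch it to on_flip() */
}
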
+ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
+ 	.dpms = intel_crtc_dpms,
+ 	.mode_fixup = intel_crtc_mode_fixup,
+@@ -3967,11 +4268,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
+ 	.gamma_set = intel_crtc_gamma_set,
+ 	.set_config = drm_crtc_helper_set_config,
+ 	.destroy = intel_crtc_destroy,
++	.page_flip = intel_crtc_page_flip,
+ };
+ 
+ 
+ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ {
++	drm_i915_private_t *dev_priv = dev->dev_private;
+ 	struct intel_crtc *intel_crtc;
+ 	int i;
+ 
+@@ -3994,10 +4297,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
+ 	intel_crtc->pipe = pipe;
+ 	intel_crtc->plane = pipe;
+ 	if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
+-		DRM_DEBUG("swapping pipes & planes for FBC\n");
++		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
+ 		intel_crtc->plane = ((pipe == 0) ? 1 : 0);
+ 	}
+ 
++	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
++	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
++	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
++	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
++
+ 	intel_crtc->cursor_addr = 0;
+ 	intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
+@@ -4074,7 +4382,7 @@ static void intel_setup_outputs(struct drm_device *dev)
+ 	if (IS_MOBILE(dev) && !IS_I830(dev))
+ 		intel_lvds_init(dev);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		int found;
+ 
+ 		if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
+@@ -4247,6 +4555,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
+ 	.fb_changed = intelfb_probe,
+ };
+ 
++static struct drm_gem_object *
++intel_alloc_power_context(struct drm_device *dev)
++{
++	struct drm_gem_object *pwrctx;
++	int ret;
++
++	pwrctx = drm_gem_object_alloc(dev, 4096);
++	if (!pwrctx) {
++		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
++		return NULL;
++	}
++
++	mutex_lock(&dev->struct_mutex);
++	ret = i915_gem_object_pin(pwrctx, 4096);
++	if (ret) {
++		DRM_ERROR("failed to pin power context: %d\n", ret);
++		goto err_unref;
++	}
++
++	ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
++	if (ret) {
++		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
++		goto err_unpin;
++	}
++	mutex_unlock(&dev->struct_mutex);
++
++	return pwrctx;
++
++err_unpin:
++	i915_gem_object_unpin(pwrctx);
++err_unref:
++	drm_gem_object_unreference(pwrctx);
++	mutex_unlock(&dev->struct_mutex);
++	return NULL;
++}
++
+ void intel_init_clock_gating(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -4255,7 +4599,7 @@ void intel_init_clock_gating(struct drm_device *dev)
+ 	 * Disable clock gating reported to work incorrectly according to the
+ 	 * specs, but enable as much else as we can.
+ 	 */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		return;
+ 	} else if (IS_G4X(dev)) {
+ 		uint32_t dspclk_gate;
+@@ -4289,11 +4633,37 @@ void intel_init_clock_gating(struct drm_device *dev)
+ 		dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
+ 			DSTATE_DOT_CLOCK_GATING;
+ 		I915_WRITE(D_STATE, dstate);
+-	} else if (IS_I855(dev) || IS_I865G(dev)) {
++	} else if (IS_I85X(dev) || IS_I865G(dev)) {
+ 		I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
+ 	} else if (IS_I830(dev)) {
+ 		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+ 	}
++
++	/*
++	 * GPU can automatically power down the render unit if given a page
++	 * to save state.
++	 */
++	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
++		struct drm_i915_gem_object *obj_priv = NULL;
++
++		if (dev_priv->pwrctx) {
++			obj_priv = dev_priv->pwrctx->driver_private;
++		} else {
++			struct drm_gem_object *pwrctx;
++
++			pwrctx = intel_alloc_power_context(dev);
++			if (pwrctx) {
++				dev_priv->pwrctx = pwrctx;
++				obj_priv = pwrctx->driver_private;
++			}
++		}
++
++		if (obj_priv) {
++			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
++			I915_WRITE(MCHBAR_RENDER_STANDBY,
++				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
++		}
++	}
+ }
+ 
+ /* Set up chip specific display functions */
+@@ -4302,8 +4672,8 @@ static void intel_init_display(struct drm_device *dev)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	/* We always want a DPMS function */
+-	if (IS_IGDNG(dev))
+-		dev_priv->display.dpms = igdng_crtc_dpms;
++	if (IS_IRONLAKE(dev))
++		dev_priv->display.dpms = ironlake_crtc_dpms;
+ 	else
+ 		dev_priv->display.dpms = i9xx_crtc_dpms;
+ 
+@@ -4322,13 +4692,13 @@ static void intel_init_display(struct drm_device *dev)
+ 	}
+ 
+ 	/* Returns the core display clock speed */
+-	if (IS_I945G(dev))
++	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
+ 		dev_priv->display.get_display_clock_speed =
+ 			i945_get_display_clock_speed;
+ 	else if (IS_I915G(dev))
+ 		dev_priv->display.get_display_clock_speed =
+ 			i915_get_display_clock_speed;
+-	else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
++	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ 		dev_priv->display.get_display_clock_speed =
+ 			i9xx_misc_get_display_clock_speed;
+ 	else if (IS_I915GM(dev))
+@@ -4337,7 +4707,7 @@ static void intel_init_display(struct drm_device *dev)
+ 	else if (IS_I865G(dev))
+ 		dev_priv->display.get_display_clock_speed =
+ 			i865_get_display_clock_speed;
+-	else if (IS_I855(dev))
++	else if (IS_I85X(dev))
+ 		dev_priv->display.get_display_clock_speed =
+ 			i855_get_display_clock_speed;
+ 	else /* 852, 830 */
+@@ -4345,7 +4715,7 @@ static void intel_init_display(struct drm_device *dev)
+ 			i830_get_display_clock_speed;
+ 
+ 	/* For FIFO watermark updates */
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		dev_priv->display.update_wm = NULL;
+ 	else if (IS_G4X(dev))
+ 		dev_priv->display.update_wm = g4x_update_wm;
+@@ -4401,7 +4771,7 @@ void intel_modeset_init(struct drm_device *dev)
+ 		num_pipe = 2;
+ 	else
+ 		num_pipe = 1;
+-	DRM_DEBUG("%d display pipe%s available.\n",
++	DRM_DEBUG_KMS("%d display pipe%s available.\n",
+ 		  num_pipe, num_pipe > 1 ? "s" : "");
+ 
+ 	if (IS_I85X(dev))
+@@ -4420,6 +4790,15 @@ void intel_modeset_init(struct drm_device *dev)
+ 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
+ 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
+ 		    (unsigned long)dev);
++
++	intel_setup_overlay(dev);
++
++	if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
++							dev_priv->fsb_freq,
++							dev_priv->mem_freq))
++		DRM_INFO("failed to find known CxSR latency "
++			 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
++			 dev_priv->fsb_freq, dev_priv->mem_freq);
+ }
+ 
+ void intel_modeset_cleanup(struct drm_device *dev)
+@@ -4442,11 +4821,21 @@ void intel_modeset_cleanup(struct drm_device *dev)
+ 
+ 	del_timer_sync(&dev_priv->idle_timer);
+ 
+-	mutex_unlock(&dev->struct_mutex);
+-
+ 	if (dev_priv->display.disable_fbc)
+ 		dev_priv->display.disable_fbc(dev);
+ 
++	if (dev_priv->pwrctx) {
++		struct drm_i915_gem_object *obj_priv;
++
++		obj_priv = dev_priv->pwrctx->driver_private;
++		I915_WRITE(PWRCTXA, obj_priv->gtt_offset & ~PWRCTX_EN);
++		I915_READ(PWRCTXA);
++		i915_gem_object_unpin(dev_priv->pwrctx);
++		drm_gem_object_unreference(dev_priv->pwrctx);
++	}
++
++	mutex_unlock(&dev->struct_mutex);
++
+ 	drm_mode_config_cleanup(dev);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index d487771..439506c 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -33,7 +33,8 @@
+ #include "intel_drv.h"
+ #include "i915_drm.h"
+ #include "i915_drv.h"
+-#include "intel_dp.h"
++#include "drm_dp_helper.h"
++
+ 
+ #define DP_LINK_STATUS_SIZE	6
+ #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
+@@ -124,9 +125,15 @@ intel_dp_link_clock(uint8_t link_bw)
+ 
+ /* I think this is a fiction */
+ static int
+-intel_dp_link_required(int pixel_clock)
++intel_dp_link_required(struct drm_device *dev,
++		       struct intel_output *intel_output, int pixel_clock)
+ {
+-	return pixel_clock * 3;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++
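++	/* eDP links carry the panel's actual bits per pixel (edp_bpp, from
++	 * the VBT); external DP assumes 24bpp, i.e. 3 bytes per pixel. */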
++	if (IS_eDP(intel_output))
++		return (pixel_clock * dev_priv->edp_bpp) / 8;
++	else
++		return pixel_clock * 3;
+ }
+ 
+ static int
+@@ -137,7 +144,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
+ 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output));
+ 	int max_lanes = intel_dp_max_lane_count(intel_output);
+ 
+-	if (intel_dp_link_required(mode->clock) > max_link_clock * max_lanes)
++	if (intel_dp_link_required(connector->dev, intel_output, mode->clock)
++			> max_link_clock * max_lanes)
+ 		return MODE_CLOCK_HIGH;
+ 
+ 	if (mode->clock < 10000)
+@@ -223,8 +231,8 @@ intel_dp_aux_ch(struct intel_output *intel_output,
+ 	 */
+ 	if (IS_eDP(intel_output))
+ 		aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+-	else if (IS_IGDNG(dev))
+-		aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */
++	else if (IS_IRONLAKE(dev))
++		aux_clock_divider = 62; /* Ironlake: input clock fixed at 125Mhz */
+ 	else
+ 		aux_clock_divider = intel_hrawclk(dev) / 2;
+ 
+@@ -282,7 +290,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
+ 	/* Timeouts occur when the device isn't connected, so they're
+ 	 * "normal" -- don't fill the kernel log with these */
+ 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
+-		DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
++		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ 		return -ETIMEDOUT;
+ 	}
+ 
+@@ -382,17 +390,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
+ }
+ 
+ static int
+-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
+-		    uint8_t *send, int send_bytes,
+-		    uint8_t *recv, int recv_bytes)
++intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
++		    uint8_t write_byte, uint8_t *read_byte)
+ {
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+ 	struct intel_dp_priv *dp_priv = container_of(adapter,
+ 						     struct intel_dp_priv,
+ 						     adapter);
+ 	struct intel_output *intel_output = dp_priv->intel_output;
++	uint16_t address = algo_data->address;
++	uint8_t msg[5];
++	uint8_t reply[2];
++	int msg_bytes;
++	int reply_bytes;
++	int ret;
++
++	/* Set up the command byte */
++	if (mode & MODE_I2C_READ)
++		msg[0] = AUX_I2C_READ << 4;
++	else
++		msg[0] = AUX_I2C_WRITE << 4;
++
++	if (!(mode & MODE_I2C_STOP))
++		msg[0] |= AUX_I2C_MOT << 4;
++
++	msg[1] = address >> 8;
++	msg[2] = address;
++
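++	/* AUX message layout per the DisplayPort spec: the request nibble
++	 * (including the MOT bit) sits in the high bits of msg[0],
++	 * msg[1]/msg[2] carry the i2c address, msg[3] holds the payload
++	 * length minus one, and msg[4] the write data if any. */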
++	switch (mode) {
++	case MODE_I2C_WRITE:
++		msg[3] = 0;
++		msg[4] = write_byte;
++		msg_bytes = 5;
++		reply_bytes = 1;
++		break;
++	case MODE_I2C_READ:
++		msg[3] = 0;
++		msg_bytes = 4;
++		reply_bytes = 2;
++		break;
++	default:
++		msg_bytes = 3;
++		reply_bytes = 1;
++		break;
++	}
+ 
+-	return intel_dp_aux_ch(intel_output,
+-			       send, send_bytes, recv, recv_bytes);
++	for (;;) {
++		ret = intel_dp_aux_ch(intel_output,
++				      msg, msg_bytes,
++				      reply, reply_bytes);
++		if (ret < 0) {
++			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
++			return ret;
++		}
++		switch (reply[0] & AUX_I2C_REPLY_MASK) {
++		case AUX_I2C_REPLY_ACK:
++			if (mode == MODE_I2C_READ) {
++				*read_byte = reply[1];
++			}
++			return reply_bytes - 1;
++		case AUX_I2C_REPLY_NACK:
++			DRM_DEBUG_KMS("aux_ch nack\n");
++			return -EREMOTEIO;
++		case AUX_I2C_REPLY_DEFER:
++			DRM_DEBUG_KMS("aux_ch defer\n");
++			udelay(100);
++			break;
++		default:
++			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
++			return -EREMOTEIO;
++		}
++	}
+ }
+ 
+ static int
+@@ -431,11 +499,13 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 		for (clock = 0; clock <= max_clock; clock++) {
+ 			int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
+ 
+-			if (intel_dp_link_required(mode->clock) <= link_avail) {
++			if (intel_dp_link_required(encoder->dev, intel_output, mode->clock)
++					<= link_avail) {
+ 				dp_priv->link_bw = bws[clock];
+ 				dp_priv->lane_count = lane_count;
+ 				adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
+-				DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
++				DRM_DEBUG_KMS("Display port link bw %02x lane "
++						"count %d clock %d\n",
+ 				       dp_priv->link_bw, dp_priv->lane_count,
+ 				       adjusted_mode->clock);
+ 				return true;
+@@ -514,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ 	intel_dp_compute_m_n(3, lane_count,
+ 			     mode->clock, adjusted_mode->clock, &m_n);
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		if (intel_crtc->pipe == 0) {
+ 			I915_WRITE(TRANSA_DATA_M1,
+ 				   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+@@ -606,23 +676,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
+ 	}
+ }
+ 
+-static void igdng_edp_backlight_on (struct drm_device *dev)
++static void ironlake_edp_backlight_on (struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 pp;
+ 
+-	DRM_DEBUG("\n");
++	DRM_DEBUG_KMS("\n");
+ 	pp = I915_READ(PCH_PP_CONTROL);
+ 	pp |= EDP_BLC_ENABLE;
+ 	I915_WRITE(PCH_PP_CONTROL, pp);
+ }
+ 
+-static void igdng_edp_backlight_off (struct drm_device *dev)
++static void ironlake_edp_backlight_off (struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 pp;
+ 
+-	DRM_DEBUG("\n");
++	DRM_DEBUG_KMS("\n");
+ 	pp = I915_READ(PCH_PP_CONTROL);
+ 	pp &= ~EDP_BLC_ENABLE;
+ 	I915_WRITE(PCH_PP_CONTROL, pp);
+@@ -641,13 +711,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
+ 		if (dp_reg & DP_PORT_EN) {
+ 			intel_dp_link_down(intel_output, dp_priv->DP);
+ 			if (IS_eDP(intel_output))
+-				igdng_edp_backlight_off(dev);
++				ironlake_edp_backlight_off(dev);
+ 		}
+ 	} else {
+ 		if (!(dp_reg & DP_PORT_EN)) {
+ 			intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
+ 			if (IS_eDP(intel_output))
+-				igdng_edp_backlight_on(dev);
++				ironlake_edp_backlight_on(dev);
+ 		}
+ 	}
+ 	dp_priv->dpms_mode = mode;
+@@ -1010,7 +1080,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+ 
+-	DRM_DEBUG("\n");
++	DRM_DEBUG_KMS("\n");
+ 
+ 	if (IS_eDP(intel_output)) {
+ 		DP &= ~DP_PLL_ENABLE;
+@@ -1071,7 +1141,7 @@ intel_dp_check_link_status(struct intel_output *intel_output)
+ }
+ 
+ static enum drm_connector_status
+-igdng_dp_detect(struct drm_connector *connector)
++ironlake_dp_detect(struct drm_connector *connector)
+ {
+ 	struct intel_output *intel_output = to_intel_output(connector);
+ 	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
+@@ -1106,8 +1176,8 @@ intel_dp_detect(struct drm_connector *connector)
+ 
+ 	dp_priv->has_audio = false;
+ 
+-	if (IS_IGDNG(dev))
+-		return igdng_dp_detect(connector);
++	if (IS_IRONLAKE(dev))
++		return ironlake_dp_detect(connector);
+ 
+ 	temp = I915_READ(PORT_HOTPLUG_EN);
+ 
+@@ -1261,11 +1331,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
+ 	else if (output_reg == DP_D || output_reg == PCH_DP_D)
+ 		intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
+ 
+-	if (IS_eDP(intel_output)) {
+-		intel_output->crtc_mask = (1 << 1);
++	if (IS_eDP(intel_output))
+ 		intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
+-	} else
+-		intel_output->crtc_mask = (1 << 0) | (1 << 1);
++
++	intel_output->crtc_mask = (1 << 0) | (1 << 1);
+ 	connector->interlace_allowed = true;
+ 	connector->doublescan_allowed = 0;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
+deleted file mode 100644
+index 2b38054..0000000
+--- a/drivers/gpu/drm/i915/intel_dp.h
++++ /dev/null
+@@ -1,144 +0,0 @@
+-/*
+- * Copyright © 2008 Keith Packard
+- *
+- * Permission to use, copy, modify, distribute, and sell this software and its
+- * documentation for any purpose is hereby granted without fee, provided that
+- * the above copyright notice appear in all copies and that both that copyright
+- * notice and this permission notice appear in supporting documentation, and
+- * that the name of the copyright holders not be used in advertising or
+- * publicity pertaining to distribution of the software without specific,
+- * written prior permission.  The copyright holders make no representations
+- * about the suitability of this software for any purpose.  It is provided "as
+- * is" without express or implied warranty.
+- *
+- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+- * OF THIS SOFTWARE.
+- */
+-
+-#ifndef _INTEL_DP_H_
+-#define _INTEL_DP_H_
+-
+-/* From the VESA DisplayPort spec */
+-
+-#define AUX_NATIVE_WRITE	0x8
+-#define AUX_NATIVE_READ		0x9
+-#define AUX_I2C_WRITE		0x0
+-#define AUX_I2C_READ		0x1
+-#define AUX_I2C_STATUS		0x2
+-#define AUX_I2C_MOT		0x4
+-
+-#define AUX_NATIVE_REPLY_ACK	(0x0 << 4)
+-#define AUX_NATIVE_REPLY_NACK	(0x1 << 4)
+-#define AUX_NATIVE_REPLY_DEFER	(0x2 << 4)
+-#define AUX_NATIVE_REPLY_MASK	(0x3 << 4)
+-
+-#define AUX_I2C_REPLY_ACK	(0x0 << 6)
+-#define AUX_I2C_REPLY_NACK	(0x1 << 6)
+-#define AUX_I2C_REPLY_DEFER	(0x2 << 6)
+-#define AUX_I2C_REPLY_MASK	(0x3 << 6)
+-
+-/* AUX CH addresses */
+-#define	DP_LINK_BW_SET		0x100
+-# define DP_LINK_BW_1_62		    0x06
+-# define DP_LINK_BW_2_7			    0x0a
+-
+-#define DP_LANE_COUNT_SET	0x101
+-# define DP_LANE_COUNT_MASK		    0x0f
+-# define DP_LANE_COUNT_ENHANCED_FRAME_EN    (1 << 7)
+-
+-#define DP_TRAINING_PATTERN_SET	0x102
+-
+-# define DP_TRAINING_PATTERN_DISABLE	    0
+-# define DP_TRAINING_PATTERN_1		    1
+-# define DP_TRAINING_PATTERN_2		    2
+-# define DP_TRAINING_PATTERN_MASK	    0x3
+-
+-# define DP_LINK_QUAL_PATTERN_DISABLE	    (0 << 2)
+-# define DP_LINK_QUAL_PATTERN_D10_2	    (1 << 2)
+-# define DP_LINK_QUAL_PATTERN_ERROR_RATE    (2 << 2)
+-# define DP_LINK_QUAL_PATTERN_PRBS7	    (3 << 2)
+-# define DP_LINK_QUAL_PATTERN_MASK	    (3 << 2)
+-
+-# define DP_RECOVERED_CLOCK_OUT_EN	    (1 << 4)
+-# define DP_LINK_SCRAMBLING_DISABLE	    (1 << 5)
+-
+-# define DP_SYMBOL_ERROR_COUNT_BOTH	    (0 << 6)
+-# define DP_SYMBOL_ERROR_COUNT_DISPARITY    (1 << 6)
+-# define DP_SYMBOL_ERROR_COUNT_SYMBOL	    (2 << 6)
+-# define DP_SYMBOL_ERROR_COUNT_MASK	    (3 << 6)
+-
+-#define DP_TRAINING_LANE0_SET		    0x103
+-#define DP_TRAINING_LANE1_SET		    0x104
+-#define DP_TRAINING_LANE2_SET		    0x105
+-#define DP_TRAINING_LANE3_SET		    0x106
+-
+-# define DP_TRAIN_VOLTAGE_SWING_MASK	    0x3
+-# define DP_TRAIN_VOLTAGE_SWING_SHIFT	    0
+-# define DP_TRAIN_MAX_SWING_REACHED	    (1 << 2)
+-# define DP_TRAIN_VOLTAGE_SWING_400	    (0 << 0)
+-# define DP_TRAIN_VOLTAGE_SWING_600	    (1 << 0)
+-# define DP_TRAIN_VOLTAGE_SWING_800	    (2 << 0)
+-# define DP_TRAIN_VOLTAGE_SWING_1200	    (3 << 0)
+-
+-# define DP_TRAIN_PRE_EMPHASIS_MASK	    (3 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_0	    (0 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_3_5	    (1 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_6	    (2 << 3)
+-# define DP_TRAIN_PRE_EMPHASIS_9_5	    (3 << 3)
+-
+-# define DP_TRAIN_PRE_EMPHASIS_SHIFT	    3
+-# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)
+-
+-#define DP_DOWNSPREAD_CTRL		    0x107
+-# define DP_SPREAD_AMP_0_5		    (1 << 4)
+-
+-#define DP_MAIN_LINK_CHANNEL_CODING_SET	    0x108
+-# define DP_SET_ANSI_8B10B		    (1 << 0)
+-
+-#define DP_LANE0_1_STATUS		    0x202
+-#define DP_LANE2_3_STATUS		    0x203
+-
+-# define DP_LANE_CR_DONE		    (1 << 0)
+-# define DP_LANE_CHANNEL_EQ_DONE	    (1 << 1)
+-# define DP_LANE_SYMBOL_LOCKED		    (1 << 2)
+-
+-#define DP_LANE_ALIGN_STATUS_UPDATED	    0x204
+-
+-#define DP_INTERLANE_ALIGN_DONE		    (1 << 0)
+-#define DP_DOWNSTREAM_PORT_STATUS_CHANGED   (1 << 6)
+-#define DP_LINK_STATUS_UPDATED		    (1 << 7)
+-
+-#define DP_SINK_STATUS			    0x205
+-
+-#define DP_RECEIVE_PORT_0_STATUS	    (1 << 0)
+-#define DP_RECEIVE_PORT_1_STATUS	    (1 << 1)
+-
+-#define DP_ADJUST_REQUEST_LANE0_1	    0x206
+-#define DP_ADJUST_REQUEST_LANE2_3	    0x207
+-
+-#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK  0x03
+-#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
+-#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK   0x0c
+-#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT  2
+-#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK  0x30
+-#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
+-#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
+-#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
+-
+-struct i2c_algo_dp_aux_data {
+-	bool running;
+-	u16 address;
+-	int (*aux_ch) (struct i2c_adapter *adapter,
+-		       uint8_t *send, int send_bytes,
+-		       uint8_t *recv, int recv_bytes);
+-};
+-
+-int
+-i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
+-
+-#endif /* _INTEL_DP_H_ */
+diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/i915/intel_dp_i2c.c
+deleted file mode 100644
+index a63b6f5..0000000
+--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
++++ /dev/null
+@@ -1,273 +0,0 @@
+-/*
+- * Copyright © 2009 Keith Packard
+- *
+- * Permission to use, copy, modify, distribute, and sell this software and its
+- * documentation for any purpose is hereby granted without fee, provided that
+- * the above copyright notice appear in all copies and that both that copyright
+- * notice and this permission notice appear in supporting documentation, and
+- * that the name of the copyright holders not be used in advertising or
+- * publicity pertaining to distribution of the software without specific,
+- * written prior permission.  The copyright holders make no representations
+- * about the suitability of this software for any purpose.  It is provided "as
+- * is" without express or implied warranty.
+- *
+- * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+- * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+- * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+- * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+- * OF THIS SOFTWARE.
+- */
+-
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/delay.h>
+-#include <linux/slab.h>
+-#include <linux/init.h>
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/i2c.h>
+-#include "intel_dp.h"
+-#include "drmP.h"
+-
+-/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+-
+-#define MODE_I2C_START	1
+-#define MODE_I2C_WRITE	2
+-#define MODE_I2C_READ	4
+-#define MODE_I2C_STOP	8
+-
+-static int
+-i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+-			    uint8_t write_byte, uint8_t *read_byte)
+-{
+-	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+-	uint16_t address = algo_data->address;
+-	uint8_t msg[5];
+-	uint8_t reply[2];
+-	int msg_bytes;
+-	int reply_bytes;
+-	int ret;
+-
+-	/* Set up the command byte */
+-	if (mode & MODE_I2C_READ)
+-		msg[0] = AUX_I2C_READ << 4;
+-	else
+-		msg[0] = AUX_I2C_WRITE << 4;
+-
+-	if (!(mode & MODE_I2C_STOP))
+-		msg[0] |= AUX_I2C_MOT << 4;
+-
+-	msg[1] = address >> 8;
+-	msg[2] = address;
+-
+-	switch (mode) {
+-	case MODE_I2C_WRITE:
+-		msg[3] = 0;
+-		msg[4] = write_byte;
+-		msg_bytes = 5;
+-		reply_bytes = 1;
+-		break;
+-	case MODE_I2C_READ:
+-		msg[3] = 0;
+-		msg_bytes = 4;
+-		reply_bytes = 2;
+-		break;
+-	default:
+-		msg_bytes = 3;
+-		reply_bytes = 1;
+-		break;
+-	}
+-
+-	for (;;) {
+-		ret = (*algo_data->aux_ch)(adapter,
+-					   msg, msg_bytes,
+-					   reply, reply_bytes);
+-		if (ret < 0) {
+-			DRM_DEBUG("aux_ch failed %d\n", ret);
+-			return ret;
+-		}
+-		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+-		case AUX_I2C_REPLY_ACK:
+-			if (mode == MODE_I2C_READ) {
+-				*read_byte = reply[1];
+-			}
+-			return reply_bytes - 1;
+-		case AUX_I2C_REPLY_NACK:
+-			DRM_DEBUG("aux_ch nack\n");
+-			return -EREMOTEIO;
+-		case AUX_I2C_REPLY_DEFER:
+-			DRM_DEBUG("aux_ch defer\n");
+-			udelay(100);
+-			break;
+-		default:
+-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+-			return -EREMOTEIO;
+-		}
+-	}
+-}
+-
+-/*
+- * I2C over AUX CH
+- */
+-
+-/*
+- * Send the address. If the I2C link is running, this 'restarts'
+- * the connection with the new address, this is used for doing
+- * a write followed by a read (as needed for DDC)
+- */
+-static int
+-i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+-{
+-	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+-	int mode = MODE_I2C_START;
+-	int ret;
+-
+-	if (reading)
+-		mode |= MODE_I2C_READ;
+-	else
+-		mode |= MODE_I2C_WRITE;
+-	algo_data->address = address;
+-	algo_data->running = true;
+-	ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+-	return ret;
+-}
+-
+-/*
+- * Stop the I2C transaction. This closes out the link, sending
+- * a bare address packet with the MOT bit turned off
+- */
+-static void
+-i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+-{
+-	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+-	int mode = MODE_I2C_STOP;
+-
+-	if (reading)
+-		mode |= MODE_I2C_READ;
+-	else
+-		mode |= MODE_I2C_WRITE;
+-	if (algo_data->running) {
+-		(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+-		algo_data->running = false;
+-	}
+-}
+-
+-/*
+- * Write a single byte to the current I2C address, the
+- * the I2C link must be running or this returns -EIO
+- */
+-static int
+-i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+-{
+-	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+-	int ret;
+-
+-	if (!algo_data->running)
+-		return -EIO;
+-
+-	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+-	return ret;
+-}
+-
+-/*
+- * Read a single byte from the current I2C address, the
+- * I2C link must be running or this returns -EIO
+- */
+-static int
+-i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+-{
+-	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+-	int ret;
+-
+-	if (!algo_data->running)
+-		return -EIO;
+-
+-	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+-	return ret;
+-}
+-
+-static int
+-i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+-		     struct i2c_msg *msgs,
+-		     int num)
+-{
+-	int ret = 0;
+-	bool reading = false;
+-	int m;
+-	int b;
+-
+-	for (m = 0; m < num; m++) {
+-		u16 len = msgs[m].len;
+-		u8 *buf = msgs[m].buf;
+-		reading = (msgs[m].flags & I2C_M_RD) != 0;
+-		ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+-		if (ret < 0)
+-			break;
+-		if (reading) {
+-			for (b = 0; b < len; b++) {
+-				ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+-				if (ret < 0)
+-					break;
+-			}
+-		} else {
+-			for (b = 0; b < len; b++) {
+-				ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+-				if (ret < 0)
+-					break;
+-			}
+-		}
+-		if (ret < 0)
+-			break;
+-	}
+-	if (ret >= 0)
+-		ret = num;
+-	i2c_algo_dp_aux_stop(adapter, reading);
+-	DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+-	return ret;
+-}
+-
+-static u32
+-i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+-{
+-	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+-	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+-	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+-	       I2C_FUNC_10BIT_ADDR;
+-}
+-
+-static const struct i2c_algorithm i2c_dp_aux_algo = {
+-	.master_xfer	= i2c_algo_dp_aux_xfer,
+-	.functionality	= i2c_algo_dp_aux_functionality,
+-};
+-
+-static void
+-i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+-{
+-	(void) i2c_algo_dp_aux_address(adapter, 0, false);
+-	(void) i2c_algo_dp_aux_stop(adapter, false);
+-					   
+-}
+-
+-static int
+-i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+-{
+-	adapter->algo = &i2c_dp_aux_algo;
+-	adapter->retries = 3;
+-	i2c_dp_aux_reset_bus(adapter);
+-	return 0;
+-}
+-
+-int
+-i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+-{
+-	int error;
+-	
+-	error = i2c_dp_aux_prepare_bus(adapter);
+-	if (error)
+-		return error;
+-	error = i2c_add_adapter(adapter);
+-	return error;
+-}
+-EXPORT_SYMBOL(i2c_dp_aux_add_bus);
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 6c7c19f..a51573d 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -110,6 +110,32 @@ struct intel_output {
+ 	int clone_mask;
+ };
+ 
++struct intel_crtc;
++struct intel_overlay {
++	struct drm_device *dev;
++	struct intel_crtc *crtc;
++	struct drm_i915_gem_object *vid_bo;
++	struct drm_i915_gem_object *old_vid_bo;
++	int active;
++	int pfit_active;
++	u32 pfit_vscale_ratio; /* fixed-point number, (1<<12) == 1.0 */
++	u32 color_key;
++	u32 brightness, contrast, saturation;
++	u32 old_xscale, old_yscale;
++	/* register access */
++	u32 flip_addr;
++	struct drm_i915_gem_object *reg_bo;
++	void *virt_addr;
++	/* flip handling */
++	uint32_t last_flip_req;
++	int hw_wedged;
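++	/* tracks the in-flight step of an overlay operation so that
++	 * intel_overlay_recover_from_interrupt() can finish it if the
++	 * wait was interrupted by a signal */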
++#define HW_WEDGED		1
++#define NEEDS_WAIT_FOR_FLIP	2
++#define RELEASE_OLD_VID		3
++#define SWITCH_OFF_STAGE_1	4
++#define SWITCH_OFF_STAGE_2	5
++};
++
+ struct intel_crtc {
+ 	struct drm_crtc base;
+ 	enum pipe pipe;
+@@ -121,6 +147,8 @@ struct intel_crtc {
+ 	bool busy; /* is scanout buffer being updated frequently? */
+ 	struct timer_list idle_timer;
+ 	bool lowfreq_avail;
++	struct intel_overlay *overlay;
++	struct intel_unpin_work *unpin_work;
+ };
+ 
+ #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+@@ -150,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ extern void intel_edp_link_config (struct intel_output *, int *, int *);
+ 
+ 
++extern int intel_panel_fitter_pipe (struct drm_device *dev);
+ extern void intel_crtc_load_lut(struct drm_crtc *crtc);
+ extern void intel_encoder_prepare (struct drm_encoder *encoder);
+ extern void intel_encoder_commit (struct drm_encoder *encoder);
+@@ -179,10 +208,23 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+ 				    u16 blue, int regno);
+ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+ 				    u16 *blue, int regno);
++extern void intel_init_clock_gating(struct drm_device *dev);
+ 
+ extern int intel_framebuffer_create(struct drm_device *dev,
+ 				    struct drm_mode_fb_cmd *mode_cmd,
+ 				    struct drm_framebuffer **fb,
+ 				    struct drm_gem_object *obj);
+ 
++extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
++extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
++
++extern void intel_setup_overlay(struct drm_device *dev);
++extern void intel_cleanup_overlay(struct drm_device *dev);
++extern int intel_overlay_switch_off(struct intel_overlay *overlay);
++extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
++						int interruptible);
++extern int intel_overlay_put_image(struct drm_device *dev, void *data,
++				   struct drm_file *file_priv);
++extern int intel_overlay_attrs(struct drm_device *dev, void *data,
++			       struct drm_file *file_priv);
+ #endif /* __INTEL_DRV_H__ */
+diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
+index 1318ac2..aaabbcb 100644
+--- a/drivers/gpu/drm/i915/intel_fb.c
++++ b/drivers/gpu/drm/i915/intel_fb.c
+@@ -70,7 +70,7 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+ 
+ 
+ /**
+- * Curretly it is assumed that the old framebuffer is reused.
++ * Currently it is assumed that the old framebuffer is reused.
+  *
+  * LOCKING
+  * caller should hold the mode config lock.
+@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
+ 	par->intel_fb = intel_fb;
+ 
+ 	/* To allow resizeing without swapping buffers */
+-	DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
+-		  intel_fb->base.height, obj_priv->gtt_offset, fbo);
++	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
++			intel_fb->base.width, intel_fb->base.height,
++			obj_priv->gtt_offset, fbo);
+ 
+ 	mutex_unlock(&dev->struct_mutex);
+ 	return 0;
+@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
+ {
+ 	int ret;
+ 
+-	DRM_DEBUG("\n");
++	DRM_DEBUG_KMS("\n");
+ 	ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
+index 85760bf..0e268de 100644
+--- a/drivers/gpu/drm/i915/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/intel_hdmi.c
+@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+ 	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
+ 	 * we do this anyway which shows more stable in testing.
+ 	 */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
+ 		POSTING_READ(hdmi_priv->sdvox_reg);
+ 	}
+@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
+ 	/* HW workaround, need to write this twice for issue that may result
+ 	 * in first write getting masked.
+ 	 */
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(hdmi_priv->sdvox_reg, temp);
+ 		POSTING_READ(hdmi_priv->sdvox_reg);
+ 	}
+@@ -225,7 +225,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
+ 	.destroy = intel_hdmi_enc_destroy,
+ };
+ 
+-
+ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index b94acc4..8673c73 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+ 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
+-	if (!IS_IGD(dev))
++	if (!IS_PINEVIEW(dev))
+ 		return;
+ 	if (enable)
+ 		I915_WRITE(DSPCLK_GATE_D,
+@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		I915_WRITE(PCH_GMBUS0, 0);
+ 	} else {
+ 		I915_WRITE(GMBUS0, 0);
+diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
+index 952bb4e..c2e8a45 100644
+--- a/drivers/gpu/drm/i915/intel_lvds.c
++++ b/drivers/gpu/drm/i915/intel_lvds.c
+@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 blc_pwm_ctl, reg;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		reg = BLC_PWM_CPU_CTL;
+ 	else
+ 		reg = BLC_PWM_CTL;
+@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 reg;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		reg = BLC_PWM_PCH_CTL2;
+ 	else
+ 		reg = BLC_PWM_CTL;
+@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 pp_status, ctl_reg, status_reg;
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		ctl_reg = PCH_PP_CONTROL;
+ 		status_reg = PCH_PP_STATUS;
+ 	} else {
+@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector)
+ 	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
+ 	u32 pwm_ctl_reg;
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		pp_on_reg = PCH_PP_ON_DELAYS;
+ 		pp_off_reg = PCH_PP_OFF_DELAYS;
+ 		pp_ctl_reg = PCH_PP_CONTROL;
+@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
+ 	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
+ 	u32 pwm_ctl_reg;
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		pp_on_reg = PCH_PP_ON_DELAYS;
+ 		pp_off_reg = PCH_PP_OFF_DELAYS;
+ 		pp_ctl_reg = PCH_PP_CONTROL;
+@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	}
+ 
+ 	/* full screen scale for now */
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		goto out;
+ 
+ 	/* 965+ wants fuzzy fitting */
+@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	 * to register description and PRM.
+ 	 * Change the value here to see the borders for debugging
+ 	 */
+-	if (!IS_IGDNG(dev)) {
++	if (!IS_IRONLAKE(dev)) {
+ 		I915_WRITE(BCLRPAT_A, 0);
+ 		I915_WRITE(BCLRPAT_B, 0);
+ 	}
+@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	u32 reg;
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		reg = BLC_PWM_CPU_CTL;
+ 	else
+ 		reg = BLC_PWM_CTL;
+@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
+ 	 * settings.
+ 	 */
+ 
+-	if (IS_IGDNG(dev))
++	if (IS_IRONLAKE(dev))
+ 		return;
+ 
+ 	/*
+@@ -623,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Aspire 1810T",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
++		},
++	},
++	{
+ 		.ident = "PC-81005",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+ 		},
+ 	},
++	{
++		.ident = "Clevo M5x0N",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
++			DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
++		},
++	},
+ 	{ }
+ };
+ 
+@@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
+ {
+ 	enum drm_connector_status status = connector_status_connected;
+ 
+-	if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
++	if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
+ 		status = connector_status_disconnected;
+ 
+ 	return status;
+@@ -882,64 +896,101 @@ static const struct dmi_system_id intel_no_lvds[] = {
+ 	{ }	/* terminating entry */
+ };
+ 
+-#ifdef CONFIG_ACPI
+-/*
+- * check_lid_device -- check whether @handle is an ACPI LID device.
+- * @handle: ACPI device handle
+- * @level : depth in the ACPI namespace tree
+- * @context: the number of LID device when we find the device
+- * @rv: a return value to fill if desired (Not use)
++/**
++ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
++ * @dev: drm device
++ * @connector: LVDS connector
++ *
++ * Find the reduced downclock for LVDS in EDID.
+  */
+-static acpi_status
+-check_lid_device(acpi_handle handle, u32 level, void *context,
+-			void **return_value)
++static void intel_find_lvds_downclock(struct drm_device *dev,
++				struct drm_connector *connector)
+ {
+-	struct acpi_device *acpi_dev;
+-	int *lid_present = context;
+-
+-	acpi_dev = NULL;
+-	/* Get the acpi device for device handle */
+-	if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
+-		/* If there is no ACPI device for handle, return */
+-		return AE_OK;
+-	}
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct drm_display_mode *scan, *panel_fixed_mode;
++	int temp_downclock;
+ 
+-	if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
+-		*lid_present = 1;
++	panel_fixed_mode = dev_priv->panel_fixed_mode;
++	temp_downclock = panel_fixed_mode->clock;
+ 
+-	return AE_OK;
++	mutex_lock(&dev->mode_config.mutex);
++	list_for_each_entry(scan, &connector->probed_modes, head) {
++		/*
++		 * If one mode has the same resolution as the fixed_panel mode
++		 * but a different refresh rate, a reduced downclock has been
++		 * found for the LVDS. In that case we can program different
++		 * FPx0/1 values to dynamically select between the low and
++		 * high frequencies.
++		 */
++		if (scan->hdisplay == panel_fixed_mode->hdisplay &&
++			scan->hsync_start == panel_fixed_mode->hsync_start &&
++			scan->hsync_end == panel_fixed_mode->hsync_end &&
++			scan->htotal == panel_fixed_mode->htotal &&
++			scan->vdisplay == panel_fixed_mode->vdisplay &&
++			scan->vsync_start == panel_fixed_mode->vsync_start &&
++			scan->vsync_end == panel_fixed_mode->vsync_end &&
++			scan->vtotal == panel_fixed_mode->vtotal) {
++			if (scan->clock < temp_downclock) {
++				/*
++				 * A downclock has already been found, but
++				 * keep looking for an even lower one.
++				 */
++				temp_downclock = scan->clock;
++			}
++		}
++	}
++	mutex_unlock(&dev->mode_config.mutex);
++	if (temp_downclock < panel_fixed_mode->clock &&
++	    i915_lvds_downclock) {
++		/* We found the downclock for LVDS. */
++		dev_priv->lvds_downclock_avail = 1;
++		dev_priv->lvds_downclock = temp_downclock;
++		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
++				"Normal clock %dKhz, downclock %dKhz\n",
++				panel_fixed_mode->clock, temp_downclock);
++	}
++	return;
+ }
+ 
+-/**
+- * check whether there exists the ACPI LID device by enumerating the ACPI
+- * device tree.
++/*
++ * Enumerate the child dev array parsed from VBT to check whether
++ * the LVDS is present.
++ * Return 1 if it is present, 0 if it is not.
++ * If no child dev is parsed from the VBT, assume that the LVDS is present.
++ * Note: the addin_offset of an LVDS panel should also be checked; only
++ * when it is non-zero is the panel assumed to be present.
+  */
+-static int intel_lid_present(void)
++static int lvds_is_present_in_vbt(struct drm_device *dev)
+ {
+-	int lid_present = 0;
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct child_device_config *p_child;
++	int i, ret;
+ 
+-	if (acpi_disabled) {
+-		/* If ACPI is disabled, there is no ACPI device tree to
+-		 * check, so assume the LID device would have been present.
+-		 */
++	if (!dev_priv->child_dev_num)
+ 		return 1;
+-	}
+ 
+-	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+-				ACPI_UINT32_MAX,
+-				check_lid_device, &lid_present, NULL);
++	ret = 0;
++	for (i = 0; i < dev_priv->child_dev_num; i++) {
++		p_child = dev_priv->child_dev + i;
++		/*
++		 * If the device type is not LFP, continue.
++		 * If the device type is 0x22, it is also regarded as LFP.
++		 */
++		if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
++			p_child->device_type != DEVICE_TYPE_LFP)
++			continue;
+ 
+-	return lid_present;
+-}
+-#else
+-static int intel_lid_present(void)
+-{
+-	/* In the absence of ACPI built in, assume that the LID device would
+-	 * have been present.
+-	 */
+-	return 1;
++		/* The addin_offset should be checked. Only when it is
++		 * non-zero, it is regarded as present.
++		 */
++		if (p_child->addin_offset) {
++			ret = 1;
++			break;
++		}
++	}
++	return ret;
+ }
+-#endif
+ 
+ /**
+  * intel_lvds_init - setup LVDS connectors on this device
+@@ -964,21 +1015,16 @@ void intel_lvds_init(struct drm_device *dev)
+ 	if (dmi_check_system(intel_no_lvds))
+ 		return;
+ 
+-	/* Assume that any device without an ACPI LID device also doesn't
+-	 * have an integrated LVDS.  We would be better off parsing the BIOS
+-	 * to get a reliable indicator, but that code isn't written yet.
+-	 *
+-	 * In the case of all-in-one desktops using LVDS that we've seen,
+-	 * they're using SDVO LVDS.
+-	 */
+-	if (!intel_lid_present())
++	if (!lvds_is_present_in_vbt(dev)) {
++		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ 		return;
++	}
+ 
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
+ 			return;
+ 		if (dev_priv->edp_support) {
+-			DRM_DEBUG("disable LVDS for eDP support\n");
++			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
+ 			return;
+ 		}
+ 		gpio = PCH_GPIOC;
+@@ -1051,6 +1097,7 @@ void intel_lvds_init(struct drm_device *dev)
+ 			dev_priv->panel_fixed_mode =
+ 				drm_mode_duplicate(dev, scan);
+ 			mutex_unlock(&dev->mode_config.mutex);
++			intel_find_lvds_downclock(dev, connector);
+ 			goto out;
+ 		}
+ 		mutex_unlock(&dev->mode_config.mutex);
+@@ -1075,8 +1122,8 @@ void intel_lvds_init(struct drm_device *dev)
+ 	 * correct mode.
+ 	 */
+ 
+-	/* IGDNG: FIXME if still fail, not try pipe mode now */
+-	if (IS_IGDNG(dev))
++	/* Ironlake: FIXME if still fail, not try pipe mode now */
++	if (IS_IRONLAKE(dev))
+ 		goto failed;
+ 
+ 	lvds = I915_READ(LVDS);
+@@ -1097,7 +1144,7 @@ void intel_lvds_init(struct drm_device *dev)
+ 		goto failed;
+ 
+ out:
+-	if (IS_IGDNG(dev)) {
++	if (IS_IRONLAKE(dev)) {
+ 		u32 pwm;
+ 		/* make sure PWM is enabled */
+ 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
+@@ -1110,7 +1157,7 @@ out:
+ 	}
+ 	dev_priv->lid_notifier.notifier_call = intel_lid_notify;
+ 	if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
+-		DRM_DEBUG("lid notifier registration failed\n");
++		DRM_DEBUG_KMS("lid notifier registration failed\n");
+ 		dev_priv->lid_notifier.notifier_call = NULL;
+ 	}
+ 	/* keep the LVDS connector */
+@@ -1123,5 +1170,6 @@ failed:
+ 	if (intel_output->ddc_bus)
+ 		intel_i2c_destroy(intel_output->ddc_bus);
+ 	drm_connector_cleanup(connector);
++	drm_encoder_cleanup(encoder);
+ 	kfree(intel_output);
+ }
+diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
+new file mode 100644
+index 0000000..2639591
+--- /dev/null
++++ b/drivers/gpu/drm/i915/intel_overlay.c
+@@ -0,0 +1,1416 @@
++/*
++ * Copyright © 2009
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ * Authors:
++ *    Daniel Vetter <daniel at ffwll.ch>
++ *
++ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
++ */
++#include "drmP.h"
++#include "drm.h"
++#include "i915_drm.h"
++#include "i915_drv.h"
++#include "i915_reg.h"
++#include "intel_drv.h"
++
++/* Limits for overlay size. According to intel doc, the real limits are:
++ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
++ * UV height (planar): 1023. But xorg assumes 2048 for both height and
++ * width, so use the minimum of the two. */
++#define IMAGE_MAX_WIDTH		2048
++#define IMAGE_MAX_HEIGHT	2046 /* 2 * 1023 */
++/* on 830 and 845 these large limits result in the card hanging */
++#define IMAGE_MAX_WIDTH_LEGACY	1024
++#define IMAGE_MAX_HEIGHT_LEGACY	1088
++
++/* overlay register definitions */
++/* OCMD register */
++#define OCMD_TILED_SURFACE	(0x1<<19)
++#define OCMD_MIRROR_MASK	(0x3<<17)
++#define OCMD_MIRROR_MODE	(0x3<<17)
++#define OCMD_MIRROR_HORIZONTAL	(0x1<<17)
++#define OCMD_MIRROR_VERTICAL	(0x2<<17)
++#define OCMD_MIRROR_BOTH	(0x3<<17)
++#define OCMD_BYTEORDER_MASK	(0x3<<14) /* zero for YUYV or FOURCC YUY2 */
++#define OCMD_UV_SWAP		(0x1<<14) /* YVYU */
++#define OCMD_Y_SWAP		(0x2<<14) /* UYVY or FOURCC UYVY */
++#define OCMD_Y_AND_UV_SWAP	(0x3<<14) /* VYUY */
++#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
++#define OCMD_RGB_888		(0x1<<10) /* not in i965 Intel docs */
++#define OCMD_RGB_555		(0x2<<10) /* not in i965 Intel docs */
++#define OCMD_RGB_565		(0x3<<10) /* not in i965 Intel docs */
++#define OCMD_YUV_422_PACKED	(0x8<<10)
++#define OCMD_YUV_411_PACKED	(0x9<<10) /* not in i965 Intel docs */
++#define OCMD_YUV_420_PLANAR	(0xc<<10)
++#define OCMD_YUV_422_PLANAR	(0xd<<10)
++#define OCMD_YUV_410_PLANAR	(0xe<<10) /* also 411 */
++#define OCMD_TVSYNCFLIP_PARITY	(0x1<<9)
++#define OCMD_TVSYNCFLIP_ENABLE	(0x1<<7)
++#define OCMD_BUF_TYPE_MASK	(0x1<<5)
++#define OCMD_BUF_TYPE_FRAME	(0x0<<5)
++#define OCMD_BUF_TYPE_FIELD	(0x1<<5)
++#define OCMD_TEST_MODE		(0x1<<4)
++#define OCMD_BUFFER_SELECT	(0x3<<2)
++#define OCMD_BUFFER0		(0x0<<2)
++#define OCMD_BUFFER1		(0x1<<2)
++#define OCMD_FIELD_SELECT	(0x1<<2)
++#define OCMD_FIELD0		(0x0<<1)
++#define OCMD_FIELD1		(0x1<<1)
++#define OCMD_ENABLE		(0x1<<0)
++
++/* OCONFIG register */
++#define OCONF_PIPE_MASK		(0x1<<18)
++#define OCONF_PIPE_A		(0x0<<18)
++#define OCONF_PIPE_B		(0x1<<18)
++#define OCONF_GAMMA2_ENABLE	(0x1<<16)
++#define OCONF_CSC_MODE_BT601	(0x0<<5)
++#define OCONF_CSC_MODE_BT709	(0x1<<5)
++#define OCONF_CSC_BYPASS	(0x1<<4)
++#define OCONF_CC_OUT_8BIT	(0x1<<3)
++#define OCONF_TEST_MODE		(0x1<<2)
++#define OCONF_THREE_LINE_BUFFER	(0x1<<0)
++#define OCONF_TWO_LINE_BUFFER	(0x0<<0)
++
++/* DCLRKM (dst-key) register */
++#define DST_KEY_ENABLE		(0x1<<31)
++#define CLK_RGB24_MASK		0x0
++#define CLK_RGB16_MASK		0x070307
++#define CLK_RGB15_MASK		0x070707
++#define CLK_RGB8I_MASK		0xffffff
++
++#define RGB16_TO_COLORKEY(c) \
++	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
++#define RGB15_TO_COLORKEY(c) \
++	(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
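++/* e.g. RGB16_TO_COLORKEY(0xF800), pure red in 5:6:5, yields 0xF80000: each
++ * component lands in the top bits of its 8:8:8 slot. */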
++
++/* overlay flip addr flag */
++#define OFC_UPDATE		0x1
++
++/* polyphase filter coefficients */
++#define N_HORIZ_Y_TAPS          5
++#define N_VERT_Y_TAPS           3
++#define N_HORIZ_UV_TAPS         3
++#define N_VERT_UV_TAPS          3
++#define N_PHASES                17
++#define MAX_TAPS                5
++
++/* memory-buffered overlay registers */
++struct overlay_registers {
++    u32 OBUF_0Y;
++    u32 OBUF_1Y;
++    u32 OBUF_0U;
++    u32 OBUF_0V;
++    u32 OBUF_1U;
++    u32 OBUF_1V;
++    u32 OSTRIDE;
++    u32 YRGB_VPH;
++    u32 UV_VPH;
++    u32 HORZ_PH;
++    u32 INIT_PHS;
++    u32 DWINPOS;
++    u32 DWINSZ;
++    u32 SWIDTH;
++    u32 SWIDTHSW;
++    u32 SHEIGHT;
++    u32 YRGBSCALE;
++    u32 UVSCALE;
++    u32 OCLRC0;
++    u32 OCLRC1;
++    u32 DCLRKV;
++    u32 DCLRKM;
++    u32 SCLRKVH;
++    u32 SCLRKVL;
++    u32 SCLRKEN;
++    u32 OCONFIG;
++    u32 OCMD;
++    u32 RESERVED1; /* 0x6C */
++    u32 OSTART_0Y;
++    u32 OSTART_1Y;
++    u32 OSTART_0U;
++    u32 OSTART_0V;
++    u32 OSTART_1U;
++    u32 OSTART_1V;
++    u32 OTILEOFF_0Y;
++    u32 OTILEOFF_1Y;
++    u32 OTILEOFF_0U;
++    u32 OTILEOFF_0V;
++    u32 OTILEOFF_1U;
++    u32 OTILEOFF_1V;
++    u32 FASTHSCALE; /* 0xA0 */
++    u32 UVSCALEV; /* 0xA4 */
++    u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
++    u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
++    u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
++    u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
++    u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
++    u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
++    u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
++    u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
++    u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
++};
++
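++/* G4X and Ironlake dropped the overlay engine entirely; on G33 and i965
++ * the overlay register page is mapped through the GTT rather than via a
++ * physically-addressed object (see intel_overlay_map_regs_atomic below). */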
++#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
++#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
++
++
++static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
++{
++	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
++	struct overlay_registers *regs;
++
++	/* no recursive mappings */
++	BUG_ON(overlay->virt_addr);
++
++	if (OVERLAY_NONPHYSICAL(overlay->dev)) {
++		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
++				overlay->reg_bo->gtt_offset);
++
++		if (!regs) {
++			DRM_ERROR("failed to map overlay regs in GTT\n");
++			return NULL;
++		}
++	} else
++		regs = overlay->reg_bo->phys_obj->handle->vaddr;
++
++	return overlay->virt_addr = regs;
++}
++
++static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
++{
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	if (OVERLAY_NONPHYSICAL(overlay->dev))
++		io_mapping_unmap_atomic(overlay->virt_addr);
++
++	overlay->virt_addr = NULL;
++
++	I915_READ(OVADD); /* flush wc caches */
++
++	return;
++}
++
++/* overlay needs to be disabled in OCMD reg */
++static int intel_overlay_on(struct intel_overlay *overlay)
++{
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++	RING_LOCALS;
++
++	BUG_ON(overlay->active);
++
++	overlay->active = 1;
++	overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
++
++	BEGIN_LP_RING(6);
++	OUT_RING(MI_FLUSH);
++	OUT_RING(MI_NOOP);
++	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
++	OUT_RING(overlay->flip_addr | OFC_UPDATE);
++	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++	OUT_RING(MI_NOOP);
++	ADVANCE_LP_RING();
++
++	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
++	if (overlay->last_flip_req == 0)
++		return -ENOMEM;
++
++	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
++	if (ret != 0)
++		return ret;
++
++	overlay->hw_wedged = 0;
++	overlay->last_flip_req = 0;
++	return 0;
++}
++
++/* overlay needs to be enabled in OCMD reg */
++static void intel_overlay_continue(struct intel_overlay *overlay,
++			    bool load_polyphase_filter)
++{
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 flip_addr = overlay->flip_addr;
++	u32 tmp;
++	RING_LOCALS;
++
++	BUG_ON(!overlay->active);
++
++	if (load_polyphase_filter)
++		flip_addr |= OFC_UPDATE;
++
++	/* check for underruns */
++	tmp = I915_READ(DOVSTA);
++	if (tmp & (1 << 17))
++		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
++
++	BEGIN_LP_RING(4);
++	OUT_RING(MI_FLUSH);
++	OUT_RING(MI_NOOP);
++	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
++	OUT_RING(flip_addr);
++	ADVANCE_LP_RING();
++
++	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
++}
++
++static int intel_overlay_wait_flip(struct intel_overlay *overlay)
++{
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++	u32 tmp;
++	RING_LOCALS;
++
++	if (overlay->last_flip_req != 0) {
++		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
++		if (ret == 0) {
++			overlay->last_flip_req = 0;
++
++			tmp = I915_READ(ISR);
++
++			if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
++				return 0;
++		}
++	}
++
++	/* synchronous slowpath */
++	overlay->hw_wedged = RELEASE_OLD_VID;
++
++	BEGIN_LP_RING(2);
++	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++	OUT_RING(MI_NOOP);
++	ADVANCE_LP_RING();
++
++	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
++	if (overlay->last_flip_req == 0)
++		return -ENOMEM;
++
++	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
++	if (ret != 0)
++		return ret;
++
++	overlay->hw_wedged = 0;
++	overlay->last_flip_req = 0;
++	return 0;
++}
++
++/* overlay needs to be disabled in OCMD reg */
++static int intel_overlay_off(struct intel_overlay *overlay)
++{
++	u32 flip_addr = overlay->flip_addr;
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	int ret;
++	RING_LOCALS;
++
++	BUG_ON(!overlay->active);
++
++	/* According to intel docs the overlay hw may hang (when switching
++	 * off) without loading the filter coeffs. It is however unclear whether
++	 * this applies to the disabling of the overlay or to the switching off
++	 * of the hw. Do it in both cases */
++	flip_addr |= OFC_UPDATE;
++
++	/* wait for overlay to go idle */
++	overlay->hw_wedged = SWITCH_OFF_STAGE_1;
++
++	BEGIN_LP_RING(6);
++	OUT_RING(MI_FLUSH);
++	OUT_RING(MI_NOOP);
++	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
++	OUT_RING(flip_addr);
++	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++	OUT_RING(MI_NOOP);
++	ADVANCE_LP_RING();
++
++	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
++	if (overlay->last_flip_req == 0)
++		return -ENOMEM;
++
++	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
++	if (ret != 0)
++		return ret;
++
++	/* turn overlay off */
++	overlay->hw_wedged = SWITCH_OFF_STAGE_2;
++
++	BEGIN_LP_RING(6);
++	OUT_RING(MI_FLUSH);
++	OUT_RING(MI_NOOP);
++	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
++	OUT_RING(flip_addr);
++	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++	OUT_RING(MI_NOOP);
++	ADVANCE_LP_RING();
++
++	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
++	if (overlay->last_flip_req == 0)
++		return -ENOMEM;
++
++	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
++	if (ret != 0)
++		return ret;
++
++	overlay->hw_wedged = 0;
++	overlay->last_flip_req = 0;
++	return ret;
++}
++
++static void intel_overlay_off_tail(struct intel_overlay *overlay)
++{
++	struct drm_gem_object *obj;
++
++	/* never have the overlay hw on without showing a frame */
++	BUG_ON(!overlay->vid_bo);
++	obj = overlay->vid_bo->obj;
++
++	i915_gem_object_unpin(obj);
++	drm_gem_object_unreference(obj);
++	overlay->vid_bo = NULL;
++
++	overlay->crtc->overlay = NULL;
++	overlay->crtc = NULL;
++	overlay->active = 0;
++}
++
++/* recover from an interruption due to a signal
++ * We have to be careful not to repeat work forever and to make forward progress. */
++int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
++					 int interruptible)
++{
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct drm_gem_object *obj;
++	u32 flip_addr;
++	int ret;
++	RING_LOCALS;
++
++	if (overlay->hw_wedged == HW_WEDGED)
++		return -EIO;
++
++	if (overlay->last_flip_req == 0) {
++		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
++		if (overlay->last_flip_req == 0)
++			return -ENOMEM;
++	}
++
++	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
++	if (ret != 0)
++		return ret;
++
++	switch (overlay->hw_wedged) {
++		case RELEASE_OLD_VID:
++			obj = overlay->old_vid_bo->obj;
++			i915_gem_object_unpin(obj);
++			drm_gem_object_unreference(obj);
++			overlay->old_vid_bo = NULL;
++			break;
++		case SWITCH_OFF_STAGE_1:
++			flip_addr = overlay->flip_addr;
++			flip_addr |= OFC_UPDATE;
++
++			overlay->hw_wedged = SWITCH_OFF_STAGE_2;
++
++			BEGIN_LP_RING(6);
++			OUT_RING(MI_FLUSH);
++			OUT_RING(MI_NOOP);
++			OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
++			OUT_RING(flip_addr);
++			OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
++			OUT_RING(MI_NOOP);
++			ADVANCE_LP_RING();
++
++			overlay->last_flip_req = i915_add_request(dev, NULL, 0);
++			if (overlay->last_flip_req == 0)
++				return -ENOMEM;
++
++			ret = i915_do_wait_request(dev, overlay->last_flip_req,
++					interruptible);
++			if (ret != 0)
++				return ret;
++
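++			/* deliberately fall through to stage 2 */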
++		case SWITCH_OFF_STAGE_2:
++			intel_overlay_off_tail(overlay);
++			break;
++		default:
++			BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
++	}
++
++	overlay->hw_wedged = 0;
++	overlay->last_flip_req = 0;
++	return 0;
++}
++
++/* Wait for pending overlay flip and release old frame.
++ * Needs to be called before the overlay registers are changed
++ * via intel_overlay_(un)map_regs_atomic */
++static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
++{
++	int ret;
++	struct drm_gem_object *obj;
++
++	/* only wait if there is actually an old frame to release to
++	 * guarantee forward progress */
++	if (!overlay->old_vid_bo)
++		return 0;
++
++	ret = intel_overlay_wait_flip(overlay);
++	if (ret != 0)
++		return ret;
++
++	obj = overlay->old_vid_bo->obj;
++	i915_gem_object_unpin(obj);
++	drm_gem_object_unreference(obj);
++	overlay->old_vid_bo = NULL;
++
++	return 0;
++}
++
++struct put_image_params {
++	int format;
++	short dst_x;
++	short dst_y;
++	short dst_w;
++	short dst_h;
++	short src_w;
++	short src_scan_h;
++	short src_scan_w;
++	short src_h;
++	short stride_Y;
++	short stride_UV;
++	int offset_Y;
++	int offset_U;
++	int offset_V;
++};
++
++static int packed_depth_bytes(u32 format)
++{
++	switch (format & I915_OVERLAY_DEPTH_MASK) {
++		case I915_OVERLAY_YUV422:
++			return 4;
++		case I915_OVERLAY_YUV411:
++			/* return 6; not implemented */
++		default:
++			return -EINVAL;
++	}
++}
++
++static int packed_width_bytes(u32 format, short width)
++{
++	switch (format & I915_OVERLAY_DEPTH_MASK) {
++		case I915_OVERLAY_YUV422:
++			return width << 1;
++		default:
++			return -EINVAL;
++	}
++}
++
++static int uv_hsubsampling(u32 format)
++{
++	switch (format & I915_OVERLAY_DEPTH_MASK) {
++		case I915_OVERLAY_YUV422:
++		case I915_OVERLAY_YUV420:
++			return 2;
++		case I915_OVERLAY_YUV411:
++		case I915_OVERLAY_YUV410:
++			return 4;
++		default:
++			return -EINVAL;
++	}
++}
++
++static int uv_vsubsampling(u32 format)
++{
++	switch (format & I915_OVERLAY_DEPTH_MASK) {
++		case I915_OVERLAY_YUV420:
++		case I915_OVERLAY_YUV410:
++			return 2;
++		case I915_OVERLAY_YUV422:
++		case I915_OVERLAY_YUV411:
++			return 1;
++		default:
++			return -EINVAL;
++	}
++}
++
++static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
++{
++	u32 mask, shift, ret;
++	if (IS_I9XX(dev)) {
++		mask = 0x3f;
++		shift = 6;
++	} else {
++		mask = 0x1f;
++		shift = 5;
++	}
++	ret = ((offset + width + mask) >> shift) - (offset >> shift);
++	if (IS_I9XX(dev))
++		ret <<= 1;
++	ret -= 1;
++	return ret << 2;
++}
++
++static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
++	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
++	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
++	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
++	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
++	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
++	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
++	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
++	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
++	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
++	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
++	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
++	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
++	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
++	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
++	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
++	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
++	0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
++static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
++	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
++	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
++	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
++	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
++	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
++	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
++	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
++	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
++	0x3000, 0x0800, 0x3000};
++
++static void update_polyphase_filter(struct overlay_registers *regs)
++{
++	memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
++	memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
++}
++
++static bool update_scaling_factors(struct intel_overlay *overlay,
++				   struct overlay_registers *regs,
++				   struct put_image_params *params)
++{
++	/* fixed point with a 12 bit shift */
++	u32 xscale, yscale, xscale_UV, yscale_UV;
++#define FP_SHIFT 12
++#define FRACT_MASK 0xfff
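++	/* e.g. scanning 641 source pixels into a 320-pixel-wide window gives
++	 * xscale = (640 << 12) / 320 = 0x2000, i.e. 2.0 in .12 fixed point */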
++	bool scale_changed = false;
++	int uv_hscale = uv_hsubsampling(params->format);
++	int uv_vscale = uv_vsubsampling(params->format);
++
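++	/*
++	 * Worked example: scanning 1920 source pixels onto a 960 pixel
++	 * destination gives xscale = (1919 << 12) / 960 = 8187, i.e.
++	 * roughly 2.0 in this .12 fixed-point format.
++	 */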
++	if (params->dst_w > 1)
++		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
++			/(params->dst_w);
++	else
++		xscale = 1 << FP_SHIFT;
++
++	if (params->dst_h > 1)
++		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
++			/(params->dst_h);
++	else
++		yscale = 1 << FP_SHIFT;
++
++	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
++		xscale_UV = xscale/uv_hscale;
++		yscale_UV = yscale/uv_vscale;
++		/* make the Y scale to UV scale ratio an exact multiple */
++		xscale = xscale_UV * uv_hscale;
++		yscale = yscale_UV * uv_vscale;
++	/*} else {
++		xscale_UV = 0;
++		yscale_UV = 0;
++	}*/
++
++	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
++		scale_changed = true;
++	overlay->old_xscale = xscale;
++	overlay->old_yscale = yscale;
++
++	regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
++		| ((xscale >> FP_SHIFT) << 16)
++		| ((xscale & FRACT_MASK) << 3);
++	regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
++		| ((xscale_UV >> FP_SHIFT) << 16)
++		| ((xscale_UV & FRACT_MASK) << 3);
++	regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
++		| ((yscale_UV >> FP_SHIFT) << 0);
++
++	if (scale_changed)
++		update_polyphase_filter(regs);
++
++	return scale_changed;
++}
++
++static void update_colorkey(struct intel_overlay *overlay,
++			    struct overlay_registers *regs)
++{
++	u32 key = overlay->color_key;
++	switch (overlay->crtc->base.fb->bits_per_pixel) {
++		case 8:
++			regs->DCLRKV = 0;
++			regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
++			break;
++		case 16:
++			if (overlay->crtc->base.fb->depth == 15) {
++				regs->DCLRKV = RGB15_TO_COLORKEY(key);
++				regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
++			} else {
++				regs->DCLRKV = RGB16_TO_COLORKEY(key);
++				regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
++			}
++			break;
++		case 24:
++		case 32:
++			regs->DCLRKV = key;
++			regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
++			break;
++	}
++}
++
++static u32 overlay_cmd_reg(struct put_image_params *params)
++{
++	u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
++
++	if (params->format & I915_OVERLAY_YUV_PLANAR) {
++		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
++			case I915_OVERLAY_YUV422:
++				cmd |= OCMD_YUV_422_PLANAR;
++				break;
++			case I915_OVERLAY_YUV420:
++				cmd |= OCMD_YUV_420_PLANAR;
++				break;
++			case I915_OVERLAY_YUV411:
++			case I915_OVERLAY_YUV410:
++				cmd |= OCMD_YUV_410_PLANAR;
++				break;
++		}
++	} else { /* YUV packed */
++		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
++			case I915_OVERLAY_YUV422:
++				cmd |= OCMD_YUV_422_PACKED;
++				break;
++			case I915_OVERLAY_YUV411:
++				cmd |= OCMD_YUV_411_PACKED;
++				break;
++		}
++
++		switch (params->format & I915_OVERLAY_SWAP_MASK) {
++			case I915_OVERLAY_NO_SWAP:
++				break;
++			case I915_OVERLAY_UV_SWAP:
++				cmd |= OCMD_UV_SWAP;
++				break;
++			case I915_OVERLAY_Y_SWAP:
++				cmd |= OCMD_Y_SWAP;
++				break;
++			case I915_OVERLAY_Y_AND_UV_SWAP:
++				cmd |= OCMD_Y_AND_UV_SWAP;
++				break;
++		}
++	}
++
++	return cmd;
++}
++
++int intel_overlay_do_put_image(struct intel_overlay *overlay,
++			       struct drm_gem_object *new_bo,
++			       struct put_image_params *params)
++{
++	int ret, tmp_width;
++	struct overlay_registers *regs;
++	bool scale_changed = false;
++	struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
++	struct drm_device *dev = overlay->dev;
++
++	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
++	BUG_ON(!overlay);
++
++	ret = intel_overlay_release_old_vid(overlay);
++	if (ret != 0)
++		return ret;
++
++	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
++	if (ret != 0)
++		return ret;
++
++	ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
++	if (ret != 0)
++		goto out_unpin;
++
++	if (!overlay->active) {
++		regs = intel_overlay_map_regs_atomic(overlay);
++		if (!regs) {
++			ret = -ENOMEM;
++			goto out_unpin;
++		}
++		regs->OCONFIG = OCONF_CC_OUT_8BIT;
++		if (IS_I965GM(overlay->dev))
++			regs->OCONFIG |= OCONF_CSC_MODE_BT709;
++		regs->OCONFIG |= overlay->crtc->pipe == 0 ?
++			OCONF_PIPE_A : OCONF_PIPE_B;
++		intel_overlay_unmap_regs_atomic(overlay);
++
++		ret = intel_overlay_on(overlay);
++		if (ret != 0)
++			goto out_unpin;
++	}
++
++	regs = intel_overlay_map_regs_atomic(overlay);
++	if (!regs) {
++		ret = -ENOMEM;
++		goto out_unpin;
++	}
++
++	regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
++	regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
++
++	if (params->format & I915_OVERLAY_YUV_PACKED)
++		tmp_width = packed_width_bytes(params->format, params->src_w);
++	else
++		tmp_width = params->src_w;
++
++	regs->SWIDTH = params->src_w;
++	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
++			params->offset_Y, tmp_width);
++	regs->SHEIGHT = params->src_h;
++	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
++	regs->OSTRIDE = params->stride_Y;
++
++	if (params->format & I915_OVERLAY_YUV_PLANAR) {
++		int uv_hscale = uv_hsubsampling(params->format);
++		int uv_vscale = uv_vsubsampling(params->format);
++		u32 tmp_U, tmp_V;
++		regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
++		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
++				params->src_w/uv_hscale);
++		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
++				params->src_w/uv_hscale);
++		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
++		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
++		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
++		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
++		regs->OSTRIDE |= params->stride_UV << 16;
++	}
++
++	scale_changed = update_scaling_factors(overlay, regs, params);
++
++	update_colorkey(overlay, regs);
++
++	regs->OCMD = overlay_cmd_reg(params);
++
++	intel_overlay_unmap_regs_atomic(overlay);
++
++	intel_overlay_continue(overlay, scale_changed);
++
++	overlay->old_vid_bo = overlay->vid_bo;
++	overlay->vid_bo = new_bo->driver_private;
++
++	return 0;
++
++out_unpin:
++	i915_gem_object_unpin(new_bo);
++	return ret;
++}
++
++int intel_overlay_switch_off(struct intel_overlay *overlay)
++{
++	int ret;
++	struct overlay_registers *regs;
++	struct drm_device *dev = overlay->dev;
++
++	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
++	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
++
++	if (overlay->hw_wedged) {
++		ret = intel_overlay_recover_from_interrupt(overlay, 1);
++		if (ret != 0)
++			return ret;
++	}
++
++	if (!overlay->active)
++		return 0;
++
++	ret = intel_overlay_release_old_vid(overlay);
++	if (ret != 0)
++		return ret;
++
++	regs = intel_overlay_map_regs_atomic(overlay);
++	regs->OCMD = 0;
++	intel_overlay_unmap_regs_atomic(overlay);
++
++	ret = intel_overlay_off(overlay);
++	if (ret != 0)
++		return ret;
++
++	intel_overlay_off_tail(overlay);
++
++	return 0;
++}
++
++static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
++					  struct intel_crtc *crtc)
++{
++	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
++	u32 pipeconf;
++	int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
++
++	if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
++		return -EINVAL;
++
++	pipeconf = I915_READ(pipeconf_reg);
++
++	/* can't use the overlay with double wide pipe */
++	if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
++		return -EINVAL;
++
++	return 0;
++}
++
++static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
++{
++	struct drm_device *dev = overlay->dev;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	u32 ratio;
++	u32 pfit_control = I915_READ(PFIT_CONTROL);
++
++	/* XXX: This is not the same logic as in the xorg driver, but more in
++	 * line with the Intel documentation for the i965 */
++	if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
++		ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
++	} else { /* on i965 use the PGM reg to read out the autoscaler values */
++		ratio = I915_READ(PFIT_PGM_RATIOS);
++		if (IS_I965G(dev))
++			ratio >>= PFIT_VERT_SCALE_SHIFT_965;
++		else
++			ratio >>= PFIT_VERT_SCALE_SHIFT;
++	}
++
++	overlay->pfit_vscale_ratio = ratio;
++}
++
++static int check_overlay_dst(struct intel_overlay *overlay,
++			     struct drm_intel_overlay_put_image *rec)
++{
++	struct drm_display_mode *mode = &overlay->crtc->base.mode;
++
++	if ((rec->dst_x < mode->crtc_hdisplay)
++	    && (rec->dst_x + rec->dst_width
++		    <= mode->crtc_hdisplay)
++	    && (rec->dst_y < mode->crtc_vdisplay)
++	    && (rec->dst_y + rec->dst_height
++		    <= mode->crtc_vdisplay))
++		return 0;
++	else
++		return -EINVAL;
++}
++
++static int check_overlay_scaling(struct put_image_params *rec)
++{
++	u32 tmp;
++
++	/* downscaling limit is 8.0 */
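++	/* tmp below is the integer part of the scan/dst ratio: for example
++	 * src_scan_h = 4096 onto dst_h = 512 gives tmp = 8, which is
++	 * rejected, so usable ratios stay strictly below 8.0 */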
++	tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
++	if (tmp > 7)
++		return -EINVAL;
++	tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
++	if (tmp > 7)
++		return -EINVAL;
++
++	return 0;
++}
++
++static int check_overlay_src(struct drm_device *dev,
++			     struct drm_intel_overlay_put_image *rec,
++			     struct drm_gem_object *new_bo)
++{
++	u32 stride_mask;
++	int depth;
++	int uv_hscale = uv_hsubsampling(rec->flags);
++	int uv_vscale = uv_vsubsampling(rec->flags);
++	size_t tmp;
++
++	/* check src dimensions */
++	if (IS_845G(dev) || IS_I830(dev)) {
++		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
++		    || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
++			return -EINVAL;
++	} else {
++		if (rec->src_height > IMAGE_MAX_HEIGHT
++		    || rec->src_width > IMAGE_MAX_WIDTH)
++			return -EINVAL;
++	}
++	/* better safe than sorry, use 4 as the maximal subsampling ratio */
++	if (rec->src_height < N_VERT_Y_TAPS*4
++	    || rec->src_width < N_HORIZ_Y_TAPS*4)
++		return -EINVAL;
++
++	/* check alignment constraints */
++	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
++		case I915_OVERLAY_RGB:
++			/* not implemented */
++			return -EINVAL;
++		case I915_OVERLAY_YUV_PACKED:
++			depth = packed_depth_bytes(rec->flags);
++			if (uv_vscale != 1)
++				return -EINVAL;
++			if (depth < 0)
++				return depth;
++			/* ignore UV planes */
++			rec->stride_UV = 0;
++			rec->offset_U = 0;
++			rec->offset_V = 0;
++			/* check pixel alignment */
++			if (rec->offset_Y % depth)
++				return -EINVAL;
++			break;
++		case I915_OVERLAY_YUV_PLANAR:
++			if (uv_vscale < 0 || uv_hscale < 0)
++				return -EINVAL;
++			/* no offset restrictions for planar formats */
++			break;
++		default:
++			return -EINVAL;
++	}
++
++	if (rec->src_width % uv_hscale)
++		return -EINVAL;
++
++	/* stride checking */
++	stride_mask = 63;
++
++	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
++		return -EINVAL;
++	if (IS_I965G(dev) && rec->stride_Y < 512)
++		return -EINVAL;
++
++	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
++		4 : 8;
++	if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
++		return -EINVAL;
++
++	/* check buffer dimensions */
++	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
++		case I915_OVERLAY_RGB:
++		case I915_OVERLAY_YUV_PACKED:
++			/* packed: stride must cover the packed source width */
++			if (packed_width_bytes(rec->flags, rec->src_width)
++					> rec->stride_Y)
++				return -EINVAL;
++
++			tmp = rec->stride_Y*rec->src_height;
++			if (rec->offset_Y + tmp > new_bo->size)
++				return -EINVAL;
++			break;
++		case I915_OVERLAY_YUV_PLANAR:
++			if (rec->src_width > rec->stride_Y)
++				return -EINVAL;
++			if (rec->src_width/uv_hscale > rec->stride_UV)
++				return -EINVAL;
++
++			tmp = rec->stride_Y*rec->src_height;
++			if (rec->offset_Y + tmp > new_bo->size)
++				return -EINVAL;
++			tmp = rec->stride_UV*rec->src_height;
++			tmp /= uv_vscale;
++			if (rec->offset_U + tmp > new_bo->size
++			    || rec->offset_V + tmp > new_bo->size)
++				return -EINVAL;
++			break;
++	}
++
++	return 0;
++}
++
++int intel_overlay_put_image(struct drm_device *dev, void *data,
++                            struct drm_file *file_priv)
++{
++	struct drm_intel_overlay_put_image *put_image_rec = data;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_overlay *overlay;
++	struct drm_mode_object *drmmode_obj;
++	struct intel_crtc *crtc;
++	struct drm_gem_object *new_bo;
++	struct put_image_params *params;
++	int ret;
++
++	if (!dev_priv) {
++		DRM_ERROR("called with no initialization\n");
++		return -EINVAL;
++	}
++
++	overlay = dev_priv->overlay;
++	if (!overlay) {
++		DRM_DEBUG("userspace bug: no overlay\n");
++		return -ENODEV;
++	}
++
++	if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
++		mutex_lock(&dev->mode_config.mutex);
++		mutex_lock(&dev->struct_mutex);
++
++		ret = intel_overlay_switch_off(overlay);
++
++		mutex_unlock(&dev->struct_mutex);
++		mutex_unlock(&dev->mode_config.mutex);
++
++		return ret;
++	}
++
++	params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
++	if (!params)
++		return -ENOMEM;
++
++	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
++					   DRM_MODE_OBJECT_CRTC);
++	if (!drmmode_obj) {
++		ret = -ENOENT;
++		goto out_free;
++	}
++	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
++
++	new_bo = drm_gem_object_lookup(dev, file_priv,
++			put_image_rec->bo_handle);
++	if (!new_bo) {
++		ret = -ENOENT;
++		goto out_free;
++	}
++
++	mutex_lock(&dev->mode_config.mutex);
++	mutex_lock(&dev->struct_mutex);
++
++	if (overlay->hw_wedged) {
++		ret = intel_overlay_recover_from_interrupt(overlay, 1);
++		if (ret != 0)
++			goto out_unlock;
++	}
++
++	if (overlay->crtc != crtc) {
++		struct drm_display_mode *mode = &crtc->base.mode;
++		ret = intel_overlay_switch_off(overlay);
++		if (ret != 0)
++			goto out_unlock;
++
++		ret = check_overlay_possible_on_crtc(overlay, crtc);
++		if (ret != 0)
++			goto out_unlock;
++
++		overlay->crtc = crtc;
++		crtc->overlay = overlay;
++
++		if (intel_panel_fitter_pipe(dev) == crtc->pipe
++		    /* and line too wide, i.e. one-line-mode */
++		    && mode->hdisplay > 1024) {
++			overlay->pfit_active = 1;
++			update_pfit_vscale_ratio(overlay);
++		} else
++			overlay->pfit_active = 0;
++	}
++
++	ret = check_overlay_dst(overlay, put_image_rec);
++	if (ret != 0)
++		goto out_unlock;
++
++	if (overlay->pfit_active) {
++		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
++			overlay->pfit_vscale_ratio);
++		/* shifting right rounds downwards, so add 1 */
++		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
++			overlay->pfit_vscale_ratio) + 1;
++	} else {
++		params->dst_y = put_image_rec->dst_y;
++		params->dst_h = put_image_rec->dst_height;
++	}
++	params->dst_x = put_image_rec->dst_x;
++	params->dst_w = put_image_rec->dst_width;
++
++	params->src_w = put_image_rec->src_width;
++	params->src_h = put_image_rec->src_height;
++	params->src_scan_w = put_image_rec->src_scan_width;
++	params->src_scan_h = put_image_rec->src_scan_height;
++	if (params->src_scan_h > params->src_h
++	    || params->src_scan_w > params->src_w) {
++		ret = -EINVAL;
++		goto out_unlock;
++	}
++
++	ret = check_overlay_src(dev, put_image_rec, new_bo);
++	if (ret != 0)
++		goto out_unlock;
++	params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
++	params->stride_Y = put_image_rec->stride_Y;
++	params->stride_UV = put_image_rec->stride_UV;
++	params->offset_Y = put_image_rec->offset_Y;
++	params->offset_U = put_image_rec->offset_U;
++	params->offset_V = put_image_rec->offset_V;
++
++	/* Check scaling after src size to prevent a divide-by-zero. */
++	ret = check_overlay_scaling(params);
++	if (ret != 0)
++		goto out_unlock;
++
++	ret = intel_overlay_do_put_image(overlay, new_bo, params);
++	if (ret != 0)
++		goto out_unlock;
++
++	mutex_unlock(&dev->struct_mutex);
++	mutex_unlock(&dev->mode_config.mutex);
++
++	kfree(params);
++
++	return 0;
++
++out_unlock:
++	mutex_unlock(&dev->struct_mutex);
++	mutex_unlock(&dev->mode_config.mutex);
++	drm_gem_object_unreference(new_bo);
++out_free:
++	kfree(params);
++
++	return ret;
++}
++
++static void update_reg_attrs(struct intel_overlay *overlay,
++			     struct overlay_registers *regs)
++{
++	regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
++	regs->OCLRC1 = overlay->saturation;
++}
++
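++/*
++ * Each gamma register packs three 8-bit channel values with the top byte
++ * unused; consecutive gamma points must be strictly increasing in every
++ * channel.
++ */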
++static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
++{
++	int i;
++
++	if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
++		return false;
++
++	for (i = 0; i < 3; i++) {
++		if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i * 8) & 0xff))
++			return false;
++	}
++
++	return true;
++}
++
++static bool check_gamma5_errata(u32 gamma5)
++{
++	int i;
++
++	for (i = 0; i < 3; i++) {
++		if (((gamma5 >> i*8) & 0xff) == 0x80)
++			return false;
++	}
++
++	return true;
++}
++
++static int check_gamma(struct drm_intel_overlay_attrs *attrs)
++{
++	if (!check_gamma_bounds(0, attrs->gamma0)
++	    || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
++	    || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
++	    || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
++	    || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
++	    || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
++	    || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
++		return -EINVAL;
++	if (!check_gamma5_errata(attrs->gamma5))
++		return -EINVAL;
++	return 0;
++}
++
++int intel_overlay_attrs(struct drm_device *dev, void *data,
++                        struct drm_file *file_priv)
++{
++	struct drm_intel_overlay_attrs *attrs = data;
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_overlay *overlay;
++	struct overlay_registers *regs;
++	int ret;
++
++	if (!dev_priv) {
++		DRM_ERROR("called with no initialization\n");
++		return -EINVAL;
++	}
++
++	overlay = dev_priv->overlay;
++	if (!overlay) {
++		DRM_DEBUG("userspace bug: no overlay\n");
++		return -ENODEV;
++	}
++
++	mutex_lock(&dev->mode_config.mutex);
++	mutex_lock(&dev->struct_mutex);
++
++	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
++		attrs->color_key = overlay->color_key;
++		attrs->brightness = overlay->brightness;
++		attrs->contrast = overlay->contrast;
++		attrs->saturation = overlay->saturation;
++
++		if (IS_I9XX(dev)) {
++			attrs->gamma0 = I915_READ(OGAMC0);
++			attrs->gamma1 = I915_READ(OGAMC1);
++			attrs->gamma2 = I915_READ(OGAMC2);
++			attrs->gamma3 = I915_READ(OGAMC3);
++			attrs->gamma4 = I915_READ(OGAMC4);
++			attrs->gamma5 = I915_READ(OGAMC5);
++		}
++		ret = 0;
++	} else {
++		overlay->color_key = attrs->color_key;
++		if (attrs->brightness >= -128 && attrs->brightness <= 127) {
++			overlay->brightness = attrs->brightness;
++		} else {
++			ret = -EINVAL;
++			goto out_unlock;
++		}
++		if (attrs->contrast <= 255) {
++			overlay->contrast = attrs->contrast;
++		} else {
++			ret = -EINVAL;
++			goto out_unlock;
++		}
++		if (attrs->saturation <= 1023) {
++			overlay->saturation = attrs->saturation;
++		} else {
++			ret = -EINVAL;
++			goto out_unlock;
++		}
++
++		regs = intel_overlay_map_regs_atomic(overlay);
++		if (!regs) {
++			ret = -ENOMEM;
++			goto out_unlock;
++		}
++
++		update_reg_attrs(overlay, regs);
++
++		intel_overlay_unmap_regs_atomic(overlay);
++
++		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
++			if (!IS_I9XX(dev)) {
++				ret = -EINVAL;
++				goto out_unlock;
++			}
++
++			if (overlay->active) {
++				ret = -EBUSY;
++				goto out_unlock;
++			}
++
++			ret = check_gamma(attrs);
++			if (ret != 0)
++				goto out_unlock;
++
++			I915_WRITE(OGAMC0, attrs->gamma0);
++			I915_WRITE(OGAMC1, attrs->gamma1);
++			I915_WRITE(OGAMC2, attrs->gamma2);
++			I915_WRITE(OGAMC3, attrs->gamma3);
++			I915_WRITE(OGAMC4, attrs->gamma4);
++			I915_WRITE(OGAMC5, attrs->gamma5);
++		}
++		ret = 0;
++	}
++
++out_unlock:
++	mutex_unlock(&dev->struct_mutex);
++	mutex_unlock(&dev->mode_config.mutex);
++
++	return ret;
++}
++
++void intel_setup_overlay(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++	struct intel_overlay *overlay;
++	struct drm_gem_object *reg_bo;
++	struct overlay_registers *regs;
++	int ret;
++
++	if (!OVERLAY_EXISTS(dev))
++		return;
++
++	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
++	if (!overlay)
++		return;
++	overlay->dev = dev;
++
++	reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
++	if (!reg_bo)
++		goto out_free;
++	overlay->reg_bo = reg_bo->driver_private;
++
++	if (OVERLAY_NONPHYSICAL(dev)) {
++		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
++		if (ret) {
++			DRM_ERROR("failed to pin overlay register bo\n");
++			goto out_free_bo;
++		}
++		overlay->flip_addr = overlay->reg_bo->gtt_offset;
++	} else {
++		ret = i915_gem_attach_phys_object(dev, reg_bo,
++				I915_GEM_PHYS_OVERLAY_REGS);
++		if (ret) {
++			DRM_ERROR("failed to attach phys overlay regs\n");
++			goto out_free_bo;
++		}
++		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
++	}
++
++	/* init all values */
++	overlay->color_key = 0x0101fe;
++	overlay->brightness = -19;
++	overlay->contrast = 75;
++	overlay->saturation = 146;
++
++	regs = intel_overlay_map_regs_atomic(overlay);
++	if (!regs)
++		goto out_free_bo;
++
++	memset(regs, 0, sizeof(struct overlay_registers));
++	update_polyphase_filter(regs);
++
++	update_reg_attrs(overlay, regs);
++
++	intel_overlay_unmap_regs_atomic(overlay);
++
++	dev_priv->overlay = overlay;
++	DRM_INFO("initialized overlay support\n");
++	return;
++
++out_free_bo:
++	drm_gem_object_unreference(reg_bo);
++out_free:
++	kfree(overlay);
++	return;
++}
++
++void intel_cleanup_overlay(struct drm_device *dev)
++{
++	drm_i915_private_t *dev_priv = dev->dev_private;
++
++	if (dev_priv->overlay) {
++		/* The BOs should be freed by the generic code already.
++		 * Furthermore, modesetting teardown happens beforehand, so
++		 * the hardware should already be off. */
++		BUG_ON(dev_priv->overlay->active);
++
++		kfree(dev_priv->overlay);
++	}
++}
+diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
+index 3f5aaf1..82678d3 100644
+--- a/drivers/gpu/drm/i915/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/intel_sdvo.c
+@@ -36,8 +36,6 @@
+ #include "i915_drv.h"
+ #include "intel_sdvo_regs.h"
+ 
+-#undef SDVO_DEBUG
+-
+ static char *tv_format_names[] = {
+ 	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
+ 	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
+@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name {
+ #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
+ #define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
+ 
+-#ifdef SDVO_DEBUG
+ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
+ 				   void *args, int args_len)
+ {
+@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
+ 		DRM_LOG_KMS("(%02X)", cmd);
+ 	DRM_LOG_KMS("\n");
+ }
+-#else
+-#define intel_sdvo_debug_write(o, c, a, l)
+-#endif
+ 
+ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
+ 				 void *args, int args_len)
+@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
+ 	intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
+ }
+ 
+-#ifdef SDVO_DEBUG
+ static const char *cmd_status_names[] = {
+ 	"Power on",
+ 	"Success",
+@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
+ 		DRM_LOG_KMS("(??? %d)", status);
+ 	DRM_LOG_KMS("\n");
+ }
+-#else
+-#define intel_sdvo_debug_response(o, r, l, s)
+-#endif
+ 
+ static u8 intel_sdvo_read_response(struct intel_output *intel_output,
+ 				   void *response, int response_len)
+@@ -1702,6 +1692,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
+ 
+ 	intel_sdvo_write_cmd(intel_output,
+ 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
++	if (sdvo_priv->is_tv) {
++		/* add 30ms delay when the output type is SDVO-TV */
++		mdelay(30);
++	}
+ 	status = intel_sdvo_read_response(intel_output, &response, 2);
+ 
+ 	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
+@@ -2351,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
+ 		connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+ 		intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ 					(1 << INTEL_ANALOG_CLONE_BIT);
++	} else if (flags & SDVO_OUTPUT_CVBS0) {
++
++		sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
++		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
++		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
++		sdvo_priv->is_tv = true;
++		intel_output->needs_tv_clock = true;
++		intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+ 	} else if (flags & SDVO_OUTPUT_LVDS0) {
+ 
+ 		sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
+@@ -2804,7 +2806,7 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
+ 	/* Wrap with our custom algo which switches to DDC mode */
+ 	intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
+ 
+-	/* In defaut case sdvo lvds is false */
++	/* In default case sdvo lvds is false */
+ 	intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
+ 
+ 	if (intel_sdvo_output_setup(intel_output,
+diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
+index ce026f0..552ec11 100644
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1413,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
+ 	 *  0 0 0 Component
+ 	 */
+ 	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+-		DRM_DEBUG("Detected Composite TV connection\n");
++		DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ 		type = DRM_MODE_CONNECTOR_Composite;
+ 	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+-		DRM_DEBUG("Detected S-Video TV connection\n");
++		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ 		type = DRM_MODE_CONNECTOR_SVIDEO;
+ 	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+-		DRM_DEBUG("Detected Component TV connection\n");
++		DRM_DEBUG_KMS("Detected Component TV connection\n");
+ 		type = DRM_MODE_CONNECTOR_Component;
+ 	} else {
+-		DRM_DEBUG("No TV connection detected\n");
++		DRM_DEBUG_KMS("No TV connection detected\n");
+ 		type = -1;
+ 	}
+ 
+@@ -1699,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
+ 	.destroy = intel_tv_enc_destroy,
+ };
+ 
++/*
++ * Enumerate the child device array parsed from the VBT to check whether
++ * the integrated TV is present.  Returns 1 if it is present and 0 if it
++ * is not.  If no child device was parsed from the VBT, the TV is assumed
++ * to be present.
++ */
++static int tv_is_present_in_vbt(struct drm_device *dev)
++{
++	struct drm_i915_private *dev_priv = dev->dev_private;
++	struct child_device_config *p_child;
++	int i, ret;
++
++	if (!dev_priv->child_dev_num)
++		return 1;
++
++	ret = 0;
++	for (i = 0; i < dev_priv->child_dev_num; i++) {
++		p_child = dev_priv->child_dev + i;
++		/*
++		 * If the device type is not TV, continue.
++		 */
++		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
++			p_child->device_type != DEVICE_TYPE_TV)
++			continue;
++		/* The TV is regarded as present only when addin_offset is
++		 * non-zero.
++		 */
++		if (p_child->addin_offset) {
++			ret = 1;
++			break;
++		}
++	}
++	return ret;
++}
+ 
+ void
+ intel_tv_init(struct drm_device *dev)
+@@ -1714,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
+ 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ 		return;
+ 
++	if (!tv_is_present_in_vbt(dev)) {
++		DRM_DEBUG_KMS("Integrated TV is not present.\n");
++		return;
++	}
+ 	/* Even if we have an encoder we may not have a connector */
+ 	if (!dev_priv->int_tv_support)
+ 		return;
+@@ -1801,8 +1840,6 @@ intel_tv_init(struct drm_device *dev)
+ 	drm_connector_attach_property(connector,
+ 				   dev->mode_config.tv_bottom_margin_property,
+ 				   tv_priv->margin[TV_MARGIN_BOTTOM]);
+-
+-	dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
+ out:
+ 	drm_sysfs_connector_add(connector);
+ }
+diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
+index 97ee566..ddfe161 100644
+--- a/drivers/gpu/drm/mga/mga_drv.c
++++ b/drivers/gpu/drm/mga/mga_drv.c
+@@ -68,7 +68,7 @@ static struct drm_driver driver = {
+ 		.owner = THIS_MODULE,
+ 		.open = drm_open,
+ 		.release = drm_release,
+-		.ioctl = drm_ioctl,
++		.unlocked_ioctl = drm_ioctl,
+ 		.mmap = drm_mmap,
+ 		.poll = drm_poll,
+ 		.fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
+index 30d0047..c1f877b 100644
+--- a/drivers/gpu/drm/mga/mga_ioc32.c
++++ b/drivers/gpu/drm/mga/mga_ioc32.c
+@@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
+ 	if (err)
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_MGA_INIT, (unsigned long)init);
++	return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
+ }
+ 
+ typedef struct drm_mga_getparam32 {
+@@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
+ 			  &getparam->value))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
++	return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+ }
+ 
+ typedef struct drm_mga_drm_bootstrap32 {
+@@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
+ 	    || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
+ 		return -EFAULT;
+ 
+-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+-			DRM_IOCTL_MGA_DMA_BOOTSTRAP,
++	err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+ 			(unsigned long)dma_bootstrap);
+ 	if (err)
+ 		return err;
+@@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+ 		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+ 
+-	lock_kernel();		/* XXX for now */
+ 	if (fn != NULL)
+ 		ret = (*fn) (filp, cmd, arg);
+ 	else
+-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+-	unlock_kernel();
++		ret = drm_ioctl(filp, cmd, arg);
+ 
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
+new file mode 100644
+index 0000000..1175429
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/Kconfig
+@@ -0,0 +1,44 @@
++config DRM_NOUVEAU
++	tristate "Nouveau (nVidia) cards"
++	depends on DRM
++	select FW_LOADER
++	select DRM_KMS_HELPER
++	select DRM_TTM
++	select FB_CFB_FILLRECT
++	select FB_CFB_COPYAREA
++	select FB_CFB_IMAGEBLIT
++	select FB
++	select FRAMEBUFFER_CONSOLE if !EMBEDDED
++	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
++	help
++	  Choose this option for open-source nVidia support.
++
++config DRM_NOUVEAU_BACKLIGHT
++	bool "Support for backlight control"
++	depends on DRM_NOUVEAU
++	default y
++	help
++	  Say Y here if you want to control the backlight of your display
++	  (e.g. a laptop panel).
++
++config DRM_NOUVEAU_DEBUG
++	bool "Build in Nouveau's debugfs support"
++	depends on DRM_NOUVEAU && DEBUG_FS
++	default y
++	help
++	  Say Y here if you want Nouveau to output debugging information
++	  via debugfs.
++
++menu "I2C encoder or helper chips"
++     depends on DRM && DRM_KMS_HELPER && I2C
++
++config DRM_I2C_CH7006
++	tristate "Chrontel ch7006 TV encoder"
++	default m if DRM_NOUVEAU
++	help
++	  Support for Chrontel ch7006 and similar TV encoders, found
++	  on some nVidia video cards.
++
++	  This driver is currently only useful if you're also using
++	  the nouveau driver.
++endmenu
+diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
+new file mode 100644
+index 0000000..48c290b
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/Makefile
+@@ -0,0 +1,32 @@
++#
++# Makefile for the drm device driver.  This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y := -Iinclude/drm
++nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
++             nouveau_object.o nouveau_irq.o nouveau_notifier.o \
++             nouveau_sgdma.o nouveau_dma.o \
++             nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
++             nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
++             nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
++             nouveau_dp.o nouveau_grctx.o \
++             nv04_timer.o \
++             nv04_mc.o nv40_mc.o nv50_mc.o \
++             nv04_fb.o nv10_fb.o nv40_fb.o \
++             nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
++             nv04_graph.o nv10_graph.o nv20_graph.o \
++             nv40_graph.o nv50_graph.o \
++             nv40_grctx.o \
++             nv04_instmem.o nv50_instmem.o \
++             nv50_crtc.o nv50_dac.o nv50_sor.o \
++             nv50_cursor.o nv50_display.o nv50_fbcon.o \
++             nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
++             nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
++             nv17_gpio.o
++
++nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
++nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
++nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
++nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
++
++obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
+diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+new file mode 100644
+index 0000000..48227e7
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
+@@ -0,0 +1,125 @@
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <acpi/acpi_drivers.h>
++#include <acpi/acpi_bus.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "drm_crtc_helper.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++#include "nv50_display.h"
++
++#define NOUVEAU_DSM_SUPPORTED 0x00
++#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
++
++#define NOUVEAU_DSM_ACTIVE 0x01
++#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
++
++#define NOUVEAU_DSM_LED 0x02
++#define NOUVEAU_DSM_LED_STATE 0x00
++#define NOUVEAU_DSM_LED_OFF 0x10
++#define NOUVEAU_DSM_LED_STAMINA 0x11
++#define NOUVEAU_DSM_LED_SPEED 0x12
++
++#define NOUVEAU_DSM_POWER 0x03
++#define NOUVEAU_DSM_POWER_STATE 0x00
++#define NOUVEAU_DSM_POWER_SPEED 0x01
++#define NOUVEAU_DSM_POWER_STAMINA 0x02
++
++static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
++{
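++	/* UUID selecting the vendor _DSM interface, in the byte order the
++	 * ACPI method expects; presumably NVIDIA's hybrid-graphics _DSM */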
++	static char muid[] = {
++		0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
++		0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
++	};
++
++	struct pci_dev *pdev = dev->pdev;
++	struct acpi_handle *handle;
++	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
++	struct acpi_object_list input;
++	union acpi_object params[4];
++	union acpi_object *obj;
++	int err;
++
++	handle = DEVICE_ACPI_HANDLE(&pdev->dev);
++
++	if (!handle)
++		return -ENODEV;
++
++	input.count = 4;
++	input.pointer = params;
++	params[0].type = ACPI_TYPE_BUFFER;
++	params[0].buffer.length = sizeof(muid);
++	params[0].buffer.pointer = (char *)muid;
++	params[1].type = ACPI_TYPE_INTEGER;
++	params[1].integer.value = 0x00000102;
++	params[2].type = ACPI_TYPE_INTEGER;
++	params[2].integer.value = func;
++	params[3].type = ACPI_TYPE_INTEGER;
++	params[3].integer.value = arg;
++
++	err = acpi_evaluate_object(handle, "_DSM", &input, &output);
++	if (err) {
++		NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
++		return err;
++	}
++
++	obj = (union acpi_object *)output.pointer;
++
++	if (obj->type == ACPI_TYPE_INTEGER &&
++	    obj->integer.value == 0x80000002) {
++		kfree(output.pointer);
++		return -ENODEV;
++	}
++
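++	/* a 4-byte buffer result is returned little-endian; reassemble it */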
++	if (obj->type == ACPI_TYPE_BUFFER) {
++		if (obj->buffer.length == 4 && result) {
++			*result = 0;
++			*result |= obj->buffer.pointer[0];
++			*result |= (obj->buffer.pointer[1] << 8);
++			*result |= (obj->buffer.pointer[2] << 16);
++			*result |= (obj->buffer.pointer[3] << 24);
++		}
++	}
++
++	kfree(output.pointer);
++	return 0;
++}
++
++int nouveau_hybrid_setup(struct drm_device *dev)
++{
++	int result;
++
++	if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
++								&result))
++		return -ENODEV;
++
++	NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
++
++	if (result) { /* Ensure that the external GPU is enabled */
++		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
++		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
++									NULL);
++	} else { /* Stamina mode - disable the external GPU */
++		nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
++									NULL);
++		nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
++									NULL);
++	}
++
++	return 0;
++}
++
++bool nouveau_dsm_probe(struct drm_device *dev)
++{
++	int support = 0;
++
++	if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
++				NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
++		return false;
++
++	if (!support)
++		return false;
++
++	return true;
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+new file mode 100644
+index 0000000..20564f8
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
+@@ -0,0 +1,155 @@
++/*
++ * Copyright (C) 2009 Red Hat <mjg at redhat.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *  Matthew Garrett <mjg at redhat.com>
++ *
++ * Register locations derived from NVClock by Roderick Colenbrander
++ */
++
++#include <linux/backlight.h>
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++#include "nouveau_reg.h"
++
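++/*
++ * On NV40 the panel brightness appears to live in the field of
++ * NV40_PMC_BACKLIGHT selected by NV40_PMC_BACKLIGHT_MASK, starting at
++ * bit 16 (register layout derived from NVClock, per the header above).
++ */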
++static int nv40_get_intensity(struct backlight_device *bd)
++{
++	struct drm_device *dev = bl_get_data(bd);
++	int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
++									>> 16;
++
++	return val;
++}
++
++static int nv40_set_intensity(struct backlight_device *bd)
++{
++	struct drm_device *dev = bl_get_data(bd);
++	int val = bd->props.brightness;
++	int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
++
++	nv_wr32(dev, NV40_PMC_BACKLIGHT,
++		 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
++
++	return 0;
++}
++
++static struct backlight_ops nv40_bl_ops = {
++	.options = BL_CORE_SUSPENDRESUME,
++	.get_brightness = nv40_get_intensity,
++	.update_status = nv40_set_intensity,
++};
++
++static int nv50_get_intensity(struct backlight_device *bd)
++{
++	struct drm_device *dev = bl_get_data(bd);
++
++	return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
++}
++
++static int nv50_set_intensity(struct backlight_device *bd)
++{
++	struct drm_device *dev = bl_get_data(bd);
++	int val = bd->props.brightness;
++
++	nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
++		val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
++	return 0;
++}
++
++static struct backlight_ops nv50_bl_ops = {
++	.options = BL_CORE_SUSPENDRESUME,
++	.get_brightness = nv50_get_intensity,
++	.update_status = nv50_set_intensity,
++};
++
++static int nouveau_nv40_backlight_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct backlight_device *bd;
++
++	if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
++		return 0;
++
++	bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
++				       &nv40_bl_ops);
++	if (IS_ERR(bd))
++		return PTR_ERR(bd);
++
++	dev_priv->backlight = bd;
++	bd->props.max_brightness = 31;
++	bd->props.brightness = nv40_get_intensity(bd);
++	backlight_update_status(bd);
++
++	return 0;
++}
++
++static int nouveau_nv50_backlight_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct backlight_device *bd;
++
++	if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
++		return 0;
++
++	bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
++				       &nv50_bl_ops);
++	if (IS_ERR(bd))
++		return PTR_ERR(bd);
++
++	dev_priv->backlight = bd;
++	bd->props.max_brightness = 1025;
++	bd->props.brightness = nv50_get_intensity(bd);
++	backlight_update_status(bd);
++	return 0;
++}
++
++int nouveau_backlight_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	switch (dev_priv->card_type) {
++	case NV_40:
++		return nouveau_nv40_backlight_init(dev);
++	case NV_50:
++		return nouveau_nv50_backlight_init(dev);
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++void nouveau_backlight_exit(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->backlight) {
++		backlight_device_unregister(dev_priv->backlight);
++		dev_priv->backlight = NULL;
++	}
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
+new file mode 100644
+index 0000000..0e9cd1d
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
+@@ -0,0 +1,6051 @@
++/*
++ * Copyright 2005-2006 Erik Waling
++ * Copyright 2006 Stephane Marchesin
++ * Copyright 2007-2009 Stuart Bennett
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
++ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#include "drmP.h"
++#define NV_DEBUG_NOTRACE
++#include "nouveau_drv.h"
++#include "nouveau_hw.h"
++
++/* these defines are made up */
++#define NV_CIO_CRE_44_HEADA 0x0
++#define NV_CIO_CRE_44_HEADB 0x3
++#define FEATURE_MOBILE 0x10	/* also FEATURE_QUADRO for BMP */
++#define LEGACY_I2C_CRT 0x80
++#define LEGACY_I2C_PANEL 0x81
++#define LEGACY_I2C_TV 0x82
++
++#define EDID1_LEN 128
++
++#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
++#define LOG_OLD_VALUE(x)
++
++#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
++#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
++
++struct init_exec {
++	bool execute;
++	bool repeat;
++};
++
++static bool nv_cksum(const uint8_t *data, unsigned int length)
++{
++	/*
++	 * There are a few checksums in the BIOS, so here's a generic checking
++	 * function.
++	 */
++	int i;
++	uint8_t sum = 0;
++
++	for (i = 0; i < length; i++)
++		sum += data[i];
++
++	if (sum)
++		return true;
++
++	return false;
++}
++
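++/*
++ * Score an image: 0 = no BIOS signature, 1 = bad checksum from a
++ * read-only source, 2 = bad checksum from a writeable source,
++ * 3 = signature and checksum both good.
++ */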
++static int
++score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
++{
++	if (!(data[0] == 0x55 && data[1] == 0xAA)) {
++		NV_TRACEWARN(dev, "... BIOS signature not found\n");
++		return 0;
++	}
++
++	if (nv_cksum(data, data[2] * 512)) {
++		NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
++		/* if a ro image is somewhat bad, it's probably all rubbish */
++		return writeable ? 2 : 1;
++	} else
++		NV_TRACE(dev, "... appears to be valid\n");
++
++	return 3;
++}
++
++static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t pci_nv_20, save_pci_nv_20;
++	int pcir_ptr;
++	int i;
++
++	if (dev_priv->card_type >= NV_50)
++		pci_nv_20 = 0x88050;
++	else
++		pci_nv_20 = NV_PBUS_PCI_NV_20;
++
++	/* enable ROM access */
++	save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
++	nvWriteMC(dev, pci_nv_20,
++		  save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
++
++	/* bail if no rom signature */
++	if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
++	    nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
++		goto out;
++
++	/* additional check (see note below) - read PCI record header */
++	pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
++		   nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
++	if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
++	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
++	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
++	    nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
++		goto out;
++
++	/* on some 6600GT/6800LE prom reads are messed up.  nvclock alleges
++	 * a good read may be obtained by waiting or re-reading (cargo-cult:
++	 * 5x) each byte.  we'll hope pramin has something usable instead
++	 */
++	for (i = 0; i < NV_PROM_SIZE; i++)
++		data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
++
++out:
++	/* disable ROM access */
++	nvWriteMC(dev, pci_nv_20,
++		  save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
++}
++
++static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t old_bar0_pramin = 0;
++	int i;
++
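++	/* on NV50 the VBIOS image is expected to sit in VRAM; repoint the
++	 * BAR0 PRAMIN window at it, stashing the old window so it can be
++	 * restored below */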
++	if (dev_priv->card_type >= NV_50) {
++		uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
++
++		if (!vbios_vram)
++			vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
++
++		old_bar0_pramin = nv_rd32(dev, 0x1700);
++		nv_wr32(dev, 0x1700, vbios_vram >> 16);
++	}
++
++	/* bail if no rom signature */
++	if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
++	    nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
++		goto out;
++
++	for (i = 0; i < NV_PROM_SIZE; i++)
++		data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
++
++out:
++	if (dev_priv->card_type >= NV_50)
++		nv_wr32(dev, 0x1700, old_bar0_pramin);
++}
++
++static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
++{
++	void __iomem *rom = NULL;
++	size_t rom_len;
++	int ret;
++
++	ret = pci_enable_rom(dev->pdev);
++	if (ret)
++		return;
++
++	rom = pci_map_rom(dev->pdev, &rom_len);
++	if (!rom)
++		goto out;
++	memcpy_fromio(data, rom, rom_len);
++	pci_unmap_rom(dev->pdev, rom);
++
++out:
++	pci_disable_rom(dev->pdev);
++}
++
++struct methods {
++	const char desc[8];
++	void (*loadbios)(struct drm_device *, uint8_t *);
++	const bool rw;
++};
++
++static struct methods nv04_methods[] = {
++	{ "PROM", load_vbios_prom, false },
++	{ "PRAMIN", load_vbios_pramin, true },
++	{ "PCIROM", load_vbios_pci, true },
++};
++
++static struct methods nv50_methods[] = {
++	{ "PRAMIN", load_vbios_pramin, true },
++	{ "PROM", load_vbios_prom, false },
++	{ "PCIROM", load_vbios_pci, true },
++};
++
++#define METHODCNT 3
++
++static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct methods *methods;
++	int i;
++	int testscore = 3;
++	int scores[METHODCNT];
++
++	if (nouveau_vbios) {
++		methods = nv04_methods;
++		for (i = 0; i < METHODCNT; i++)
++			if (!strcasecmp(nouveau_vbios, methods[i].desc))
++				break;
++
++		if (i < METHODCNT) {
++			NV_INFO(dev, "Attempting to use BIOS image from %s\n",
++				methods[i].desc);
++
++			methods[i].loadbios(dev, data);
++			if (score_vbios(dev, data, methods[i].rw))
++				return true;
++		}
++
++		NV_ERROR(dev, "VBIOS source '%s' invalid\n", nouveau_vbios);
++	}
++
++	if (dev_priv->card_type < NV_50)
++		methods = nv04_methods;
++	else
++		methods = nv50_methods;
++
++	for (i = 0; i < METHODCNT; i++) {
++		NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
++			 methods[i].desc);
++		data[0] = data[1] = 0;	/* avoid reuse of previous image */
++		methods[i].loadbios(dev, data);
++		scores[i] = score_vbios(dev, data, methods[i].rw);
++		if (scores[i] == testscore)
++			return true;
++	}
++
++	while (--testscore > 0) {
++		for (i = 0; i < METHODCNT; i++) {
++			if (scores[i] == testscore) {
++				NV_TRACE(dev, "Using BIOS image from %s\n",
++					 methods[i].desc);
++				methods[i].loadbios(dev, data);
++				return true;
++			}
++		}
++	}
++
++	NV_ERROR(dev, "No valid BIOS image found\n");
++	return false;
++}
++
++struct init_tbl_entry {
++	char *name;
++	uint8_t id;
++	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
++};
++
++struct bit_entry {
++	uint8_t id[2];
++	uint16_t length;
++	uint16_t offset;
++};
++
++static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
++
++#define MACRO_INDEX_SIZE	2
++#define MACRO_SIZE		8
++#define CONDITION_SIZE		12
++#define IO_FLAG_CONDITION_SIZE	9
++#define IO_CONDITION_SIZE	5
++#define MEM_INIT_SIZE		66
++
++static void still_alive(void)
++{
++#if 0
++	sync();
++	msleep(2);
++#endif
++}
++
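++/*
++ * NV50 init scripts may tag a register address with bit 30 set to mean
++ * "offset by the output resource (OR) in use"; translate such tagged
++ * addresses into real register offsets before access.
++ */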
++static uint32_t
++munge_reg(struct nvbios *bios, uint32_t reg)
++{
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++	struct dcb_entry *dcbent = bios->display.output;
++
++	if (dev_priv->card_type < NV_50)
++		return reg;
++
++	if (reg & 0x40000000) {
++		BUG_ON(!dcbent);
++
++		reg += (ffs(dcbent->or) - 1) * 0x800;
++		if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
++			reg += 0x00000080;
++	}
++
++	reg &= ~0x60000000;
++	return reg;
++}
++
++static int
++valid_reg(struct nvbios *bios, uint32_t reg)
++{
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++	struct drm_device *dev = bios->dev;
++
++	/* C51 has misaligned regs on purpose. Marvellous */
++	if (reg & 0x2 ||
++	    (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
++		NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
++
++	/* warn on C51 regs that haven't been verified accessible in tracing */
++	if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
++	    reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
++		NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
++			reg);
++
++	if (reg >= (8*1024*1024)) {
++		NV_ERROR(dev, "=== reg 0x%08x out of mapped bounds ===\n", reg);
++		return 0;
++	}
++
++	return 1;
++}
++
++static bool
++valid_idx_port(struct nvbios *bios, uint16_t port)
++{
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++	struct drm_device *dev = bios->dev;
++
++	/*
++	 * If adding more ports here, the read/write functions below will need
++	 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
++	 * used for the port in question
++	 */
++	if (dev_priv->card_type < NV_50) {
++		if (port == NV_CIO_CRX__COLOR)
++			return true;
++		if (port == NV_VIO_SRX)
++			return true;
++	} else {
++		if (port == NV_CIO_CRX__COLOR)
++			return true;
++	}
++
++	NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
++		 port);
++
++	return false;
++}
++
++static bool
++valid_port(struct nvbios *bios, uint16_t port)
++{
++	struct drm_device *dev = bios->dev;
++
++	/*
++	 * If adding more ports here, the read/write functions below will need
++	 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
++	 * used for the port in question
++	 */
++	if (port == NV_VIO_VSE2)
++		return true;
++
++	NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
++
++	return false;
++}
++
++static uint32_t
++bios_rd32(struct nvbios *bios, uint32_t reg)
++{
++	uint32_t data;
++
++	reg = munge_reg(bios, reg);
++	if (!valid_reg(bios, reg))
++		return 0;
++
++	/*
++	 * C51 sometimes uses regs with bit0 set in the address. For these
++	 * cases there should exist a translation in a BIOS table to an IO
++	 * port address which the BIOS uses for accessing the reg
++	 *
++	 * These only seem to appear for the power control regs to a flat panel,
++	 * and the GPIO regs at 0x60081*.  In C51 mmio traces the normal regs
++	 * for 0x1308 and 0x1310 are used - hence the mask below.  An S3
++	 * suspend-resume mmio trace from a C51 will be required to see if this
++	 * is true for the power microcode in 0x14.., or whether the direct IO
++	 * port access method is needed
++	 */
++	if (reg & 0x1)
++		reg &= ~0x1;
++
++	data = nv_rd32(bios->dev, reg);
++
++	BIOSLOG(bios, "	Read:  Reg: 0x%08X, Data: 0x%08X\n", reg, data);
++
++	return data;
++}
++
++static void
++bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
++{
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++
++	reg = munge_reg(bios, reg);
++	if (!valid_reg(bios, reg))
++		return;
++
++	/* see note in bios_rd32 */
++	if (reg & 0x1)
++		reg &= 0xfffffffe;
++
++	LOG_OLD_VALUE(bios_rd32(bios, reg));
++	BIOSLOG(bios, "	Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
++
++	if (dev_priv->VBIOS.execute) {
++		still_alive();
++		nv_wr32(bios->dev, reg, data);
++	}
++}
++
++static uint8_t
++bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
++{
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++	struct drm_device *dev = bios->dev;
++	uint8_t data;
++
++	if (!valid_idx_port(bios, port))
++		return 0;
++
++	if (dev_priv->card_type < NV_50) {
++		if (port == NV_VIO_SRX)
++			data = NVReadVgaSeq(dev, bios->state.crtchead, index);
++		else	/* assume NV_CIO_CRX__COLOR */
++			data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
++	} else {
++		uint32_t data32;
++
++		data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
++		data = (data32 >> ((index & 3) << 3)) & 0xff;
++	}
++
++	BIOSLOG(bios, "	Indexed IO read:  Port: 0x%04X, Index: 0x%02X, "
++		      "Head: 0x%02X, Data: 0x%02X\n",
++		port, index, bios->state.crtchead, data);
++	return data;
++}
++
++static void
++bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
++{
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++	struct drm_device *dev = bios->dev;
++
++	if (!valid_idx_port(bios, port))
++		return;
++
++	/*
++	 * The current head is maintained in the nvbios member state.crtchead.
++	 * We trap changes to CR44 and update the head variable and hence the
++	 * register set written.
++	 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
++	 * of the write, and to head1 after the write
++	 */
++	if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
++	    data != NV_CIO_CRE_44_HEADB)
++		bios->state.crtchead = 0;
++
++	LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
++	BIOSLOG(bios, "	Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
++		      "Head: 0x%02X, Data: 0x%02X\n",
++		port, index, bios->state.crtchead, data);
++
++	if (bios->execute && dev_priv->card_type < NV_50) {
++		still_alive();
++		if (port == NV_VIO_SRX)
++			NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
++		else	/* assume NV_CIO_CRX__COLOR */
++			NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
++	} else
++	if (bios->execute) {
++		uint32_t data32, shift = (index & 3) << 3;
++
++		still_alive();
++
++		data32  = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
++		data32 &= ~(0xff << shift);
++		data32 |= (data << shift);
++		bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
++	}
++
++	if (port == NV_CIO_CRX__COLOR &&
++	    index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
++		bios->state.crtchead = 1;
++}
++
++static uint8_t
++bios_port_rd(struct nvbios *bios, uint16_t port)
++{
++	uint8_t data, head = bios->state.crtchead;
++
++	if (!valid_port(bios, port))
++		return 0;
++
++	data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
++
++	BIOSLOG(bios, "	IO read:  Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
++		port, head, data);
++
++	return data;
++}
++
++static void
++bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
++{
++	int head = bios->state.crtchead;
++
++	if (!valid_port(bios, port))
++		return;
++
++	LOG_OLD_VALUE(bios_port_rd(bios, port));
++	BIOSLOG(bios, "	IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
++		port, head, data);
++
++	if (!bios->execute)
++		return;
++
++	still_alive();
++	NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
++}
++
++static bool
++io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
++{
++	/*
++	 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
++	 * for the CRTC index; 1 byte for the mask to apply to the value
++	 * retrieved from the CRTC; 1 byte for the shift right to apply to the
++	 * masked CRTC value; 2 bytes for the offset to the flag array, to
++	 * which the shifted value is added; 1 byte for the mask applied to the
++	 * value read from the flag array; and 1 byte for the value to compare
++	 * against the masked byte from the flag table.
++	 */
++
++	uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
++	uint16_t crtcport = ROM16(bios->data[condptr]);
++	uint8_t crtcindex = bios->data[condptr + 2];
++	uint8_t mask = bios->data[condptr + 3];
++	uint8_t shift = bios->data[condptr + 4];
++	uint16_t flagarray = ROM16(bios->data[condptr + 5]);
++	uint8_t flagarraymask = bios->data[condptr + 7];
++	uint8_t cmpval = bios->data[condptr + 8];
++	uint8_t data;
++
++	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
++		      "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
++		      "Cmpval: 0x%02X\n",
++		offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
++
++	data = bios_idxprt_rd(bios, crtcport, crtcindex);
++
++	data = bios->data[flagarray + ((data & mask) >> shift)];
++	data &= flagarraymask;
++
++	BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
++		offset, data, cmpval);
++
++	return (data == cmpval);
++}
++
++static bool
++bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
++{
++	/*
++	 * The condition table entry has 4 bytes for the address of the
++	 * register to check, 4 bytes for a mask to apply to the register and
++	 * 4 for a test comparison value
++	 */
++
++	uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
++	uint32_t reg = ROM32(bios->data[condptr]);
++	uint32_t mask = ROM32(bios->data[condptr + 4]);
++	uint32_t cmpval = ROM32(bios->data[condptr + 8]);
++	uint32_t data;
++
++	BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
++		offset, cond, reg, mask);
++
++	data = bios_rd32(bios, reg) & mask;
++
++	BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
++		offset, data, cmpval);
++
++	return (data == cmpval);
++}
++
++static bool
++io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
++{
++	/*
++	 * The IO condition entry has 2 bytes for the IO port address; 1 byte
++	 * for the index to write to io_port; 1 byte for the mask to apply to
++	 * the byte read from io_port+1; and 1 byte for the value to compare
++	 * against the masked byte.
++	 */
++
++	uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
++	uint16_t io_port = ROM16(bios->data[condptr]);
++	uint8_t port_index = bios->data[condptr + 2];
++	uint8_t mask = bios->data[condptr + 3];
++	uint8_t cmpval = bios->data[condptr + 4];
++
++	uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
++
++	BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
++		offset, data, cmpval);
++
++	return (data == cmpval);
++}
++
++static int
++nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t reg0 = nv_rd32(dev, reg + 0);
++	uint32_t reg1 = nv_rd32(dev, reg + 4);
++	struct nouveau_pll_vals pll;
++	struct pll_lims pll_limits;
++	int ret;
++
++	ret = get_pll_limits(dev, reg, &pll_limits);
++	if (ret)
++		return ret;
++
++	clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
++	if (!clk)
++		return -ERANGE;
++
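++	/*
++	 * log2P lives in bits 16:18 of the first PLL register; N and M fill
++	 * bits 8:15 and 0:7 of the second.
++	 */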
++	reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
++	reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
++
++	if (dev_priv->VBIOS.execute) {
++		still_alive();
++		nv_wr32(dev, reg + 4, reg1);
++		nv_wr32(dev, reg + 0, reg0);
++	}
++
++	return 0;
++}
++
++static int
++setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
++{
++	struct drm_device *dev = bios->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	/* clk in kHz */
++	struct pll_lims pll_lim;
++	struct nouveau_pll_vals pllvals;
++	int ret;
++
++	if (dev_priv->card_type >= NV_50)
++		return nv50_pll_set(dev, reg, clk);
++
++	/* high regs (such as in the mac g5 table) are not -= 4 */
++	ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
++	if (ret)
++		return ret;
++
++	clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
++	if (!clk)
++		return -ERANGE;
++
++	if (bios->execute) {
++		still_alive();
++		nouveau_hw_setpll(dev, reg, &pllvals);
++	}
++
++	return 0;
++}
++
++static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++
++	/*
++	 * For the results of this function to be correct, CR44 must have been
++	 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
++	 * and the DCB table parsed, before the script calling the function is
++	 * run.  run_digital_op_script is an example of how to do such setup.
++	 */
++
++	uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
++
++	if (dcb_entry >= bios->bdcb.dcb.entries) {
++		NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
++				"(%02X)\n", dcb_entry);
++		dcb_entry = 0x7f;	/* unused / invalid marker */
++	}
++
++	return dcb_entry;
++}
++
++static struct nouveau_i2c_chan *
++init_i2c_device_find(struct drm_device *dev, int i2c_index)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
++
++	if (i2c_index == 0xff) {
++		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
++		int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
++		int default_indices = bdcb->i2c_default_indices;
++
++		if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
++			shift = 4;
++
++		i2c_index = (default_indices >> shift) & 0xf;
++	}
++	if (i2c_index == 0x80)	/* g80+ */
++		i2c_index = bdcb->i2c_default_indices & 0xf;
++
++	return nouveau_i2c_find(dev, i2c_index);
++}
++
++static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
++{
++	/*
++	 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
++	 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
++	 * CR58 for CR57 = 0 to index a table of offsets to the basic
++	 * 0x6808b0 address.
++	 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
++	 * CR58 for CR57 = 0 to index a table of offsets to the basic
++	 * 0x6808b0 address, and then flip the offset by 8.
++	 */
++
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	const int pramdac_offset[13] = {
++		0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
++	const uint32_t pramdac_table[4] = {
++		0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
++
++	if (mlv >= 0x80) {
++		int dcb_entry, dacoffset;
++
++		/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
++		dcb_entry = dcb_entry_idx_from_crtchead(dev);
++		if (dcb_entry == 0x7f)
++			return 0;
++		dacoffset = pramdac_offset[
++				dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
++		if (mlv == 0x81)
++			dacoffset ^= 8;
++		return 0x6808b0 + dacoffset;
++	} else {
++		if (mlv >= ARRAY_SIZE(pramdac_table)) {
++			NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
++									mlv);
++			return 0;
++		}
++		return pramdac_table[mlv];
++	}
++}
++
++static int
++init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
++		      struct init_exec *iexec)
++{
++	/*
++	 * INIT_IO_RESTRICT_PROG   opcode: 0x32 ('2')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): CRTC port
++	 * offset + 3  (8  bit): CRTC index
++	 * offset + 4  (8  bit): mask
++	 * offset + 5  (8  bit): shift
++	 * offset + 6  (8  bit): count
++	 * offset + 7  (32 bit): register
++	 * offset + 11 (32 bit): configuration 1
++	 * ...
++	 *
++	 * Starting at offset + 11 there are "count" 32 bit values.
++	 * To find out which value to use, read index "CRTC index" on "CRTC
++	 * port", AND this value with "mask" and then bit shift right "shift"
++	 * bits.  Read the appropriate value using this index and write to
++	 * "register"
++	 */
++
++	uint16_t crtcport = ROM16(bios->data[offset + 1]);
++	uint8_t crtcindex = bios->data[offset + 3];
++	uint8_t mask = bios->data[offset + 4];
++	uint8_t shift = bios->data[offset + 5];
++	uint8_t count = bios->data[offset + 6];
++	uint32_t reg = ROM32(bios->data[offset + 7]);
++	uint8_t config;
++	uint32_t configval;
++	int len = 11 + count * 4;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
++		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
++		offset, crtcport, crtcindex, mask, shift, count, reg);
++
++	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
++	if (config > count) {
++		NV_ERROR(bios->dev,
++			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
++			 offset, config, count);
++		return 0;
++	}
++
++	configval = ROM32(bios->data[offset + 11 + config * 4]);
++
++	BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
++
++	bios_wr32(bios, reg, configval);
++
++	return len;
++}
++
++static int
++init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_REPEAT   opcode: 0x33 ('3')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): count
++	 *
++	 * Execute script following this opcode up to INIT_REPEAT_END
++	 * "count" times
++	 */
++
++	uint8_t count = bios->data[offset + 1];
++	uint8_t i;
++
++	/* no iexec->execute check by design */
++
++	BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
++		offset, count);
++
++	iexec->repeat = true;
++
++	/*
++	 * count - 1, as the script block will execute once when we leave this
++	 * opcode -- this is compatible with bios behaviour as:
++	 * a) the block is always executed at least once, even if count == 0
++	 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
++	 * while we don't
++	 */
++	for (i = 0; i < count - 1; i++)
++		parse_init_table(bios, offset + 2, iexec);
++
++	iexec->repeat = false;
++
++	return 2;
++}
++
++static int
++init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
++		     struct init_exec *iexec)
++{
++	/*
++	 * INIT_IO_RESTRICT_PLL   opcode: 0x34 ('4')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): CRTC port
++	 * offset + 3  (8  bit): CRTC index
++	 * offset + 4  (8  bit): mask
++	 * offset + 5  (8  bit): shift
++	 * offset + 6  (8  bit): IO flag condition index
++	 * offset + 7  (8  bit): count
++	 * offset + 8  (32 bit): register
++	 * offset + 12 (16 bit): frequency 1
++	 * ...
++	 *
++	 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
++	 * Set PLL register "register" to coefficients for frequency n,
++	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
++	 * "mask" and shifted right by "shift".
++	 *
++	 * If "IO flag condition index" > 0, and condition met, double
++	 * frequency before setting it.
++	 */
++
++	uint16_t crtcport = ROM16(bios->data[offset + 1]);
++	uint8_t crtcindex = bios->data[offset + 3];
++	uint8_t mask = bios->data[offset + 4];
++	uint8_t shift = bios->data[offset + 5];
++	int8_t io_flag_condition_idx = bios->data[offset + 6];
++	uint8_t count = bios->data[offset + 7];
++	uint32_t reg = ROM32(bios->data[offset + 8]);
++	uint8_t config;
++	uint16_t freq;
++	int len = 12 + count * 2;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
++		      "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
++		      "Count: 0x%02X, Reg: 0x%08X\n",
++		offset, crtcport, crtcindex, mask, shift,
++		io_flag_condition_idx, count, reg);
++
++	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
++	if (config > count) {
++		NV_ERROR(bios->dev,
++			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
++			 offset, config, count);
++		return 0;
++	}
++
++	freq = ROM16(bios->data[offset + 12 + config * 2]);
++
++	if (io_flag_condition_idx > 0) {
++		if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
++			BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
++				      "frequency doubled\n", offset);
++			freq *= 2;
++		} else
++			BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
++				      "frequency unchanged\n", offset);
++	}
++
++	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
++		offset, reg, config, freq);
++
++	setPLL(bios, reg, freq * 10);
++
++	return len;
++}
++
++static int
++init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_END_REPEAT   opcode: 0x36 ('6')
++	 *
++	 * offset      (8 bit): opcode
++	 *
++	 * Marks the end of the block for INIT_REPEAT to repeat
++	 */
++
++	/* no iexec->execute check by design */
++
++	/*
++	 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
++	 * we're not in repeat mode
++	 */
++	if (iexec->repeat)
++		return 0;
++
++	return 1;
++}
++
++static int
++init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_COPY   opcode: 0x37 ('7')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): register
++	 * offset + 5  (8  bit): shift
++	 * offset + 6  (8  bit): srcmask
++	 * offset + 7  (16 bit): CRTC port
++	 * offset + 9  (8  bit): CRTC index
++	 * offset + 10 (8  bit): mask
++	 *
++	 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
++	 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
++	 * port
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint8_t shift = bios->data[offset + 5];
++	uint8_t srcmask = bios->data[offset + 6];
++	uint16_t crtcport = ROM16(bios->data[offset + 7]);
++	uint8_t crtcindex = bios->data[offset + 9];
++	uint8_t mask = bios->data[offset + 10];
++	uint32_t data;
++	uint8_t crtcdata;
++
++	if (!iexec->execute)
++		return 11;
++
++	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
++		      "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
++		offset, reg, shift, srcmask, crtcport, crtcindex, mask);
++
++	data = bios_rd32(bios, reg);
++
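++	/*
++	 * "shift" is effectively a signed 8-bit value: 0x00-0x7f shifts
++	 * right, 0x80-0xff shifts left by (0x100 - shift).
++	 */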
++	if (shift < 0x80)
++		data >>= shift;
++	else
++		data <<= (0x100 - shift);
++
++	data &= srcmask;
++
++	crtcdata  = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
++	crtcdata |= (uint8_t)data;
++	bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
++
++	return 11;
++}
++
++static int
++init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_NOT   opcode: 0x38 ('8')
++	 *
++	 * offset      (8  bit): opcode
++	 *
++	 * Invert the current execute / no-execute condition (i.e. "else")
++	 */
++	if (iexec->execute)
++		BIOSLOG(bios, "0x%04X: ------ Skipping following commands  ------\n", offset);
++	else
++		BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
++
++	iexec->execute = !iexec->execute;
++	return 1;
++}
++
++static int
++init_io_flag_condition(struct nvbios *bios, uint16_t offset,
++		       struct init_exec *iexec)
++{
++	/*
++	 * INIT_IO_FLAG_CONDITION   opcode: 0x39 ('9')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): condition number
++	 *
++	 * Check condition "condition number" in the IO flag condition table.
++	 * If condition not met skip subsequent opcodes until condition is
++	 * inverted (INIT_NOT), or we hit INIT_RESUME
++	 */
++
++	uint8_t cond = bios->data[offset + 1];
++
++	if (!iexec->execute)
++		return 2;
++
++	if (io_flag_condition_met(bios, offset, cond))
++		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
++	else {
++		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
++		iexec->execute = false;
++	}
++
++	return 2;
++}
++
++static int
++init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
++		      struct init_exec *iexec)
++{
++	/*
++	 * INIT_INDEX_ADDRESS_LATCHED   opcode: 0x49 ('I')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): control register
++	 * offset + 5  (32 bit): data register
++	 * offset + 9  (32 bit): mask
++	 * offset + 13 (32 bit): data
++	 * offset + 17 (8  bit): count
++	 * offset + 18 (8  bit): address 1
++	 * offset + 19 (8  bit): data 1
++	 * ...
++	 *
++	 * For each of "count" address and data pairs, write "data n" to
++	 * "data register", read the current value of "control register",
++	 * and write it back once ANDed with "mask", ORed with "data",
++	 * and ORed with "address n"
++	 */
++
++	uint32_t controlreg = ROM32(bios->data[offset + 1]);
++	uint32_t datareg = ROM32(bios->data[offset + 5]);
++	uint32_t mask = ROM32(bios->data[offset + 9]);
++	uint32_t data = ROM32(bios->data[offset + 13]);
++	uint8_t count = bios->data[offset + 17];
++	int len = 18 + count * 2;
++	uint32_t value;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
++		      "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
++		offset, controlreg, datareg, mask, data, count);
++
++	for (i = 0; i < count; i++) {
++		uint8_t instaddress = bios->data[offset + 18 + i * 2];
++		uint8_t instdata = bios->data[offset + 19 + i * 2];
++
++		BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
++			offset, instaddress, instdata);
++
++		bios_wr32(bios, datareg, instdata);
++		value  = bios_rd32(bios, controlreg) & mask;
++		value |= data;
++		value |= instaddress;
++		bios_wr32(bios, controlreg, value);
++	}
++
++	return len;
++}
++
++static int
++init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
++		      struct init_exec *iexec)
++{
++	/*
++	 * INIT_IO_RESTRICT_PLL2   opcode: 0x4A ('J')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): CRTC port
++	 * offset + 3  (8  bit): CRTC index
++	 * offset + 4  (8  bit): mask
++	 * offset + 5  (8  bit): shift
++	 * offset + 6  (8  bit): count
++	 * offset + 7  (32 bit): register
++	 * offset + 11 (32 bit): frequency 1
++	 * ...
++	 *
++	 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
++	 * Set PLL register "register" to coefficients for frequency n,
++	 * selected by reading index "CRTC index" of "CRTC port" ANDed with
++	 * "mask" and shifted right by "shift".
++	 */
++
++	uint16_t crtcport = ROM16(bios->data[offset + 1]);
++	uint8_t crtcindex = bios->data[offset + 3];
++	uint8_t mask = bios->data[offset + 4];
++	uint8_t shift = bios->data[offset + 5];
++	uint8_t count = bios->data[offset + 6];
++	uint32_t reg = ROM32(bios->data[offset + 7]);
++	int len = 11 + count * 4;
++	uint8_t config;
++	uint32_t freq;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
++		      "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
++		offset, crtcport, crtcindex, mask, shift, count, reg);
++
++	if (!reg)
++		return len;
++
++	config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
++	if (config > count) {
++		NV_ERROR(bios->dev,
++			 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
++			 offset, config, count);
++		return 0;
++	}
++
++	freq = ROM32(bios->data[offset + 11 + config * 4]);
++
++	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
++		offset, reg, config, freq);
++
++	setPLL(bios, reg, freq);
++
++	return len;
++}
++
++static int
++init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_PLL2   opcode: 0x4B ('K')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): register
++	 * offset + 5  (32 bit): freq
++	 *
++	 * Set PLL register "register" to coefficients for frequency "freq"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint32_t freq = ROM32(bios->data[offset + 5]);
++
++	if (!iexec->execute)
++		return 9;
++
++	BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
++		offset, reg, freq);
++
++	setPLL(bios, reg, freq);
++	return 9;
++}
++
++static int
++init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_I2C_BYTE   opcode: 0x4C ('L')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): DCB I2C table entry index
++	 * offset + 2  (8 bit): I2C slave address
++	 * offset + 3  (8 bit): count
++	 * offset + 4  (8 bit): I2C register 1
++	 * offset + 5  (8 bit): mask 1
++	 * offset + 6  (8 bit): data 1
++	 * ...
++	 *
++	 * For each of "count" registers given by "I2C register n" on the device
++	 * addressed by "I2C slave address" on the I2C bus given by
++	 * "DCB I2C table entry index", read the register, AND the result with
++	 * "mask n" and OR it with "data n" before writing it back to the device
++	 */
++
++	uint8_t i2c_index = bios->data[offset + 1];
++	uint8_t i2c_address = bios->data[offset + 2];
++	uint8_t count = bios->data[offset + 3];
++	int len = 4 + count * 3;
++	struct nouveau_i2c_chan *chan;
++	struct i2c_msg msg;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
++		      "Count: 0x%02X\n",
++		offset, i2c_index, i2c_address, count);
++
++	chan = init_i2c_device_find(bios->dev, i2c_index);
++	if (!chan)
++		return 0;
++
++	for (i = 0; i < count; i++) {
++		uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
++		uint8_t mask = bios->data[offset + 5 + i * 3];
++		uint8_t data = bios->data[offset + 6 + i * 3];
++		uint8_t value;
++
++		msg.addr = i2c_address;
++		msg.flags = I2C_M_RD;
++		msg.len = 1;
++		msg.buf = &value;
++		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
++			return 0;
++
++		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
++			      "Mask: 0x%02X, Data: 0x%02X\n",
++			offset, i2c_reg, value, mask, data);
++
++		value = (value & mask) | data;
++
++		if (bios->execute) {
++			msg.addr = i2c_address;
++			msg.flags = 0;
++			msg.len = 1;
++			msg.buf = &value;
++			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
++				return 0;
++		}
++	}
++
++	return len;
++}
++
++static int
++init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_I2C_BYTE   opcode: 0x4D ('M')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): DCB I2C table entry index
++	 * offset + 2  (8 bit): I2C slave address
++	 * offset + 3  (8 bit): count
++	 * offset + 4  (8 bit): I2C register 1
++	 * offset + 5  (8 bit): data 1
++	 * ...
++	 *
++	 * For each of "count" registers given by "I2C register n" on the device
++	 * addressed by "I2C slave address" on the I2C bus given by
++	 * "DCB I2C table entry index", set the register to "data n"
++	 */
++
++	uint8_t i2c_index = bios->data[offset + 1];
++	uint8_t i2c_address = bios->data[offset + 2];
++	uint8_t count = bios->data[offset + 3];
++	int len = 4 + count * 2;
++	struct nouveau_i2c_chan *chan;
++	struct i2c_msg msg;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
++		      "Count: 0x%02X\n",
++		offset, i2c_index, i2c_address, count);
++
++	chan = init_i2c_device_find(bios->dev, i2c_index);
++	if (!chan)
++		return 0;
++
++	for (i = 0; i < count; i++) {
++		uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
++		uint8_t data = bios->data[offset + 5 + i * 2];
++
++		BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
++			offset, i2c_reg, data);
++
++		if (bios->execute) {
++			msg.addr = i2c_address;
++			msg.flags = 0;
++			msg.len = 1;
++			msg.buf = &data;
++			if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
++				return 0;
++		}
++	}
++
++	return len;
++}
++
++static int
++init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_I2C   opcode: 0x4E ('N')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): DCB I2C table entry index
++	 * offset + 2  (8 bit): I2C slave address
++	 * offset + 3  (8 bit): count
++	 * offset + 4  (8 bit): data 1
++	 * ...
++	 *
++	 * Send "count" bytes ("data n") to the device addressed by "I2C slave
++	 * address" on the I2C bus given by "DCB I2C table entry index"
++	 */
++
++	uint8_t i2c_index = bios->data[offset + 1];
++	uint8_t i2c_address = bios->data[offset + 2];
++	uint8_t count = bios->data[offset + 3];
++	int len = 4 + count;
++	struct nouveau_i2c_chan *chan;
++	struct i2c_msg msg;
++	uint8_t data[256];
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
++		      "Count: 0x%02X\n",
++		offset, i2c_index, i2c_address, count);
++
++	chan = init_i2c_device_find(bios->dev, i2c_index);
++	if (!chan)
++		return 0;
++
++	for (i = 0; i < count; i++) {
++		data[i] = bios->data[offset + 4 + i];
++
++		BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
++	}
++
++	if (bios->execute) {
++		msg.addr = i2c_address;
++		msg.flags = 0;
++		msg.len = count;
++		msg.buf = data;
++		if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
++			return 0;
++	}
++
++	return len;
++}
++
++static int
++init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_TMDS   opcode: 0x4F ('O')	(non-canon name)
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): magic lookup value
++	 * offset + 2  (8 bit): TMDS address
++	 * offset + 3  (8 bit): mask
++	 * offset + 4  (8 bit): data
++	 *
++	 * Read the data reg for TMDS address "TMDS address", AND it with mask
++	 * and OR it with data, then write it back
++	 * "magic lookup value" determines which TMDS base address register is
++	 * used -- see get_tmds_index_reg()
++	 */
++
++	uint8_t mlv = bios->data[offset + 1];
++	uint32_t tmdsaddr = bios->data[offset + 2];
++	uint8_t mask = bios->data[offset + 3];
++	uint8_t data = bios->data[offset + 4];
++	uint32_t reg, value;
++
++	if (!iexec->execute)
++		return 5;
++
++	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
++		      "Mask: 0x%02X, Data: 0x%02X\n",
++		offset, mlv, tmdsaddr, mask, data);
++
++	reg = get_tmds_index_reg(bios->dev, mlv);
++	if (!reg)
++		return 0;
++
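++	/*
++	 * Select the TMDS address with WRITE_DISABLE set so the data register
++	 * at reg + 4 can be updated; rewriting the address without the flag
++	 * appears to commit the new value.
++	 */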
++	bios_wr32(bios, reg,
++		  tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
++	value = (bios_rd32(bios, reg + 4) & mask) | data;
++	bios_wr32(bios, reg + 4, value);
++	bios_wr32(bios, reg, tmdsaddr);
++
++	return 5;
++}
++
++static int
++init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
++		   struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_TMDS_GROUP   opcode: 0x50 ('P')	(non-canon name)
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): magic lookup value
++	 * offset + 2  (8 bit): count
++	 * offset + 3  (8 bit): addr 1
++	 * offset + 4  (8 bit): data 1
++	 * ...
++	 *
++	 * For each of "count" TMDS address and data pairs write "data n" to
++	 * "addr n".  "magic lookup value" determines which TMDS base address
++	 * register is used -- see get_tmds_index_reg()
++	 */
++
++	uint8_t mlv = bios->data[offset + 1];
++	uint8_t count = bios->data[offset + 2];
++	int len = 3 + count * 2;
++	uint32_t reg;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
++		offset, mlv, count);
++
++	reg = get_tmds_index_reg(bios->dev, mlv);
++	if (!reg)
++		return 0;
++
++	for (i = 0; i < count; i++) {
++		uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
++		uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
++
++		bios_wr32(bios, reg + 4, tmdsdata);
++		bios_wr32(bios, reg, tmdsaddr);
++	}
++
++	return len;
++}
++
++static int
++init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
++		      struct init_exec *iexec)
++{
++	/*
++	 * INIT_CR_INDEX_ADDRESS_LATCHED   opcode: 0x51 ('Q')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): CRTC index1
++	 * offset + 2  (8 bit): CRTC index2
++	 * offset + 3  (8 bit): baseaddr
++	 * offset + 4  (8 bit): count
++	 * offset + 5  (8 bit): data 1
++	 * ...
++	 *
++	 * For each of "count" address and data pairs, write "baseaddr + n" to
++	 * "CRTC index1" and "data n" to "CRTC index2"
++	 * Once complete, restore initial value read from "CRTC index1"
++	 */
++	uint8_t crtcindex1 = bios->data[offset + 1];
++	uint8_t crtcindex2 = bios->data[offset + 2];
++	uint8_t baseaddr = bios->data[offset + 3];
++	uint8_t count = bios->data[offset + 4];
++	int len = 5 + count;
++	uint8_t oldaddr, data;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
++		      "BaseAddr: 0x%02X, Count: 0x%02X\n",
++		offset, crtcindex1, crtcindex2, baseaddr, count);
++
++	oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
++
++	for (i = 0; i < count; i++) {
++		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
++				     baseaddr + i);
++		data = bios->data[offset + 5 + i];
++		bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
++	}
++
++	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
++
++	return len;
++}
++
++static int
++init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_CR   opcode: 0x52 ('R')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (8  bit): CRTC index
++	 * offset + 2  (8  bit): mask
++	 * offset + 3  (8  bit): data
++	 *
++	 * Assign the value at "CRTC index", ANDed with "mask" and ORed with
++	 * "data", back to "CRTC index"
++	 */
++
++	uint8_t crtcindex = bios->data[offset + 1];
++	uint8_t mask = bios->data[offset + 2];
++	uint8_t data = bios->data[offset + 3];
++	uint8_t value;
++
++	if (!iexec->execute)
++		return 4;
++
++	BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
++		offset, crtcindex, mask, data);
++
++	value  = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
++	value |= data;
++	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
++
++	return 4;
++}
++
++static int
++init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_CR   opcode: 0x53 ('S')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): CRTC index
++	 * offset + 2  (8 bit): value
++	 *
++	 * Assign "value" to CRTC register with index "CRTC index".
++	 */
++
++	uint8_t crtcindex = bios->data[offset + 1];
++	uint8_t data = bios->data[offset + 2];
++
++	if (!iexec->execute)
++		return 3;
++
++	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
++
++	return 3;
++}
++
++static int
++init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_CR_GROUP   opcode: 0x54 ('T')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): count
++	 * offset + 2  (8 bit): CRTC index 1
++	 * offset + 3  (8 bit): value 1
++	 * ...
++	 *
++	 * For "count", assign "value n" to CRTC register with index
++	 * "CRTC index n".
++	 */
++
++	uint8_t count = bios->data[offset + 1];
++	int len = 2 + count * 2;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
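++	/*
++	 * Hand init_zm_cr a fake opcode offset so that its offset + 1 and
++	 * offset + 2 reads land on the current index/value pair.
++	 */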
++	for (i = 0; i < count; i++)
++		init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
++
++	return len;
++}
++
++static int
++init_condition_time(struct nvbios *bios, uint16_t offset,
++		    struct init_exec *iexec)
++{
++	/*
++	 * INIT_CONDITION_TIME   opcode: 0x56 ('V')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): condition number
++	 * offset + 2  (8 bit): retries / 50
++	 *
++	 * Check condition "condition number" in the condition table.
++	 * Bios code then sleeps for 2ms if the condition is not met, and
++	 * repeats up to "retries" times, but on one C51 this has proved
++	 * insufficient.  In mmiotraces the driver sleeps for 20ms, so we do
++	 * this, and bail after "retries" times, or 2s, whichever is less.
++	 * If still not met after retries, clear execution flag for this table.
++	 */
++
++	uint8_t cond = bios->data[offset + 1];
++	uint16_t retries = bios->data[offset + 2] * 50;
++	unsigned cnt;
++
++	if (!iexec->execute)
++		return 3;
++
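++	/* cap at 100 iterations of 20ms each, i.e. the 2s bound noted above */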
++	if (retries > 100)
++		retries = 100;
++
++	BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
++		offset, cond, retries);
++
++	if (!bios->execute) /* avoid 2s delays when "faking" execution */
++		retries = 1;
++
++	for (cnt = 0; cnt < retries; cnt++) {
++		if (bios_condition_met(bios, offset, cond)) {
++			BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
++								offset);
++			break;
++		} else {
++			BIOSLOG(bios, "0x%04X: "
++				"Condition not met, sleeping for 20ms\n",
++								offset);
++			msleep(20);
++		}
++	}
++
++	if (!bios_condition_met(bios, offset, cond)) {
++		NV_WARN(bios->dev,
++			"0x%04X: Condition still not met after %dms, "
++			"skipping following opcodes\n", offset, 20 * retries);
++		iexec->execute = false;
++	}
++
++	return 3;
++}
++
++static int
++init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
++		     struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_REG_SEQUENCE   opcode: 0x58 ('X')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): base register
++	 * offset + 5  (8  bit): count
++	 * offset + 6  (32 bit): value 1
++	 * ...
++	 *
++	 * Starting at offset + 6 there are "count" 32 bit values.
++	 * For "count" iterations set "base register" + 4 * current_iteration
++	 * to "value current_iteration"
++	 */
++
++	uint32_t basereg = ROM32(bios->data[offset + 1]);
++	uint32_t count = bios->data[offset + 5];
++	int len = 6 + count * 4;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
++		offset, basereg, count);
++
++	for (i = 0; i < count; i++) {
++		uint32_t reg = basereg + i * 4;
++		uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
++
++		bios_wr32(bios, reg, data);
++	}
++
++	return len;
++}
++
++static int
++init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_SUB_DIRECT   opcode: 0x5B ('[')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): subroutine offset (in bios)
++	 *
++	 * Calls a subroutine that will execute commands until INIT_DONE
++	 * is found.
++	 */
++
++	uint16_t sub_offset = ROM16(bios->data[offset + 1]);
++
++	if (!iexec->execute)
++		return 3;
++
++	BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
++		offset, sub_offset);
++
++	parse_init_table(bios, sub_offset, iexec);
++
++	BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
++
++	return 3;
++}
++
++static int
++init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_COPY_NV_REG   opcode: 0x5F ('_')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): src reg
++	 * offset + 5  (8  bit): shift
++	 * offset + 6  (32 bit): src mask
++	 * offset + 10 (32 bit): xor
++	 * offset + 14 (32 bit): dst reg
++	 * offset + 18 (32 bit): dst mask
++	 *
++	 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
++	 * "src mask", then XOR with "xor". Write this OR'd with
++	 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
++	 */
++
++	uint32_t srcreg = ROM32(bios->data[offset + 1]);
++	uint8_t shift = bios->data[offset + 5];
++	uint32_t srcmask = ROM32(bios->data[offset + 6]);
++	uint32_t xor = ROM32(bios->data[offset + 10]);
++	uint32_t dstreg = ROM32(bios->data[offset + 14]);
++	uint32_t dstmask = ROM32(bios->data[offset + 18]);
++	uint32_t srcvalue, dstvalue;
++
++	if (!iexec->execute)
++		return 22;
++
++	BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
++		      "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
++		offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
++
++	srcvalue = bios_rd32(bios, srcreg);
++
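++	/* same signed-shift convention as INIT_COPY above */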
++	if (shift < 0x80)
++		srcvalue >>= shift;
++	else
++		srcvalue <<= (0x100 - shift);
++
++	srcvalue = (srcvalue & srcmask) ^ xor;
++
++	dstvalue = bios_rd32(bios, dstreg) & dstmask;
++
++	bios_wr32(bios, dstreg, dstvalue | srcvalue);
++
++	return 22;
++}
++
++static int
++init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_INDEX_IO   opcode: 0x62 ('b')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): CRTC port
++	 * offset + 3  (8  bit): CRTC index
++	 * offset + 4  (8  bit): data
++	 *
++	 * Write "data" to index "CRTC index" of "CRTC port"
++	 */
++	uint16_t crtcport = ROM16(bios->data[offset + 1]);
++	uint8_t crtcindex = bios->data[offset + 3];
++	uint8_t data = bios->data[offset + 4];
++
++	if (!iexec->execute)
++		return 5;
++
++	bios_idxprt_wr(bios, crtcport, crtcindex, data);
++
++	return 5;
++}
++
++static int
++init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_COMPUTE_MEM   opcode: 0x63 ('c')
++	 *
++	 * offset      (8 bit): opcode
++	 *
++	 * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
++	 * that the hardware can correctly calculate how much VRAM it has
++	 * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
++	 *
++	 * The implementation of this opcode in general consists of two parts:
++	 * 1) determination of the memory bus width
++	 * 2) determination of how many of the card's RAM pads have ICs attached
++	 *
++	 * 1) is done by a cunning combination of writes to offsets 0x1c and
++	 * 0x3c in the framebuffer, and seeing whether the written values are
++	 * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
++	 *
++	 * 2) is done by a cunning combination of writes to an offset slightly
++	 * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
++	 * if the test pattern can be read back. This then affects bits 12-15 of
++	 * NV_PFB_CFG0
++	 *
++	 * In this context a "cunning combination" may include multiple reads
++	 * and writes to varying locations, often alternating the test pattern
++	 * and 0, doubtless to make sure buffers are filled, residual charges
++	 * on tracks are removed etc.
++	 *
++	 * Unfortunately, the "cunning combination"s mentioned above, and the
++	 * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
++	 * trace I have.
++	 *
++	 * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
++	 * we started was correct, and use that instead
++	 */
++
++	/* no iexec->execute check by design */
++
++	/*
++	 * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
++	 * and kmmio traces of the binary driver POSTing the card show nothing
++	 * being done for this opcode.  Why is it still listed in the table?!
++	 */
++
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++
++	if (dev_priv->card_type >= NV_40)
++		return 1;
++
++	/*
++	 * On every card I've seen, this step gets done for us earlier in
++	 * the init scripts
++	uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
++	bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
++	 */
++
++	/*
++	 * This also has probably been done in the scripts, but an mmio trace of
++	 * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
++	 */
++	bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
++
++	/* write back the saved configuration value */
++	bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
++
++	return 1;
++}
++
++static int
++init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_RESET   opcode: 0x65 ('e')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): register
++	 * offset + 5  (32 bit): value1
++	 * offset + 9  (32 bit): value2
++	 *
++	 * Assign "value1" to "register", then assign "value2" to "register"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint32_t value1 = ROM32(bios->data[offset + 5]);
++	uint32_t value2 = ROM32(bios->data[offset + 9]);
++	uint32_t pci_nv_19, pci_nv_20;
++
++	/* no iexec->execute check by design */
++
++	pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
++	bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
++	bios_wr32(bios, reg, value1);
++
++	udelay(10);
++
++	bios_wr32(bios, reg, value2);
++	bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
++
++	pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
++	pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED;	/* 0xfffffffe */
++	bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
++
++	return 13;
++}
++
++static int
++init_configure_mem(struct nvbios *bios, uint16_t offset,
++		   struct init_exec *iexec)
++{
++	/*
++	 * INIT_CONFIGURE_MEM   opcode: 0x66 ('f')
++	 *
++	 * offset      (8 bit): opcode
++	 *
++	 * Equivalent to INIT_DONE on bios version 3 or greater.
++	 * For early bios versions, sets up the memory registers, using values
++	 * taken from the memory init table
++	 */
++
++	/* no iexec->execute check by design */
++
++	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
++	uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
++	uint32_t reg, data;
++
++	if (bios->major_version > 2)
++		return 0;
++
++	bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
++		       bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
++
++	if (bios->data[meminitoffs] & 1)
++		seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;
++
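++	/* walk the register sequence table until its 0xffffffff terminator */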
++	for (reg = ROM32(bios->data[seqtbloffs]);
++	     reg != 0xffffffff;
++	     reg = ROM32(bios->data[seqtbloffs += 4])) {
++
++		switch (reg) {
++		case NV_PFB_PRE:
++			data = NV_PFB_PRE_CMD_PRECHARGE;
++			break;
++		case NV_PFB_PAD:
++			data = NV_PFB_PAD_CKE_NORMAL;
++			break;
++		case NV_PFB_REF:
++			data = NV_PFB_REF_CMD_REFRESH;
++			break;
++		default:
++			data = ROM32(bios->data[meminitdata]);
++			meminitdata += 4;
++			if (data == 0xffffffff)
++				continue;
++		}
++
++		bios_wr32(bios, reg, data);
++	}
++
++	return 1;
++}
++
++static int
++init_configure_clk(struct nvbios *bios, uint16_t offset,
++		   struct init_exec *iexec)
++{
++	/*
++	 * INIT_CONFIGURE_CLK   opcode: 0x67 ('g')
++	 *
++	 * offset      (8 bit): opcode
++	 *
++	 * Equivalent to INIT_DONE on bios version 3 or greater.
++	 * For early bios versions, sets up the NVClk and MClk PLLs, using
++	 * values taken from the memory init table
++	 */
++
++	/* no iexec->execute check by design */
++
++	uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
++	int clock;
++
++	if (bios->major_version > 2)
++		return 0;
++
++	clock = ROM16(bios->data[meminitoffs + 4]) * 10;
++	setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
++
++	clock = ROM16(bios->data[meminitoffs + 2]) * 10;
++	if (bios->data[meminitoffs] & 1) /* DDR */
++		clock *= 2;
++	setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
++
++	return 1;
++}
++
++static int
++init_configure_preinit(struct nvbios *bios, uint16_t offset,
++		       struct init_exec *iexec)
++{
++	/*
++	 * INIT_CONFIGURE_PREINIT   opcode: 0x68 ('h')
++	 *
++	 * offset      (8 bit): opcode
++	 *
++	 * Equivalent to INIT_DONE on bios version 3 or greater.
++	 * For early bios versions, does early init, loading ram and crystal
++	 * configuration from straps into CR3C
++	 */
++
++	/* no iexec->execute check by design */
++
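++	/*
++	 * Strap bits 2-5 (ram config) move to CR3C bits 4-7; the crystal
++	 * strap in bit 6 stays in place.
++	 */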
++	uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
++	uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
++
++	if (bios->major_version > 2)
++		return 0;
++
++	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
++			     NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
++
++	return 1;
++}
++
++static int
++init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_IO   opcode: 0x69 ('i')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): CRTC port
++	 * offset + 3  (8  bit): mask
++	 * offset + 4  (8  bit): data
++	 *
++	 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
++	 */
++
++	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
++	uint16_t crtcport = ROM16(bios->data[offset + 1]);
++	uint8_t mask = bios->data[offset + 3];
++	uint8_t data = bios->data[offset + 4];
++
++	if (!iexec->execute)
++		return 5;
++
++	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
++		offset, crtcport, mask, data);
++
++	/*
++	 * I have no idea what this does, but NVIDIA do this magic sequence
++	 * in the places where this INIT_IO happens...
++	 */
++	if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
++		int i;
++
++		bios_wr32(bios, 0x614100, (bios_rd32(
++			  bios, 0x614100) & 0x0fffffff) | 0x00800000);
++
++		bios_wr32(bios, 0x00e18c, bios_rd32(
++			  bios, 0x00e18c) | 0x00020000);
++
++		bios_wr32(bios, 0x614900, (bios_rd32(
++			  bios, 0x614900) & 0x0fffffff) | 0x00800000);
++
++		bios_wr32(bios, 0x000200, bios_rd32(
++			  bios, 0x000200) & ~0x40000000);
++
++		mdelay(10);
++
++		bios_wr32(bios, 0x00e18c, bios_rd32(
++			  bios, 0x00e18c) & ~0x00020000);
++
++		bios_wr32(bios, 0x000200, bios_rd32(
++			  bios, 0x000200) | 0x40000000);
++
++		bios_wr32(bios, 0x614100, 0x00800018);
++		bios_wr32(bios, 0x614900, 0x00800018);
++
++		mdelay(10);
++
++		bios_wr32(bios, 0x614100, 0x10000018);
++		bios_wr32(bios, 0x614900, 0x10000018);
++
++		for (i = 0; i < 3; i++)
++			bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
++				  bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);
++
++		for (i = 0; i < 2; i++)
++			bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
++				  bios, 0x614300 + (i*0x800)) & 0xfffff0f0);
++
++		for (i = 0; i < 3; i++)
++			bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
++				  bios, 0x614380 + (i*0x800)) & 0xfffff0f0);
++
++		for (i = 0; i < 2; i++)
++			bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
++				  bios, 0x614200 + (i*0x800)) & 0xfffffff0);
++
++		for (i = 0; i < 2; i++)
++			bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
++				  bios, 0x614108 + (i*0x800)) & 0x0fffffff);
++		return 5;
++	}
++
++	bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
++									data);
++	return 5;
++}
++
++static int
++init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_SUB   opcode: 0x6B ('k')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): script number
++	 *
++	 * Execute script number "script number", as a subroutine
++	 */
++
++	uint8_t sub = bios->data[offset + 1];
++
++	if (!iexec->execute)
++		return 2;
++
++	BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
++
++	parse_init_table(bios,
++			 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
++			 iexec);
++
++	BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
++
++	return 2;
++}
++
++static int
++init_ram_condition(struct nvbios *bios, uint16_t offset,
++		   struct init_exec *iexec)
++{
++	/*
++	 * INIT_RAM_CONDITION   opcode: 0x6D ('m')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): mask
++	 * offset + 2  (8 bit): cmpval
++	 *
++	 * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
++	 * If condition not met skip subsequent opcodes until condition is
++	 * inverted (INIT_NOT), or we hit INIT_RESUME
++	 */
++
++	uint8_t mask = bios->data[offset + 1];
++	uint8_t cmpval = bios->data[offset + 2];
++	uint8_t data;
++
++	if (!iexec->execute)
++		return 3;
++
++	data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
++
++	BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
++		offset, data, cmpval);
++
++	if (data == cmpval)
++		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
++	else {
++		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
++		iexec->execute = false;
++	}
++
++	return 3;
++}
++
++static int
++init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_NV_REG   opcode: 0x6E ('n')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): register
++	 * offset + 5  (32 bit): mask
++	 * offset + 9  (32 bit): data
++	 *
++	 * Assign ((REGVAL("register") & "mask") | "data") to "register"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint32_t mask = ROM32(bios->data[offset + 5]);
++	uint32_t data = ROM32(bios->data[offset + 9]);
++
++	if (!iexec->execute)
++		return 13;
++
++	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
++		offset, reg, mask, data);
++
++	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
++
++	return 13;
++}
++
++static int
++init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_MACRO   opcode: 0x6F ('o')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): macro number
++	 *
++	 * Look up macro index "macro number" in the macro index table.
++	 * The macro index table entry has 1 byte for the index in the macro
++	 * table, and 1 byte for the number of times to repeat the macro.
++	 * The macro table entry has 4 bytes for the register address and
++	 * 4 bytes for the value to write to that register
++	 */
++
++	uint8_t macro_index_tbl_idx = bios->data[offset + 1];
++	uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
++	uint8_t macro_tbl_idx = bios->data[tmp];
++	uint8_t count = bios->data[tmp + 1];
++	uint32_t reg, data;
++	int i;
++
++	if (!iexec->execute)
++		return 2;
++
++	BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
++		      "Count: 0x%02X\n",
++		offset, macro_index_tbl_idx, macro_tbl_idx, count);
++
++	for (i = 0; i < count; i++) {
++		uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
++
++		reg = ROM32(bios->data[macroentryptr]);
++		data = ROM32(bios->data[macroentryptr + 4]);
++
++		bios_wr32(bios, reg, data);
++	}
++
++	return 2;
++}
++
++static int
++init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_DONE   opcode: 0x71 ('q')
++	 *
++	 * offset      (8  bit): opcode
++	 *
++	 * End the current script
++	 */
++
++	/* mild retval abuse to stop parsing this table */
++	return 0;
++}
++
++static int
++init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_RESUME   opcode: 0x72 ('r')
++	 *
++	 * offset      (8  bit): opcode
++	 *
++	 * End the current execute / no-execute condition
++	 */
++
++	if (iexec->execute)
++		return 1;
++
++	iexec->execute = true;
++	BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
++
++	return 1;
++}
++
++static int
++init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_TIME   opcode: 0x74 ('t')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): time
++	 *
++	 * Sleep for "time" microseconds.
++	 */
++
++	unsigned time = ROM16(bios->data[offset + 1]);
++
++	if (!iexec->execute)
++		return 3;
++
++	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
++		offset, time);
++
++	if (time < 1000)
++		udelay(time);
++	else
++		msleep((time + 900) / 1000);
++
++	return 3;
++}
++
++static int
++init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_CONDITION   opcode: 0x75 ('u')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): condition number
++	 *
++	 * Check condition "condition number" in the condition table.
++	 * If condition not met skip subsequent opcodes until condition is
++	 * inverted (INIT_NOT), or we hit INIT_RESUME
++	 */
++
++	uint8_t cond = bios->data[offset + 1];
++
++	if (!iexec->execute)
++		return 2;
++
++	BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
++
++	if (bios_condition_met(bios, offset, cond))
++		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
++	else {
++		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
++		iexec->execute = false;
++	}
++
++	return 2;
++}
++
++static int
++init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_IO_CONDITION  opcode: 0x76
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): condition number
++	 *
++	 * Check condition "condition number" in the io condition table.
++	 * If condition not met skip subsequent opcodes until condition is
++	 * inverted (INIT_NOT), or we hit INIT_RESUME
++	 */
++
++	uint8_t cond = bios->data[offset + 1];
++
++	if (!iexec->execute)
++		return 2;
++
++	BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
++
++	if (io_condition_met(bios, offset, cond))
++		BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
++	else {
++		BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
++		iexec->execute = false;
++	}
++
++	return 2;
++}
++
++static int
++init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_INDEX_IO   opcode: 0x78 ('x')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (16 bit): CRTC port
++	 * offset + 3  (8  bit): CRTC index
++	 * offset + 4  (8  bit): mask
++	 * offset + 5  (8  bit): data
++	 *
++	 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
++	 * OR with "data", write-back
++	 */
++
++	uint16_t crtcport = ROM16(bios->data[offset + 1]);
++	uint8_t crtcindex = bios->data[offset + 3];
++	uint8_t mask = bios->data[offset + 4];
++	uint8_t data = bios->data[offset + 5];
++	uint8_t value;
++
++	if (!iexec->execute)
++		return 6;
++
++	BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
++		      "Data: 0x%02X\n",
++		offset, crtcport, crtcindex, mask, data);
++
++	value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
++	bios_idxprt_wr(bios, crtcport, crtcindex, value);
++
++	return 6;
++}
++
++static int
++init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_PLL   opcode: 0x79 ('y')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): register
++	 * offset + 5  (16 bit): freq
++	 *
++	 * Set PLL register "register" to coefficients for frequency (10kHz)
++	 * "freq"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint16_t freq = ROM16(bios->data[offset + 5]);
++
++	if (!iexec->execute)
++		return 7;
++
++	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
++
++	setPLL(bios, reg, freq * 10);
++
++	return 7;
++}
++
++static int
++init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_REG   opcode: 0x7A ('z')
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): register
++	 * offset + 5  (32 bit): value
++	 *
++	 * Assign "value" to "register"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint32_t value = ROM32(bios->data[offset + 5]);
++
++	if (!iexec->execute)
++		return 9;
++
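++	/*
++	 * 0x000200 is NV_PMC_ENABLE; bit 0 is forced on so a script can
++	 * never clear it.
++	 */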
++	if (reg == 0x000200)
++		value |= 1;
++
++	bios_wr32(bios, reg, value);
++
++	return 9;
++}
++
++static int
++init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
++		      struct init_exec *iexec)
++{
++	/*
++	 * INIT_RAM_RESTRICT_PLL   opcode: 0x87 ('')
++	 *
++	 * offset      (8 bit): opcode
++	 * offset + 1  (8 bit): PLL type
++	 * offset + 2 (32 bit): frequency 0
++	 *
++	 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
++	 * ram_restrict_table_ptr.  The value read from there is used to select
++	 * a frequency from the table starting at 'frequency 0' to be
++	 * programmed into the PLL corresponding to 'type'.
++	 *
++	 * The PLL limits table on cards using this opcode has a mapping of
++	 * 'type' to the relevant registers.
++	 */
++
++	struct drm_device *dev = bios->dev;
++	uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
++	uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
++	uint8_t type = bios->data[offset + 1];
++	uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
++	uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
++	int len = 2 + bios->ram_restrict_group_count * 4;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
++		NV_ERROR(dev, "PLL limits table not version 3.x\n");
++		return len; /* deliberate, allow default clocks to remain */
++	}
++
++	entry = pll_limits + pll_limits[1];
++	for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
++		if (entry[0] == type) {
++			uint32_t reg = ROM32(entry[3]);
++
++			BIOSLOG(bios, "0x%04X: "
++				      "Type %02x Reg 0x%08x Freq %dKHz\n",
++				offset, type, reg, freq);
++
++			setPLL(bios, reg, freq);
++			return len;
++		}
++	}
++
++	NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
++	return len;
++}
++
++static int
++init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_8C   opcode: 0x8C ('')
++	 *
++	 * NOP so far....
++	 *
++	 */
++
++	return 1;
++}
++
++static int
++init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_8D   opcode: 0x8D
++	 *
++	 * NOP so far....
++	 *
++	 */
++
++	return 1;
++}
++
++static int
++init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_GPIO   opcode: 0x8E
++	 *
++	 * offset      (8 bit): opcode
++	 *
++	 * Loop over all entries in the DCB GPIO table, and initialise
++	 * each GPIO according to various values listed in each entry
++	 */
++
++	const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
++	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
++	const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
++	const uint8_t *gpio_entry;
++	int i;
++
++	if (!iexec->execute)
++		return 1;
++
++	if (bios->bdcb.version != 0x40) {
++		NV_ERROR(bios->dev, "DCB table not version 4.0\n");
++		return 0;
++	}
++
++	if (!bios->bdcb.gpio_table_ptr) {
++		NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
++		return 0;
++	}
++
++	gpio_entry = gpio_table + gpio_table[1];
++	for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
++		uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
++		int line = (entry & 0x0000001f);
++
++		BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);
++
++		if ((entry & 0x0000ff00) == 0x0000ff00)
++			continue;
++
++		r = nv50_gpio_reg[line >> 3];
++		s = (line & 0x07) << 2;
++		v = bios_rd32(bios, r) & ~(0x00000003 << s);
++		if (entry & 0x01000000)
++			v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
++		else
++			v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
++		bios_wr32(bios, r, v);
++
++		r = nv50_gpio_ctl[line >> 4];
++		s = (line & 0x0f);
++		v = bios_rd32(bios, r) & ~(0x00010001 << s);
++		switch ((entry & 0x06000000) >> 25) {
++		case 1:
++			v |= (0x00000001 << s);
++			break;
++		case 2:
++			v |= (0x00010000 << s);
++			break;
++		default:
++			break;
++		}
++		bios_wr32(bios, r, v);
++	}
++
++	return 1;
++}
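++
++/*
++ * Entry decode sketch for init_gpio() above (hypothetical DCB 4.0
++ * entry): for entry = 0x01002011, line = 0x11, so the state field
++ * lives in nv50_gpio_reg[2] = 0xe280 at shift (0x11 & 7) << 2 = 4
++ * and the control bit in nv50_gpio_ctl[1] = 0xe28c at shift 1; the
++ * tag byte (bits 8-15) is 0x20, not 0xff, so the entry is acted on.
++ */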
++
++static int
++init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
++			       struct init_exec *iexec)
++{
++	/*
++	 * INIT_RAM_RESTRICT_ZM_REG_GROUP   opcode: 0x8F
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): reg
++	 * offset + 5  (8  bit): regincrement
++	 * offset + 6  (8  bit): count
++	 * offset + 7  (32 bit): value 1,1
++	 * ...
++	 *
++	 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
++	 * ram_restrict_tbl_ptr. The value read from there is 'n', and
++	 * "value 1,n" gets written to "reg". This repeats "count" times and on
++	 * each iteration 'm', "reg" increases by "regincrement" and
++	 * "value m,n" is used. The extent of n is limited by a number read
++	 * from the 'M' BIT table, herein called "blocklen"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint8_t regincrement = bios->data[offset + 5];
++	uint8_t count = bios->data[offset + 6];
++	uint32_t strap_ramcfg, data;
++	/* previously set by 'M' BIT table */
++	uint16_t blocklen = bios->ram_restrict_group_count * 4;
++	int len = 7 + count * blocklen;
++	uint8_t index;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	if (!blocklen) {
++		NV_ERROR(bios->dev,
++			 "0x%04X: Zero block length - has the M table "
++			 "been parsed?\n", offset);
++		return 0;
++	}
++
++	strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
++	index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
++
++	BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
++		      "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
++		offset, reg, regincrement, count, strap_ramcfg, index);
++
++	for (i = 0; i < count; i++) {
++		data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
++
++		bios_wr32(bios, reg, data);
++
++		reg += regincrement;
++	}
++
++	return len;
++}
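++
++/*
++ * Addressing example for the loop above: with count = 2, a group
++ * count of 8 (blocklen = 32) and a strap-derived index of 3,
++ * iteration i reads its dword from offset + 7 + 3 * 4 + 32 * i,
++ * i.e. the fourth value of block i, and the whole command spans
++ * len = 7 + 2 * 32 = 71 bytes.
++ */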
++
++static int
++init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_COPY_ZM_REG   opcode: 0x90
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): src reg
++	 * offset + 5  (32 bit): dst reg
++	 *
++	 * Put contents of "src reg" into "dst reg"
++	 */
++
++	uint32_t srcreg = ROM32(bios->data[offset + 1]);
++	uint32_t dstreg = ROM32(bios->data[offset + 5]);
++
++	if (!iexec->execute)
++		return 9;
++
++	bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
++
++	return 9;
++}
++
++static int
++init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
++			       struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED   opcode: 0x91
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): dst reg
++	 * offset + 5  (8  bit): count
++	 * offset + 6  (32 bit): data 1
++	 * ...
++	 *
++	 * For each of "count" values write "data n" to "dst reg"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint8_t count = bios->data[offset + 5];
++	int len = 6 + count * 4;
++	int i;
++
++	if (!iexec->execute)
++		return len;
++
++	for (i = 0; i < count; i++) {
++		uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
++		bios_wr32(bios, reg, data);
++	}
++
++	return len;
++}
++
++static int
++init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_RESERVED   opcode: 0x92
++	 *
++	 * offset      (8 bit): opcode
++	 *
++	 * Seemingly does nothing
++	 */
++
++	return 1;
++}
++
++static int
++init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_96   opcode: 0x96
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): sreg
++	 * offset + 5  (8  bit): sshift
++	 * offset + 6  (8  bit): smask
++	 * offset + 7  (8  bit): index
++	 * offset + 8  (32 bit): reg
++	 * offset + 12 (32 bit): mask
++	 * offset + 16 (8  bit): shift
++	 *
++	 */
++
++	uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
++	uint32_t reg = ROM32(bios->data[offset + 8]);
++	uint32_t mask = ROM32(bios->data[offset + 12]);
++	uint32_t val;
++
++	val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
++	if (bios->data[offset + 5] < 0x80)
++		val >>= bios->data[offset + 5];
++	else
++		val <<= (0x100 - bios->data[offset + 5]);
++	val &= bios->data[offset + 6];
++
++	val   = bios->data[ROM16(bios->data[xlatptr]) + val];
++	val <<= bios->data[offset + 16];
++
++	if (!iexec->execute)
++		return 17;
++
++	bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
++	return 17;
++}
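++
++/*
++ * Shift semantics in init_96() above: an sshift byte below 0x80
++ * shifts the source value right, while 0x80 and above shifts it
++ * left by (0x100 - sshift). E.g. (hypothetically) sshift = 0xfe
++ * shifts left by 2, so a source read of 0x00000003 becomes 0x0c
++ * before being masked with smask and fed to the xlat table lookup.
++ */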
++
++static int
++init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_97   opcode: 0x97
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): register
++	 * offset + 5  (32 bit): mask
++	 * offset + 9  (32 bit): value
++	 *
++	 * Adds "value" to "register" preserving the fields specified
++	 * by "mask"
++	 */
++
++	uint32_t reg = ROM32(bios->data[offset + 1]);
++	uint32_t mask = ROM32(bios->data[offset + 5]);
++	uint32_t add = ROM32(bios->data[offset + 9]);
++	uint32_t val;
++
++	val = bios_rd32(bios, reg);
++	val = (val & mask) | ((val + add) & ~mask);
++
++	if (!iexec->execute)
++		return 13;
++
++	bios_wr32(bios, reg, val);
++	return 13;
++}
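++
++/*
++ * Masked-add example for init_97() above: with reg contents
++ * 0x12345678, mask 0xffff0000 and value 0x00000010, the preserved
++ * field is 0x12340000, the add gives (0x12345678 + 0x10) &
++ * 0x0000ffff = 0x5688, and 0x12345688 is written back.
++ */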
++
++static int
++init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_AUXCH   opcode: 0x98
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): address
++	 * offset + 5  (8  bit): count
++	 * offset + 6  (8  bit): mask 0
++	 * offset + 7  (8  bit): data 0
++	 *  ...
++	 *
++	 */
++
++	struct drm_device *dev = bios->dev;
++	struct nouveau_i2c_chan *auxch;
++	uint32_t addr = ROM32(bios->data[offset + 1]);
++	uint8_t count = bios->data[offset + 5];
++	int len = 6 + count * 2;
++	int ret, i;
++
++	if (!bios->display.output) {
++		NV_ERROR(dev, "INIT_AUXCH: no active output\n");
++		return 0;
++	}
++
++	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
++	if (!auxch) {
++		NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
++			 bios->display.output->i2c_index);
++		return 0;
++	}
++
++	if (!iexec->execute)
++		return len;
++
++	offset += 6;
++	for (i = 0; i < count; i++, offset += 2) {
++		uint8_t data;
++
++		ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
++		if (ret) {
++			NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
++			return 0;
++		}
++
++		data &= bios->data[offset + 0];
++		data |= bios->data[offset + 1];
++
++		ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
++		if (ret) {
++			NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
++			return 0;
++		}
++	}
++
++	return len;
++}
++
++static int
++init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
++{
++	/*
++	 * INIT_ZM_AUXCH   opcode: 0x99
++	 *
++	 * offset      (8  bit): opcode
++	 * offset + 1  (32 bit): address
++	 * offset + 5  (8  bit): count
++	 * offset + 6  (8  bit): data 0
++	 *  ...
++	 *
++	 */
++
++	struct drm_device *dev = bios->dev;
++	struct nouveau_i2c_chan *auxch;
++	uint32_t addr = ROM32(bios->data[offset + 1]);
++	uint8_t count = bios->data[offset + 5];
++	int len = 6 + count;
++	int ret, i;
++
++	if (!bios->display.output) {
++		NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
++		return 0;
++	}
++
++	auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
++	if (!auxch) {
++		NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
++			 bios->display.output->i2c_index);
++		return 0;
++	}
++
++	if (!iexec->execute)
++		return len;
++
++	offset += 6;
++	for (i = 0; i < count; i++, offset++) {
++		ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
++		if (ret) {
++			NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
++			return 0;
++		}
++	}
++
++	return len;
++}
++
++static struct init_tbl_entry itbl_entry[] = {
++	/* command name                       , id  , command handler                 */
++	/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
++	{ "INIT_IO_RESTRICT_PROG"             , 0x32, init_io_restrict_prog           },
++	{ "INIT_REPEAT"                       , 0x33, init_repeat                     },
++	{ "INIT_IO_RESTRICT_PLL"              , 0x34, init_io_restrict_pll            },
++	{ "INIT_END_REPEAT"                   , 0x36, init_end_repeat                 },
++	{ "INIT_COPY"                         , 0x37, init_copy                       },
++	{ "INIT_NOT"                          , 0x38, init_not                        },
++	{ "INIT_IO_FLAG_CONDITION"            , 0x39, init_io_flag_condition          },
++	{ "INIT_INDEX_ADDRESS_LATCHED"        , 0x49, init_idx_addr_latched           },
++	{ "INIT_IO_RESTRICT_PLL2"             , 0x4A, init_io_restrict_pll2           },
++	{ "INIT_PLL2"                         , 0x4B, init_pll2                       },
++	{ "INIT_I2C_BYTE"                     , 0x4C, init_i2c_byte                   },
++	{ "INIT_ZM_I2C_BYTE"                  , 0x4D, init_zm_i2c_byte                },
++	{ "INIT_ZM_I2C"                       , 0x4E, init_zm_i2c                     },
++	{ "INIT_TMDS"                         , 0x4F, init_tmds                       },
++	{ "INIT_ZM_TMDS_GROUP"                , 0x50, init_zm_tmds_group              },
++	{ "INIT_CR_INDEX_ADDRESS_LATCHED"     , 0x51, init_cr_idx_adr_latch           },
++	{ "INIT_CR"                           , 0x52, init_cr                         },
++	{ "INIT_ZM_CR"                        , 0x53, init_zm_cr                      },
++	{ "INIT_ZM_CR_GROUP"                  , 0x54, init_zm_cr_group                },
++	{ "INIT_CONDITION_TIME"               , 0x56, init_condition_time             },
++	{ "INIT_ZM_REG_SEQUENCE"              , 0x58, init_zm_reg_sequence            },
++	/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
++	{ "INIT_SUB_DIRECT"                   , 0x5B, init_sub_direct                 },
++	{ "INIT_COPY_NV_REG"                  , 0x5F, init_copy_nv_reg                },
++	{ "INIT_ZM_INDEX_IO"                  , 0x62, init_zm_index_io                },
++	{ "INIT_COMPUTE_MEM"                  , 0x63, init_compute_mem                },
++	{ "INIT_RESET"                        , 0x65, init_reset                      },
++	{ "INIT_CONFIGURE_MEM"                , 0x66, init_configure_mem              },
++	{ "INIT_CONFIGURE_CLK"                , 0x67, init_configure_clk              },
++	{ "INIT_CONFIGURE_PREINIT"            , 0x68, init_configure_preinit          },
++	{ "INIT_IO"                           , 0x69, init_io                         },
++	{ "INIT_SUB"                          , 0x6B, init_sub                        },
++	{ "INIT_RAM_CONDITION"                , 0x6D, init_ram_condition              },
++	{ "INIT_NV_REG"                       , 0x6E, init_nv_reg                     },
++	{ "INIT_MACRO"                        , 0x6F, init_macro                      },
++	{ "INIT_DONE"                         , 0x71, init_done                       },
++	{ "INIT_RESUME"                       , 0x72, init_resume                     },
++	/* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
++	{ "INIT_TIME"                         , 0x74, init_time                       },
++	{ "INIT_CONDITION"                    , 0x75, init_condition                  },
++	{ "INIT_IO_CONDITION"                 , 0x76, init_io_condition               },
++	{ "INIT_INDEX_IO"                     , 0x78, init_index_io                   },
++	{ "INIT_PLL"                          , 0x79, init_pll                        },
++	{ "INIT_ZM_REG"                       , 0x7A, init_zm_reg                     },
++	{ "INIT_RAM_RESTRICT_PLL"             , 0x87, init_ram_restrict_pll           },
++	{ "INIT_8C"                           , 0x8C, init_8c                         },
++	{ "INIT_8D"                           , 0x8D, init_8d                         },
++	{ "INIT_GPIO"                         , 0x8E, init_gpio                       },
++	{ "INIT_RAM_RESTRICT_ZM_REG_GROUP"    , 0x8F, init_ram_restrict_zm_reg_group  },
++	{ "INIT_COPY_ZM_REG"                  , 0x90, init_copy_zm_reg                },
++	{ "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched  },
++	{ "INIT_RESERVED"                     , 0x92, init_reserved                   },
++	{ "INIT_96"                           , 0x96, init_96                         },
++	{ "INIT_97"                           , 0x97, init_97                         },
++	{ "INIT_AUXCH"                        , 0x98, init_auxch                      },
++	{ "INIT_ZM_AUXCH"                     , 0x99, init_zm_auxch                   },
++	{ NULL                                , 0   , NULL                            }
++};
++
++#define MAX_TABLE_OPS 1000
++
++static int
++parse_init_table(struct nvbios *bios, unsigned int offset,
++		 struct init_exec *iexec)
++{
++	/*
++	 * Parses all commands in an init table.
++	 *
++	 * We start out executing all commands found in the init table. Some
++	 * opcodes may change the status of iexec->execute to SKIP, which will
++	 * cause the following opcodes to perform no operation until the value
++	 * is changed back to EXECUTE.
++	 */
++
++	int count = 0, i, res;
++	uint8_t id;
++
++	/*
++	 * Loop until INIT_DONE causes us to break out of the loop
++	 * (or until offset > bios length just in case... )
++	 * (and no more than MAX_TABLE_OPS iterations, just in case... )
++	 */
++	while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
++		id = bios->data[offset];
++
++		/* Find matching id in itbl_entry */
++		for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
++			;
++
++		if (itbl_entry[i].name) {
++			BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
++				offset, itbl_entry[i].id, itbl_entry[i].name);
++
++			/* execute the matched command handler */
++			res = (*itbl_entry[i].handler)(bios, offset, iexec);
++			if (!res)
++				break;
++			/*
++			 * Advance the offset past the current command and
++			 * all of its data, so that it points at the next
++			 * opcode.
++			 */
++			offset += res;
++		} else {
++			NV_ERROR(bios->dev,
++				 "0x%04X: Init table command not found: "
++				 "0x%02X\n", offset, id);
++			return -ENOENT;
++		}
++	}
++
++	if (offset >= bios->length)
++		NV_WARN(bios->dev,
++			"Offset 0x%04X greater than known bios image length.  "
++			"Corrupt image?\n", offset);
++	if (count >= MAX_TABLE_OPS)
++		NV_WARN(bios->dev,
++			"More than %d opcodes in a table is unlikely; "
++			"is the bios image corrupt?\n", MAX_TABLE_OPS);
++
++	return 0;
++}
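++
++/*
++ * Dispatch sketch for parse_init_table() above: a (hypothetical)
++ * two-command table "7a 00 10 00 00 78 56 34 12 71" runs
++ * init_zm_reg (0x7a, writing 0x12345678 to 0x00001000 and consuming
++ * 9 bytes) followed by init_done (0x71), whose zero return length
++ * breaks out of the loop.
++ */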
++
++static void
++parse_init_tables(struct nvbios *bios)
++{
++	/* Loops and calls parse_init_table() for each present table. */
++
++	int i = 0;
++	uint16_t table;
++	struct init_exec iexec = {true, false};
++
++	if (bios->old_style_init) {
++		if (bios->init_script_tbls_ptr)
++			parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
++		if (bios->extra_init_script_tbl_ptr)
++			parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
++
++		return;
++	}
++
++	while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
++		NV_INFO(bios->dev,
++			"Parsing VBIOS init table %d at offset 0x%04X\n",
++			i / 2, table);
++		BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
++
++		parse_init_table(bios, table, &iexec);
++		i += 2;
++	}
++}
++
++static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
++{
++	int compare_record_len, i = 0;
++	uint16_t compareclk, scriptptr = 0;
++
++	if (bios->major_version < 5) /* pre BIT */
++		compare_record_len = 3;
++	else
++		compare_record_len = 4;
++
++	do {
++		compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
++		if (pxclk >= compareclk * 10) {
++			if (bios->major_version < 5) {
++				uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
++				scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
++			} else
++				scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
++			break;
++		}
++		i++;
++	} while (compareclk);
++
++	return scriptptr;
++}
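++
++/*
++ * Matching example for clkcmptable() above: BIT-era records are
++ * 4-byte (compareclk, scriptptr) pairs, with compare clocks in
++ * 10kHz units. Given records (16500, A), (10000, B), (0, C) and a
++ * pxclk of 120000kHz, the first record satisfying
++ * pxclk >= compareclk * 10 is (10000, B), so B is returned; the
++ * zero terminator C doubles as the default.
++ */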
++
++static void
++run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
++		      struct dcb_entry *dcbent, int head, bool dl)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	struct init_exec iexec = {true, false};
++
++	NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
++		 scriptptr);
++	bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
++		       head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
++	/* note: if dcb entries have been merged, index may be misleading */
++	NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
++	parse_init_table(bios, scriptptr, &iexec);
++
++	nv04_dfp_bind_head(dev, dcbent, head, dl);
++}
++
++static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
++	uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
++
++	if (!bios->fp.xlated_entry || !sub || !scriptofs)
++		return -EINVAL;
++
++	run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
++
++	if (script == LVDS_PANEL_OFF) {
++		/* off-on delay in ms */
++		msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
++	}
++#ifdef __powerpc__
++	/* Powerbook specific quirks */
++	if ((dev->pci_device & 0xffff) == 0x0179 ||
++	    (dev->pci_device & 0xffff) == 0x0189 ||
++	    (dev->pci_device & 0xffff) == 0x0329) {
++		if (script == LVDS_RESET) {
++			nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
++
++		} else if (script == LVDS_PANEL_ON) {
++			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
++				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
++				  | (1 << 31));
++			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
++				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
++
++		} else if (script == LVDS_PANEL_OFF) {
++			bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL,
++				  bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL)
++				  & ~(1 << 31));
++			bios_wr32(bios, NV_PCRTC_GPIO_EXT,
++				  bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
++		}
++	}
++#endif
++
++	return 0;
++}
++
++static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
++{
++	/*
++	 * The BIT LVDS table's header has the information needed to set up the
++	 * necessary registers. Following the standard 4 byte header are:
++	 * A bitmask byte and a dual-link transition pxclk value for use in
++	 * selecting the init script when not using straps; 4 script pointers
++	 * for panel power, selected by output and on/off; and 8 table pointers
++	 * for panel init, the needed one determined by output, and bits in the
++	 * conf byte. These tables are similar to the TMDS tables, consisting
++	 * of a list of pxclks and script pointers.
++	 */
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
++	uint16_t scriptptr = 0, clktable;
++	uint8_t clktableptr = 0;
++
++	/*
++	 * For now we assume version 3.0 table - g80 support will need some
++	 * changes
++	 */
++
++	switch (script) {
++	case LVDS_INIT:
++		return -ENOSYS;
++	case LVDS_BACKLIGHT_ON:
++	case LVDS_PANEL_ON:
++		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
++		break;
++	case LVDS_BACKLIGHT_OFF:
++	case LVDS_PANEL_OFF:
++		scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
++		break;
++	case LVDS_RESET:
++		if (dcbent->lvdsconf.use_straps_for_mode) {
++			if (bios->fp.dual_link)
++				clktableptr += 2;
++			if (bios->fp.BITbit1)
++				clktableptr++;
++		} else {
++			/* using EDID */
++			uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
++			int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
++
++			if (bios->fp.dual_link) {
++				clktableptr += 2;
++				fallbackcmpval *= 2;
++			}
++			if (fallbackcmpval & fallback)
++				clktableptr++;
++		}
++
++		/* adding outputset * 8 may not be correct */
++		clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
++		if (!clktable) {
++			NV_ERROR(dev, "Pixel clock comparison table not found\n");
++			return -ENOENT;
++		}
++		scriptptr = clkcmptable(bios, clktable, pxclk);
++	}
++
++	if (!scriptptr) {
++		NV_ERROR(dev, "LVDS output init script not found\n");
++		return -ENOENT;
++	}
++	run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
++
++	return 0;
++}
++
++int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
++{
++	/*
++	 * LVDS operations are multiplexed in an effort to present a single API
++	 * which works with two vastly differing underlying structures.
++	 * This acts as the demux
++	 */
++
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
++	uint32_t sel_clk_binding, sel_clk;
++	int ret;
++
++	if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
++	    (lvds_ver >= 0x30 && script == LVDS_INIT))
++		return 0;
++
++	if (!bios->fp.lvds_init_run) {
++		bios->fp.lvds_init_run = true;
++		call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
++	}
++
++	if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
++		call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
++	if (script == LVDS_RESET && bios->fp.power_off_for_reset)
++		call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
++
++	NV_TRACE(dev, "Calling LVDS script %d:\n", script);
++
++	/* don't let script change pll->head binding */
++	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
++
++	if (lvds_ver < 0x30)
++		ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
++	else
++		ret = run_lvds_table(dev, dcbent, head, script, pxclk);
++
++	bios->fp.last_script_invoc = (script << 1 | head);
++
++	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
++	/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
++	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
++
++	return ret;
++}
++
++struct lvdstableheader {
++	uint8_t lvds_ver, headerlen, recordlen;
++};
++
++static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
++{
++	/*
++	 * BMP version (0xa) LVDS table has a simple header of version and
++	 * record length. The BIT LVDS table has the typical BIT table header:
++	 * version byte, header length byte, record length byte, and a byte for
++	 * the maximum number of records that can be held in the table.
++	 */
++
++	uint8_t lvds_ver, headerlen, recordlen;
++
++	memset(lth, 0, sizeof(struct lvdstableheader));
++
++	if (bios->fp.lvdsmanufacturerpointer == 0x0) {
++		NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
++		return -EINVAL;
++	}
++
++	lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
++
++	switch (lvds_ver) {
++	case 0x0a:	/* pre NV40 */
++		headerlen = 2;
++		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
++		break;
++	case 0x30:	/* NV4x */
++		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
++		if (headerlen < 0x1f) {
++			NV_ERROR(dev, "LVDS table header not understood\n");
++			return -EINVAL;
++		}
++		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
++		break;
++	case 0x40:	/* G80/G90 */
++		headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
++		if (headerlen < 0x7) {
++			NV_ERROR(dev, "LVDS table header not understood\n");
++			return -EINVAL;
++		}
++		recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
++		break;
++	default:
++		NV_ERROR(dev,
++			 "LVDS table revision %d.%d not currently supported\n",
++			 lvds_ver >> 4, lvds_ver & 0xf);
++		return -ENOSYS;
++	}
++
++	lth->lvds_ver = lvds_ver;
++	lth->headerlen = headerlen;
++	lth->recordlen = recordlen;
++
++	return 0;
++}
++
++static int
++get_fp_strap(struct drm_device *dev, struct nvbios *bios)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/*
++	 * The fp strap is normally dictated by the "User Strap" in
++	 * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the
++	 * Internal_Flags struct at 0x48 is set, the user strap is overridden
++	 * by the PCI subsystem ID during POST, but not before the previous user
++	 * strap has been committed to CR58 for CR57=0xf on head A, which may be
++	 * read and used instead
++	 */
++
++	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
++		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
++
++	if (dev_priv->card_type >= NV_50)
++		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
++	else
++		return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
++}
++
++static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
++{
++	uint8_t *fptable;
++	uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
++	int ret, ofs, fpstrapping;
++	struct lvdstableheader lth;
++
++	if (bios->fp.fptablepointer == 0x0) {
++		/* Apple cards don't have the fp table; the laptops use DDC */
++		/* The table is also missing on some x86 IGPs */
++#ifndef __powerpc__
++		NV_ERROR(dev, "Pointer to flat panel table invalid\n");
++#endif
++		bios->pub.digital_min_front_porch = 0x4b;
++		return 0;
++	}
++
++	fptable = &bios->data[bios->fp.fptablepointer];
++	fptable_ver = fptable[0];
++
++	switch (fptable_ver) {
++	/*
++	 * BMP version 0x5.0x11 BIOSen have version-1-like tables, but no
++	 * version field, and lack one of the spread spectrum/PWM bytes.
++	 * This could affect early GF2Go parts (not seen any appropriate ROMs
++	 * though). Here we assume that a version of 0x05 matches this case
++	 * (combining with a BMP version check would be better), as the
++	 * common case for the panel type field is 0x0005, and that is in
++	 * fact what we are reading the first byte of.
++	 */
++	case 0x05:	/* some NV10, 11, 15, 16 */
++		recordlen = 42;
++		ofs = -1;
++		break;
++	case 0x10:	/* some NV15/16, and NV11+ */
++		recordlen = 44;
++		ofs = 0;
++		break;
++	case 0x20:	/* NV40+ */
++		headerlen = fptable[1];
++		recordlen = fptable[2];
++		fpentries = fptable[3];
++		/*
++		 * fptable[4] is the minimum
++		 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
++		 */
++		bios->pub.digital_min_front_porch = fptable[4];
++		ofs = -7;
++		break;
++	default:
++		NV_ERROR(dev,
++			 "FP table revision %d.%d not currently supported\n",
++			 fptable_ver >> 4, fptable_ver & 0xf);
++		return -ENOSYS;
++	}
++
++	if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
++		return 0;
++
++	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
++	if (ret)
++		return ret;
++
++	if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
++		bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
++							lth.headerlen + 1;
++		bios->fp.xlatwidth = lth.recordlen;
++	}
++	if (bios->fp.fpxlatetableptr == 0x0) {
++		NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
++		return -EINVAL;
++	}
++
++	fpstrapping = get_fp_strap(dev, bios);
++
++	fpindex = bios->data[bios->fp.fpxlatetableptr +
++					fpstrapping * bios->fp.xlatwidth];
++
++	if (fpindex > fpentries) {
++		NV_ERROR(dev, "Bad flat panel table index\n");
++		return -ENOENT;
++	}
++
++	/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
++	if (lth.lvds_ver > 0x10)
++		bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
++
++	/*
++	 * If either the strap or xlated fpindex value are 0xf there is no
++	 * panel using a strap-derived bios mode present.  This condition
++	 * includes, but is different from, the DDC panel indicator above
++	 */
++	if (fpstrapping == 0xf || fpindex == 0xf)
++		return 0;
++
++	bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
++			    recordlen * fpindex + ofs;
++
++	NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
++		 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
++		 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
++		 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
++
++	return 0;
++}
++
++bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
++
++	if (!mode)	/* just checking whether we can produce a mode */
++		return bios->fp.mode_ptr;
++
++	memset(mode, 0, sizeof(struct drm_display_mode));
++	/*
++	 * For version 1.0 (version in byte 0):
++	 * bytes 1-2 are "panel type", including bits for colour/mono,
++	 * single/dual link, and type (TFT etc.)
++	 * bytes 3-6 are bits per colour in RGBX
++	 */
++	mode->clock = ROM16(mode_entry[7]) * 10;
++	/* bytes 9-10 are HActive */
++	mode->hdisplay = ROM16(mode_entry[11]) + 1;
++	/*
++	 * bytes 13-14 are HValid Start
++	 * bytes 15-16 are HValid End
++	 */
++	mode->hsync_start = ROM16(mode_entry[17]) + 1;
++	mode->hsync_end = ROM16(mode_entry[19]) + 1;
++	mode->htotal = ROM16(mode_entry[21]) + 1;
++	/* bytes 23-24, 27-30 similarly, but vertical */
++	mode->vdisplay = ROM16(mode_entry[25]) + 1;
++	mode->vsync_start = ROM16(mode_entry[31]) + 1;
++	mode->vsync_end = ROM16(mode_entry[33]) + 1;
++	mode->vtotal = ROM16(mode_entry[35]) + 1;
++	mode->flags |= (mode_entry[37] & 0x10) ?
++			DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
++	mode->flags |= (mode_entry[37] & 0x1) ?
++			DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
++	/*
++	 * bytes 38-39 relate to spread spectrum settings
++	 * bytes 40-43 are something to do with PWM
++	 */
++
++	mode->status = MODE_OK;
++	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
++	drm_mode_set_name(mode);
++	return bios->fp.mode_ptr;
++}
++
++int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
++{
++	/*
++	 * The LVDS table header is (mostly) described in
++	 * parse_lvds_manufacturer_table_header(): the BIT header additionally
++	 * contains the dual-link transition pxclk (in units of 10 kHz), at byte 5 - if
++	 * straps are not being used for the panel, this specifies the frequency
++	 * at which modes should be set up in the dual link style.
++	 *
++	 * Following the header, the BMP (ver 0xa) table has several records,
++	 * indexed by a separate xlat table, indexed in turn by the fp strap in
++	 * EXTDEV_BOOT. Each record has a config byte, followed by 6 script
++	 * numbers for use by INIT_SUB which controlled panel init and power,
++	 * and finally a dword of ms to sleep between power off and on
++	 * operations.
++	 *
++	 * In the BIT versions, the table following the header serves as an
++	 * integrated config and xlat table: the records in the table are
++	 * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
++	 * two bytes - the first as a config byte, the second for indexing the
++	 * fp mode table pointed to by the BIT 'D' table
++	 *
++	 * DDC is not used until after card init, so selecting the correct table
++	 * entry and setting the dual link flag for EDID equipped panels,
++	 * requiring tests against the native-mode pixel clock, cannot be done
++	 * until later, when this function should be called with non-zero pxclk
++	 */
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
++	struct lvdstableheader lth;
++	uint16_t lvdsofs;
++	int ret, chip_version = bios->pub.chip_version;
++
++	ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
++	if (ret)
++		return ret;
++
++	switch (lth.lvds_ver) {
++	case 0x0a:	/* pre NV40 */
++		lvdsmanufacturerindex = bios->data[
++					bios->fp.fpxlatemanufacturertableptr +
++					fpstrapping];
++
++		/* we're done if this isn't the EDID panel case */
++		if (!pxclk)
++			break;
++
++		if (chip_version < 0x25) {
++			/* nv17 behaviour
++			 *
++			 * It seems the old style lvds script pointer is reused
++			 * to select 18/24 bit colour depth for EDID panels.
++			 */
++			lvdsmanufacturerindex =
++				(bios->legacy.lvds_single_a_script_ptr & 1) ?
++									2 : 0;
++			if (pxclk >= bios->fp.duallink_transition_clk)
++				lvdsmanufacturerindex++;
++		} else if (chip_version < 0x30) {
++			/* nv28 behaviour (off-chip encoder)
++			 *
++			 * nv28 does a complex dance of first using byte 121 of
++			 * the EDID to choose the lvdsmanufacturerindex, then
++			 * later attempting to match the EDID manufacturer and
++			 * product IDs in a table (signature 'pidt' (panel id
++			 * table?)), setting an lvdsmanufacturerindex of 0 and
++			 * an fp strap of the match index (or 0xf if none)
++			 */
++			lvdsmanufacturerindex = 0;
++		} else {
++			/* nv31, nv34 behaviour */
++			lvdsmanufacturerindex = 0;
++			if (pxclk >= bios->fp.duallink_transition_clk)
++				lvdsmanufacturerindex = 2;
++			if (pxclk >= 140000)
++				lvdsmanufacturerindex = 3;
++		}
++
++		/*
++		 * nvidia set the high nibble of (cr57=f, cr58) to
++		 * lvdsmanufacturerindex in this case; we don't
++		 */
++		break;
++	case 0x30:	/* NV4x */
++	case 0x40:	/* G80/G90 */
++		lvdsmanufacturerindex = fpstrapping;
++		break;
++	default:
++		NV_ERROR(dev, "LVDS table revision not currently supported\n");
++		return -ENOSYS;
++	}
++
++	lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
++	switch (lth.lvds_ver) {
++	case 0x0a:
++		bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
++		bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
++		bios->fp.dual_link = bios->data[lvdsofs] & 4;
++		bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
++		*if_is_24bit = bios->data[lvdsofs] & 16;
++		break;
++	case 0x30:
++		/*
++		 * My money would be on there being a 24 bit interface bit in
++		 * this table, but I have no example of a laptop bios with a
++		 * 24 bit panel to confirm that. Hence we shout loudly if any
++		 * bit other than bit 0 is set (I've not even seen bit 1)
++		 */
++		if (bios->data[lvdsofs] > 1)
++			NV_ERROR(dev,
++				 "You have a very unusual laptop display; please report it\n");
++		/*
++		 * No sign of the "power off for reset" or "reset for panel
++		 * on" bits, but it's safer to assume we should
++		 */
++		bios->fp.power_off_for_reset = true;
++		bios->fp.reset_after_pclk_change = true;
++		/*
++		 * It's OK that lvdsofs is wrong for the nv4x EDID case:
++		 * dual_link is overwritten below, and BITbit1 isn't used
++		 */
++		bios->fp.dual_link = bios->data[lvdsofs] & 1;
++		bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
++		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
++		break;
++	case 0x40:
++		bios->fp.dual_link = bios->data[lvdsofs] & 1;
++		bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
++		bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
++		bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
++		break;
++	}
++
++	/* set dual_link flag for EDID case */
++	if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
++		bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
++
++	*dl = bios->fp.dual_link;
++
++	return 0;
++}
++
++static uint8_t *
++bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
++			 uint16_t record, int record_len, int record_nr)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint32_t entry;
++	uint16_t table;
++	int i, v;
++
++	for (i = 0; i < record_nr; i++, record += record_len) {
++		table = ROM16(bios->data[record]);
++		if (!table)
++			continue;
++		entry = ROM32(bios->data[table]);
++
++		v = (entry & 0x000f0000) >> 16;
++		if (!(v & dcbent->or))
++			continue;
++
++		v = (entry & 0x000000f0) >> 4;
++		if (v != dcbent->location)
++			continue;
++
++		v = (entry & 0x0000000f);
++		if (v != dcbent->type)
++			continue;
++
++		return &bios->data[table];
++	}
++
++	return NULL;
++}
++
++void *
++nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
++		      int *length)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint8_t *table;
++
++	if (!bios->display.dp_table_ptr) {
++		NV_ERROR(dev, "No pointer to DisplayPort table\n");
++		return NULL;
++	}
++	table = &bios->data[bios->display.dp_table_ptr];
++
++	if (table[0] != 0x21) {
++		NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
++			 table[0]);
++		return NULL;
++	}
++
++	*length = table[4];
++	return bios_output_config_match(dev, dcbent,
++					bios->display.dp_table_ptr + table[1],
++					table[2], table[3]);
++}
++
++int
++nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
++			       uint32_t sub, int pxclk)
++{
++	/*
++	 * The display script table is located by the BIT 'U' table.
++	 *
++	 * It contains an array of pointers to various tables describing
++	 * a particular output type.  The first 32-bits of the output
++	 * tables contains similar information to a DCB entry, and is
++	 * used to decide whether that particular table is suitable for
++	 * the output you want to access.
++	 *
++	 * The "record header length" field here seems to indicate the
++	 * offset of the first configuration entry in the output tables.
++	 * This is 10 on most cards I've seen, but 12 has been witnessed
++	 * on DP cards, and there's another script pointer within the
++	 * header.
++	 *
++	 * offset + 0   ( 8 bits): version
++	 * offset + 1   ( 8 bits): header length
++	 * offset + 2   ( 8 bits): record length
++	 * offset + 3   ( 8 bits): number of records
++	 * offset + 4   ( 8 bits): record header length
++	 * offset + 5   (16 bits): pointer to first output script table
++	 */
++
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint8_t *table = &bios->data[bios->display.script_table_ptr];
++	uint8_t *otable = NULL;
++	uint16_t script;
++	int i = 0;
++
++	if (!bios->display.script_table_ptr) {
++		NV_ERROR(dev, "No pointer to output script table\n");
++		return 1;
++	}
++
++	/*
++	 * Nothing useful has turned up in any of the pre-2.0 tables I've
++	 * seen, so until something does, we really don't need to care.
++	 */
++	if (table[0] < 0x20)
++		return 1;
++
++	if (table[0] != 0x20 && table[0] != 0x21) {
++		NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
++			 table[0]);
++		return 1;
++	}
++
++	/*
++	 * The output script tables describing a particular output type
++	 * look as follows:
++	 *
++	 * offset + 0   (32 bits): output this table matches (hash of DCB)
++	 * offset + 4   ( 8 bits): unknown
++	 * offset + 5   ( 8 bits): number of configurations
++	 * offset + 6   (16 bits): pointer to some script
++	 * offset + 8   (16 bits): pointer to some script
++	 *
++	 * headerlen == 10
++	 * offset + 10           : configuration 0
++	 *
++	 * headerlen == 12
++	 * offset + 10           : pointer to some script
++	 * offset + 12           : configuration 0
++	 *
++	 * Each config entry is as follows:
++	 *
++	 * offset + 0   (16 bits): unknown, assumed to be a match value
++	 * offset + 2   (16 bits): pointer to script table (clock set?)
++	 * offset + 4   (16 bits): pointer to script table (reset?)
++	 *
++	 * There doesn't appear to be a count value to say how many
++	 * entries exist in each script table; instead, a 0 value in
++	 * the first 16-bit word seems to indicate both the end of the
++	 * list and the default entry.  The second 16-bit word in the
++	 * script tables is a pointer to the script to execute.
++	 */
++
++	NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
++			dcbent->type, dcbent->location, dcbent->or);
++	otable = bios_output_config_match(dev, dcbent, table[1] +
++					  bios->display.script_table_ptr,
++					  table[2], table[3]);
++	if (!otable) {
++		NV_ERROR(dev, "Couldn't find matching output script table\n");
++		return 1;
++	}
++
++	if (pxclk < -2 || pxclk > 0) {
++		/* Try to find matching script table entry */
++		for (i = 0; i < otable[5]; i++) {
++			if (ROM16(otable[table[4] + i*6]) == sub)
++				break;
++		}
++
++		if (i == otable[5]) {
++			NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
++				      "using first\n",
++				 sub, dcbent->type, dcbent->or);
++			i = 0;
++		}
++	}
++
++	if (pxclk == 0) {
++		script = ROM16(otable[6]);
++		if (!script) {
++			NV_DEBUG_KMS(dev, "output script 0 not found\n");
++			return 1;
++		}
++
++		NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
++		nouveau_bios_run_init_table(dev, script, dcbent);
++	} else
++	if (pxclk == -1) {
++		script = ROM16(otable[8]);
++		if (!script) {
++			NV_DEBUG_KMS(dev, "output script 1 not found\n");
++			return 1;
++		}
++
++		NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
++		nouveau_bios_run_init_table(dev, script, dcbent);
++	} else
++	if (pxclk == -2) {
++		if (table[4] >= 12)
++			script = ROM16(otable[10]);
++		else
++			script = 0;
++		if (!script) {
++			NV_DEBUG_KMS(dev, "output script 2 not found\n");
++			return 1;
++		}
++
++		NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
++		nouveau_bios_run_init_table(dev, script, dcbent);
++	} else
++	if (pxclk > 0) {
++		script = ROM16(otable[table[4] + i*6 + 2]);
++		if (script)
++			script = clkcmptable(bios, script, pxclk);
++		if (!script) {
++			NV_ERROR(dev, "clock script 0 not found\n");
++			return 1;
++		}
++
++		NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
++		nouveau_bios_run_init_table(dev, script, dcbent);
++	} else
++	if (pxclk < 0) {
++		script = ROM16(otable[table[4] + i*6 + 4]);
++		if (script)
++			script = clkcmptable(bios, script, -pxclk);
++		if (!script) {
++			NV_DEBUG_KMS(dev, "clock script 1 not found\n");
++			return 1;
++		}
++
++		NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
++		nouveau_bios_run_init_table(dev, script, dcbent);
++	}
++
++	return 0;
++}
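++
++/*
++ * The pxclk argument to nouveau_bios_run_display_table() above
++ * multiplexes five cases: 0, -1 and -2 select the unconditional
++ * script pointers in the output table header, while pxclk > 0 and
++ * pxclk < 0 run the "clock set" and "reset" scripts respectively,
++ * chosen by comparing |pxclk| against the matched configuration
++ * entry's clock comparison table.
++ */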
++
++
++int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
++{
++	/*
++	 * the pxclk parameter is in kHz
++	 *
++	 * This runs the TMDS regs setting code found on BIT bios cards
++	 *
++	 * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
++	 * ffs(or) == 3, use the second.
++	 */
++
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	int cv = bios->pub.chip_version;
++	uint16_t clktable = 0, scriptptr;
++	uint32_t sel_clk_binding, sel_clk;
++
++	/* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
++	if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
++	    dcbent->location != DCB_LOC_ON_CHIP)
++		return 0;
++
++	switch (ffs(dcbent->or)) {
++	case 1:
++		clktable = bios->tmds.output0_script_ptr;
++		break;
++	case 2:
++	case 3:
++		clktable = bios->tmds.output1_script_ptr;
++		break;
++	}
++
++	if (!clktable) {
++		NV_ERROR(dev, "Pixel clock comparison table not found\n");
++		return -EINVAL;
++	}
++
++	scriptptr = clkcmptable(bios, clktable, pxclk);
++
++	if (!scriptptr) {
++		NV_ERROR(dev, "TMDS output init script not found\n");
++		return -ENOENT;
++	}
++
++	/* don't let script change pll->head binding */
++	sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
++	run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
++	sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
++
++	return 0;
++}
++
++int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
++{
++	/*
++	 * PLL limits table
++	 *
++	 * Version 0x10: NV30, NV31
++	 * One byte header (version), one record of 24 bytes
++	 * Version 0x11: NV36 - Not implemented
++	 * Seems to have same record style as 0x10, but 3 records rather than 1
++	 * Version 0x20: Found on Geforce 6 cards
++	 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
++	 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
++	 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
++	 * length in general, some (integrated) have an extra configuration byte
++	 * Version 0x30: Found on Geforce 8, separates the register mapping
++	 * from the limits tables.
++	 */
++
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	int cv = bios->pub.chip_version, pllindex = 0;
++	uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
++	uint32_t crystal_strap_mask, crystal_straps;
++
++	if (!bios->pll_limit_tbl_ptr) {
++		if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
++		    cv >= 0x40) {
++			NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
++			return -EINVAL;
++		}
++	} else
++		pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
++
++	crystal_strap_mask = 1 << 6;
++	/* open coded dev->twoHeads test */
++	if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
++		crystal_strap_mask |= 1 << 22;
++	crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
++							crystal_strap_mask;
++
++	switch (pll_lim_ver) {
++	/*
++	 * We use version 0 to indicate a pre limit table bios (single stage
++	 * pll) and load the hard coded limits instead.
++	 */
++	case 0:
++		break;
++	case 0x10:
++	case 0x11:
++		/*
++		 * Strictly v0x11 has 3 entries, but the last two don't seem
++		 * to get used.
++		 */
++		headerlen = 1;
++		recordlen = 0x18;
++		entries = 1;
++		pllindex = 0;
++		break;
++	case 0x20:
++	case 0x21:
++	case 0x30:
++	case 0x40:
++		headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
++		recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
++		entries = bios->data[bios->pll_limit_tbl_ptr + 3];
++		break;
++	default:
++		NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
++				"supported\n", pll_lim_ver);
++		return -ENOSYS;
++	}
++
++	/* initialize all members to zero */
++	memset(pll_lim, 0, sizeof(struct pll_lims));
++
++	if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
++		uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
++
++		pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
++		pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
++		pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
++		pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
++		pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
++		pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
++		pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
++
++		/* these values taken from nv30/31/36 */
++		pll_lim->vco1.min_n = 0x1;
++		if (cv == 0x36)
++			pll_lim->vco1.min_n = 0x5;
++		pll_lim->vco1.max_n = 0xff;
++		pll_lim->vco1.min_m = 0x1;
++		pll_lim->vco1.max_m = 0xd;
++		pll_lim->vco2.min_n = 0x4;
++		/*
++		 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
++		 * table version (apart from nv35)), N2 is compared to
++		 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
++		 * save a comparison
++		 */
++		pll_lim->vco2.max_n = 0x28;
++		if (cv == 0x30 || cv == 0x35)
++			/* only 5 bits available for N2 on nv30/35 */
++			pll_lim->vco2.max_n = 0x1f;
++		pll_lim->vco2.min_m = 0x1;
++		pll_lim->vco2.max_m = 0x4;
++		pll_lim->max_log2p = 0x7;
++		pll_lim->max_usable_log2p = 0x6;
++	} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
++		uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
++		uint32_t reg = 0; /* default match */
++		uint8_t *pll_rec;
++		int i;
++
++		/*
++		 * The first entry is the default match, used if nothing
++		 * better is found; warn if its reg field is nonzero
++		 */
++		if (ROM32(bios->data[plloffs]))
++			NV_WARN(dev, "Default PLL limit entry has non-zero "
++				       "register field\n");
++
++		if (limit_match > MAX_PLL_TYPES)
++			/* we've been passed a reg as the match */
++			reg = limit_match;
++		else /* limit match is a pll type */
++			for (i = 1; i < entries && !reg; i++) {
++				uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
++
++				if (limit_match == NVPLL &&
++				    (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
++					reg = cmpreg;
++				if (limit_match == MPLL &&
++				    (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
++					reg = cmpreg;
++				if (limit_match == VPLL1 &&
++				    (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
++					reg = cmpreg;
++				if (limit_match == VPLL2 &&
++				    (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
++					reg = cmpreg;
++			}
++
++		for (i = 1; i < entries; i++)
++			if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
++				pllindex = i;
++				break;
++			}
++
++		pll_rec = &bios->data[plloffs + recordlen * pllindex];
++
++		BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
++			pllindex ? reg : 0);
++
++		/*
++		 * Frequencies are stored in the tables in MHz; kHz is more
++		 * useful, so we convert.
++		 */
++
++		/* What output frequencies can each VCO generate? */
++		pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
++		pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
++		pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
++		pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
++
++		/* What input frequencies do they accept (past the m-divider)? */
++		pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
++		pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
++		pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
++		pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
++
++		/* What values are accepted as multiplier and divider? */
++		pll_lim->vco1.min_n = pll_rec[20];
++		pll_lim->vco1.max_n = pll_rec[21];
++		pll_lim->vco1.min_m = pll_rec[22];
++		pll_lim->vco1.max_m = pll_rec[23];
++		pll_lim->vco2.min_n = pll_rec[24];
++		pll_lim->vco2.max_n = pll_rec[25];
++		pll_lim->vco2.min_m = pll_rec[26];
++		pll_lim->vco2.max_m = pll_rec[27];
++
++		pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
++		if (pll_lim->max_log2p > 0x7)
++			/* pll decoding in nv_hw.c assumes never > 7 */
++			NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
++				pll_lim->max_log2p);
++		if (cv < 0x60)
++			pll_lim->max_usable_log2p = 0x6;
++		pll_lim->log2p_bias = pll_rec[30];
++
++		if (recordlen > 0x22)
++			pll_lim->refclk = ROM32(pll_rec[31]);
++
++		if (recordlen > 0x23 && pll_rec[35])
++			NV_WARN(dev,
++				"Bits set in PLL configuration byte (%x)\n",
++				pll_rec[35]);
++
++		/* C51 special not seen elsewhere */
++		if (cv == 0x51 && !pll_lim->refclk) {
++			uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
++
++			if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
++			    ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
++				if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
++					pll_lim->refclk = 200000;
++				else
++					pll_lim->refclk = 25000;
++			}
++		}
++	} else if (pll_lim_ver == 0x30) { /* ver 0x30 */
++		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
++		uint8_t *record = NULL;
++		int i;
++
++		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
++			limit_match);
++
++		for (i = 0; i < entries; i++, entry += recordlen) {
++			if (ROM32(entry[3]) == limit_match) {
++				record = &bios->data[ROM16(entry[1])];
++				break;
++			}
++		}
++
++		if (!record) {
++			NV_ERROR(dev, "Register 0x%08x not found in PLL "
++				 "limits table\n", limit_match);
++			return -ENOENT;
++		}
++
++		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
++		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
++		pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
++		pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
++		pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
++		pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
++		pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
++		pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
++		pll_lim->vco1.min_n = record[16];
++		pll_lim->vco1.max_n = record[17];
++		pll_lim->vco1.min_m = record[18];
++		pll_lim->vco1.max_m = record[19];
++		pll_lim->vco2.min_n = record[20];
++		pll_lim->vco2.max_n = record[21];
++		pll_lim->vco2.min_m = record[22];
++		pll_lim->vco2.max_m = record[23];
++		pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
++		pll_lim->log2p_bias = record[27];
++		pll_lim->refclk = ROM32(record[28]);
++	} else if (pll_lim_ver) { /* ver 0x40 */
++		uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
++		uint8_t *record = NULL;
++		int i;
++
++		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
++			limit_match);
++
++		for (i = 0; i < entries; i++, entry += recordlen) {
++			if (ROM32(entry[3]) == limit_match) {
++				record = &bios->data[ROM16(entry[1])];
++				break;
++			}
++		}
++
++		if (!record) {
++			NV_ERROR(dev, "Register 0x%08x not found in PLL "
++				 "limits table\n", limit_match);
++			return -ENOENT;
++		}
++
++		pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
++		pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
++		pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
++		pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
++		pll_lim->vco1.min_m = record[8];
++		pll_lim->vco1.max_m = record[9];
++		pll_lim->vco1.min_n = record[10];
++		pll_lim->vco1.max_n = record[11];
++		pll_lim->min_p = record[12];
++		pll_lim->max_p = record[13];
++		/* where did this go to?? */
++		if (limit_match == 0x00614100 || limit_match == 0x00614900)
++			pll_lim->refclk = 27000;
++		else
++			pll_lim->refclk = 100000;
++	}
++
++	/*
++	 * By now any valid limit table ought to have set a max frequency for
++	 * vco1, so if it's zero it's either a pre limit table bios, or one
++	 * with an empty limit table (seen on nv18)
++	 */
++	if (!pll_lim->vco1.maxfreq) {
++		pll_lim->vco1.minfreq = bios->fminvco;
++		pll_lim->vco1.maxfreq = bios->fmaxvco;
++		pll_lim->vco1.min_inputfreq = 0;
++		pll_lim->vco1.max_inputfreq = INT_MAX;
++		pll_lim->vco1.min_n = 0x1;
++		pll_lim->vco1.max_n = 0xff;
++		pll_lim->vco1.min_m = 0x1;
++		if (crystal_straps == 0) {
++			/* nv05 does this, nv11 doesn't, nv10 unknown */
++			if (cv < 0x11)
++				pll_lim->vco1.min_m = 0x7;
++			pll_lim->vco1.max_m = 0xd;
++		} else {
++			if (cv < 0x11)
++				pll_lim->vco1.min_m = 0x8;
++			pll_lim->vco1.max_m = 0xe;
++		}
++		if (cv < 0x17 || cv == 0x1a || cv == 0x20)
++			pll_lim->max_log2p = 4;
++		else
++			pll_lim->max_log2p = 5;
++		pll_lim->max_usable_log2p = pll_lim->max_log2p;
++	}
++
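++	/*
++	 * Still no refclk from any limits table: fall back to the crystal
++	 * frequency selected by the straps.  Bits 6 and 22 of the strap
++	 * register combine to select one of four crystals (values in kHz).
++	 */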
++	if (!pll_lim->refclk)
++		switch (crystal_straps) {
++		case 0:
++			pll_lim->refclk = 13500;
++			break;
++		case (1 << 6):
++			pll_lim->refclk = 14318;
++			break;
++		case (1 << 22):
++			pll_lim->refclk = 27000;
++			break;
++		case (1 << 22 | 1 << 6):
++			pll_lim->refclk = 25000;
++			break;
++		}
++
++#if 0 /* for easy debugging */
++	ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
++	ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
++	ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
++	ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
++
++	ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
++	ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
++	ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
++	ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
++
++	ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
++	ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
++	ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
++	ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
++	ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
++	ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
++	ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
++	ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
++
++	ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
++	ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
++
++	ErrorF("pll.refclk: %d\n", pll_lim->refclk);
++#endif
++
++	return 0;
++}
++
++static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
++{
++	/*
++	 * offset + 0  (8 bits): Micro version
++	 * offset + 1  (8 bits): Minor version
++	 * offset + 2  (8 bits): Chip version
++	 * offset + 3  (8 bits): Major version
++	 */
++
++	bios->major_version = bios->data[offset + 3];
++	bios->pub.chip_version = bios->data[offset + 2];
++	NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
++		 bios->data[offset + 3], bios->data[offset + 2],
++		 bios->data[offset + 1], bios->data[offset]);
++}
++
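++/*
++ * Note that ROM16()/ROM32() read image values as little-endian, so the
++ * pointer and table parsing below behaves the same on big-endian hosts.
++ */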
++static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
++{
++	/*
++	 * Parses the init table segment for pointers used in script execution.
++	 *
++	 * offset + 0  (16 bits): init script tables pointer
++	 * offset + 2  (16 bits): macro index table pointer
++	 * offset + 4  (16 bits): macro table pointer
++	 * offset + 6  (16 bits): condition table pointer
++	 * offset + 8  (16 bits): io condition table pointer
++	 * offset + 10 (16 bits): io flag condition table pointer
++	 * offset + 12 (16 bits): init function table pointer
++	 */
++
++	bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
++	bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
++	bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
++	bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
++	bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
++	bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
++	bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
++}
++
++static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
++{
++	/*
++	 * Parses the load detect values for g80 cards.
++	 *
++	 * offset + 0 (16 bits): loadval table pointer
++	 */
++
++	uint16_t load_table_ptr;
++	uint8_t version, headerlen, entrylen, num_entries;
++
++	if (bitentry->length != 3) {
++		NV_ERROR(dev, "Do not understand BIT A table\n");
++		return -EINVAL;
++	}
++
++	load_table_ptr = ROM16(bios->data[bitentry->offset]);
++
++	if (load_table_ptr == 0x0) {
++		NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
++		return -EINVAL;
++	}
++
++	version = bios->data[load_table_ptr];
++
++	if (version != 0x10) {
++		NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
++			 version >> 4, version & 0xF);
++		return -ENOSYS;
++	}
++
++	headerlen = bios->data[load_table_ptr + 1];
++	entrylen = bios->data[load_table_ptr + 2];
++	num_entries = bios->data[load_table_ptr + 3];
++
++	if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
++		NV_ERROR(dev, "Do not understand BIT loadval table\n");
++		return -EINVAL;
++	}
++
++	/* First entry is normal dac, 2nd tv-out perhaps? */
++	bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
++
++	return 0;
++}
++
++static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
++{
++	/*
++	 * offset + 8  (16 bits): PLL limits table pointer
++	 *
++	 * There's more in here, but that's unknown.
++	 */
++
++	if (bitentry->length < 10) {
++		NV_ERROR(dev, "Do not understand BIT C table\n");
++		return -EINVAL;
++	}
++
++	bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
++
++	return 0;
++}
++
++static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
++{
++	/*
++	 * Parses the flat panel table segment that the bit entry points to.
++	 * Starting at bitentry->offset:
++	 *
++	 * offset + 0  (16 bits): ??? table pointer - seems to have 18 byte
++	 * records beginning with a freq.
++	 * offset + 2  (16 bits): mode table pointer
++	 */
++
++	if (bitentry->length != 4) {
++		NV_ERROR(dev, "Do not understand BIT display table\n");
++		return -EINVAL;
++	}
++
++	bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
++
++	return 0;
++}
++
++static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
++{
++	/*
++	 * Parses the init table segment that the bit entry points to.
++	 *
++	 * See parse_script_table_pointers for layout
++	 */
++
++	if (bitentry->length < 14) {
++		NV_ERROR(dev, "Do not understand init table\n");
++		return -EINVAL;
++	}
++
++	parse_script_table_pointers(bios, bitentry->offset);
++
++	if (bitentry->length >= 16)
++		bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
++	if (bitentry->length >= 18)
++		bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
++
++	return 0;
++}
++
++static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
++{
++	/*
++	 * BIT 'i' (info?) table
++	 *
++	 * offset + 0  (32 bits): BIOS version dword (as in B table)
++	 * offset + 5  (8  bits): BIOS feature byte (same as for BMP?)
++	 * offset + 13 (16 bits): pointer to table containing DAC load
++	 * detection comparison values
++	 *
++	 * There's other things in the table, purpose unknown
++	 */
++
++	uint16_t daccmpoffset;
++	uint8_t dacver, dacheaderlen;
++
++	if (bitentry->length < 6) {
++		NV_ERROR(dev, "BIT i table too short for needed information\n");
++		return -EINVAL;
++	}
++
++	parse_bios_version(dev, bios, bitentry->offset);
++
++	/*
++	 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
++	 * Quadro identity crisis), other bits possibly as for BMP feature byte
++	 */
++	bios->feature_byte = bios->data[bitentry->offset + 5];
++	bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
++
++	if (bitentry->length < 15) {
++		NV_WARN(dev, "BIT i table not long enough for DAC load "
++			       "detection comparison table\n");
++		return -EINVAL;
++	}
++
++	daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
++
++	/* doesn't exist on g80 */
++	if (!daccmpoffset)
++		return 0;
++
++	/*
++	 * The first value in the table, following the header, is the
++	 * comparison value, the second entry is a comparison value for
++	 * TV load detection.
++	 */
++
++	dacver = bios->data[daccmpoffset];
++	dacheaderlen = bios->data[daccmpoffset + 1];
++
++	if (dacver != 0x00 && dacver != 0x10) {
++		NV_WARN(dev, "DAC load detection comparison table version "
++			       "%d.%d not known\n", dacver >> 4, dacver & 0xf);
++		return -ENOSYS;
++	}
++
++	bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
++	bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
++
++	return 0;
++}
++
++static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
++{
++	/*
++	 * Parses the LVDS table segment that the bit entry points to.
++	 * Starting at bitentry->offset:
++	 *
++	 * offset + 0  (16 bits): LVDS strap xlate table pointer
++	 */
++
++	if (bitentry->length != 2) {
++		NV_ERROR(dev, "Do not understand BIT LVDS table\n");
++		return -EINVAL;
++	}
++
++	/*
++	 * No idea if it's still called the LVDS manufacturer table, but
++	 * the concept's close enough.
++	 */
++	bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
++
++	return 0;
++}
++
++static int
++parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
++		      struct bit_entry *bitentry)
++{
++	/*
++	 * offset + 2  (8  bits): number of options in an
++	 * 	INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
++	 * offset + 3  (16 bits): pointer to strap xlate table for RAM
++	 * 	restrict option selection
++	 *
++	 * There's a bunch of bits in this table other than the RAM restrict
++	 * stuff that we don't use - their use is currently unknown
++	 */
++
++	/*
++	 * Older bios versions don't have a sufficiently long table for
++	 * what we want
++	 */
++	if (bitentry->length < 0x5)
++		return 0;
++
++	if (bitentry->id[1] < 2) {
++		bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
++		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
++	} else {
++		bios->ram_restrict_group_count = bios->data[bitentry->offset + 0];
++		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]);
++	}
++
++	return 0;
++}
++
++static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
++{
++	/*
++	 * Parses the pointer to the TMDS table
++	 *
++	 * Starting at bitentry->offset:
++	 *
++	 * offset + 0  (16 bits): TMDS table pointer
++	 *
++	 * The TMDS table is typically found just before the DCB table, with a
++	 * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
++	 * length?)
++	 *
++	 * At offset +7 is a pointer to a script, which I don't know how to
++	 * run yet.
++	 * At offset +9 is a pointer to another script, likewise
++	 * Offset +11 has a pointer to a table where the first word is a pxclk
++	 * frequency and the second word a pointer to a script, which should be
++	 * run if the comparison pxclk frequency is less than the pxclk desired.
++	 * This repeats for decreasing comparison frequencies
++	 * Offset +13 has a pointer to a similar table
++	 * The selection of table (and possibly +7/+9 script) is dictated by
++	 * "or" from the DCB.
++	 */
++
++	uint16_t tmdstableptr, script1, script2;
++
++	if (bitentry->length != 2) {
++		NV_ERROR(dev, "Do not understand BIT TMDS table\n");
++		return -EINVAL;
++	}
++
++	tmdstableptr = ROM16(bios->data[bitentry->offset]);
++
++	if (tmdstableptr == 0x0) {
++		NV_ERROR(dev, "Pointer to TMDS table invalid\n");
++		return -EINVAL;
++	}
++
++	/* nv50+ has v2.0, but we don't parse it atm */
++	if (bios->data[tmdstableptr] != 0x11) {
++		NV_WARN(dev,
++			"TMDS table revision %d.%d not currently supported\n",
++			bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
++		return -ENOSYS;
++	}
++
++	/*
++	 * These two scripts are odd: they don't seem to get run even when
++	 * they are not stubbed.
++	 */
++	script1 = ROM16(bios->data[tmdstableptr + 7]);
++	script2 = ROM16(bios->data[tmdstableptr + 9]);
++	if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
++		NV_WARN(dev, "TMDS table script pointers not stubbed\n");
++
++	bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
++	bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
++
++	return 0;
++}
++
++static int
++parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
++		      struct bit_entry *bitentry)
++{
++	/*
++	 * Parses the pointer to the G80 output script tables
++	 *
++	 * Starting at bitentry->offset:
++	 *
++	 * offset + 0  (16 bits): output script table pointer
++	 */
++
++	uint16_t outputscripttableptr;
++
++	if (bitentry->length != 3) {
++		NV_ERROR(dev, "Do not understand BIT U table\n");
++		return -EINVAL;
++	}
++
++	outputscripttableptr = ROM16(bios->data[bitentry->offset]);
++	bios->display.script_table_ptr = outputscripttableptr;
++	return 0;
++}
++
++static int
++parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
++				struct bit_entry *bitentry)
++{
++	bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
++	return 0;
++}
++
++struct bit_table {
++	const char id;
++	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
++};
++
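++/*
++ * BIT_TABLE('i', i) expands to a compound literal pairing a table's ID
++ * character with its parser, here parse_bit_i_tbl_entry().
++ */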
++#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
++
++static int
++parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
++		struct bit_table *table)
++{
++	struct drm_device *dev = bios->dev;
++	uint8_t maxentries = bios->data[bitoffset + 4];
++	int i, offset;
++	struct bit_entry bitentry;
++
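++	/*
++	 * Each BIT entry is 6 bytes: id char, version byte, 16-bit length
++	 * of the table pointed to, and its 16-bit offset in the image.
++	 */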
++	for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
++		bitentry.id[0] = bios->data[offset];
++
++		if (bitentry.id[0] != table->id)
++			continue;
++
++		bitentry.id[1] = bios->data[offset + 1];
++		bitentry.length = ROM16(bios->data[offset + 2]);
++		bitentry.offset = ROM16(bios->data[offset + 4]);
++
++		return table->parse_fn(dev, bios, &bitentry);
++	}
++
++	NV_INFO(dev, "BIT table '%c' not found\n", table->id);
++	return -ENOSYS;
++}
++
++static int
++parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
++{
++	int ret;
++
++	/*
++	 * The only restriction on parsing order currently is having 'i' first
++	 * for use of bios->*_version or bios->feature_byte while parsing;
++	 * functions shouldn't be actually *doing* anything apart from pulling
++	 * data from the image into the bios struct, thus no interdependencies
++	 */
++	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
++	if (ret) /* info? */
++		return ret;
++	if (bios->major_version >= 0x60) /* g80+ */
++		parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
++	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
++	if (ret)
++		return ret;
++	parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
++	ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
++	if (ret)
++		return ret;
++	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
++	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
++	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
++	parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
++	parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
++
++	return 0;
++}
++
++static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
++{
++	/*
++	 * Parses the BMP structure for useful things, but does not act on them
++	 *
++	 * offset +   5: BMP major version
++	 * offset +   6: BMP minor version
++	 * offset +   9: BMP feature byte
++	 * offset +  10: BCD encoded BIOS version
++	 *
++	 * offset +  18: init script table pointer (for bios versions < 5.10h)
++	 * offset +  20: extra init script table pointer (for bios
++	 * versions < 5.10h)
++	 *
++	 * offset +  24: memory init table pointer (used on early bios versions)
++	 * offset +  26: SDR memory sequencing setup data table
++	 * offset +  28: DDR memory sequencing setup data table
++	 *
++	 * offset +  54: index of I2C CRTC pair to use for CRT output
++	 * offset +  55: index of I2C CRTC pair to use for TV output
++	 * offset +  56: index of I2C CRTC pair to use for flat panel output
++	 * offset +  58: write CRTC index for I2C pair 0
++	 * offset +  59: read CRTC index for I2C pair 0
++	 * offset +  60: write CRTC index for I2C pair 1
++	 * offset +  61: read CRTC index for I2C pair 1
++	 *
++	 * offset +  67: maximum internal PLL frequency (single stage PLL)
++	 * offset +  71: minimum internal PLL frequency (single stage PLL)
++	 *
++	 * offset +  75: script table pointers, as described in
++	 * parse_script_table_pointers
++	 *
++	 * offset +  89: TMDS single link output A table pointer
++	 * offset +  91: TMDS single link output B table pointer
++	 * offset +  95: LVDS single link output A table pointer
++	 * offset + 105: flat panel timings table pointer
++	 * offset + 107: flat panel strapping translation table pointer
++	 * offset + 117: LVDS manufacturer panel config table pointer
++	 * offset + 119: LVDS manufacturer strapping translation table pointer
++	 *
++	 * offset + 142: PLL limits table pointer
++	 *
++	 * offset + 156: minimum pixel clock for LVDS dual link
++	 */
++
++	uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
++	uint16_t bmplength;
++	uint16_t legacy_scripts_offset, legacy_i2c_offset;
++
++	/* load needed defaults in case we can't parse this info */
++	bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
++	bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
++	bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
++	bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
++	bios->pub.digital_min_front_porch = 0x4b;
++	bios->fmaxvco = 256000;
++	bios->fminvco = 128000;
++	bios->fp.duallink_transition_clk = 90000;
++
++	bmp_version_major = bmp[5];
++	bmp_version_minor = bmp[6];
++
++	NV_TRACE(dev, "BMP version %d.%d\n",
++		 bmp_version_major, bmp_version_minor);
++
++	/*
++	 * Make sure that 0x36 is blank and can't be mistaken for a DCB
++	 * pointer on early versions
++	 */
++	if (bmp_version_major < 5)
++		*(uint16_t *)&bios->data[0x36] = 0;
++
++	/*
++	 * Seems that the minor version was 1 for all major versions prior
++	 * to 5. Version 6 could theoretically exist, but I suspect BIT
++	 * happened instead.
++	 */
++	if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
++		NV_ERROR(dev, "You have an unsupported BMP version. "
++				"Please send in your bios\n");
++		return -ENOSYS;
++	}
++
++	if (bmp_version_major == 0)
++		/* nothing that's currently useful in this version */
++		return 0;
++	else if (bmp_version_major == 1)
++		bmplength = 44; /* exact for 1.01 */
++	else if (bmp_version_major == 2)
++		bmplength = 48; /* exact for 2.01 */
++	else if (bmp_version_major == 3)
++		bmplength = 54;
++		/* guessed - mem init tables added in this version */
++	else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
++		/* don't know if 5.0 exists... */
++		bmplength = 62;
++		/* guessed - BMP I2C indices added in version 4 */
++	else if (bmp_version_minor < 0x6)
++		bmplength = 67; /* exact for 5.01 */
++	else if (bmp_version_minor < 0x10)
++		bmplength = 75; /* exact for 5.06 */
++	else if (bmp_version_minor == 0x10)
++		bmplength = 89; /* exact for 5.10h */
++	else if (bmp_version_minor < 0x14)
++		bmplength = 118; /* exact for 5.11h */
++	else if (bmp_version_minor < 0x24)
++		/*
++		 * Not sure of version where pll limits came in;
++		 * certainly exist by 0x24 though.
++		 */
++		/* length not exact: this is long enough to get lvds members */
++		bmplength = 123;
++	else if (bmp_version_minor < 0x27)
++		/*
++		 * Length not exact: this is long enough to get pll limit
++		 * member
++		 */
++		bmplength = 144;
++	else
++		/*
++		 * Length not exact: this is long enough to get dual link
++		 * transition clock.
++		 */
++		bmplength = 158;
++
++	/* the first eight bytes of the BMP must checksum to zero */
++	if (nv_cksum(bmp, 8)) {
++		NV_ERROR(dev, "Bad BMP checksum\n");
++		return -EINVAL;
++	}
++
++	/*
++	 * Bit 4 seems to indicate either a mobile bios or a quadro card --
++	 * mobile behaviour consistent (nv11+), quadro only seen nv18gl-nv36gl
++	 * (not nv10gl), bit 5 that the flat panel tables are present, and
++	 * bit 6 a tv bios.
++	 */
++	bios->feature_byte = bmp[9];
++
++	parse_bios_version(dev, bios, offset + 10);
++
++	if (bmp_version_major < 5 || bmp_version_minor < 0x10)
++		bios->old_style_init = true;
++	legacy_scripts_offset = 18;
++	if (bmp_version_major < 2)
++		legacy_scripts_offset -= 4;
++	bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
++	bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);
++
++	if (bmp_version_major > 2) {	/* appears in BMP 3 */
++		bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
++		bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
++		bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
++	}
++
++	legacy_i2c_offset = 0x48;	/* BMP version 2 & 3 */
++	if (bmplength > 61)
++		legacy_i2c_offset = offset + 54;
++	bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
++	bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
++	bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
++	bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
++	bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
++	bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
++	bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
++
++	if (bmplength > 74) {
++		bios->fmaxvco = ROM32(bmp[67]);
++		bios->fminvco = ROM32(bmp[71]);
++	}
++	if (bmplength > 88)
++		parse_script_table_pointers(bios, offset + 75);
++	if (bmplength > 94) {
++		bios->tmds.output0_script_ptr = ROM16(bmp[89]);
++		bios->tmds.output1_script_ptr = ROM16(bmp[91]);
++		/*
++		 * Never observed in use with lvds scripts, but is reused for
++		 * 18/24 bit panel interface default for EDID equipped panels
++		 * (if_is_24bit not set directly to avoid any oscillation).
++		 */
++		bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
++	}
++	if (bmplength > 108) {
++		bios->fp.fptablepointer = ROM16(bmp[105]);
++		bios->fp.fpxlatetableptr = ROM16(bmp[107]);
++		bios->fp.xlatwidth = 1;
++	}
++	if (bmplength > 120) {
++		bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
++		bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
++	}
++	if (bmplength > 143)
++		bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
++
++	if (bmplength > 157)
++		bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
++
++	return 0;
++}
++
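++/*
++ * Naive substring search over the shadowed image.  Returns the offset
++ * of the first match, or 0 when there is none -- callers depend on the
++ * signatures never legitimately appearing at offset 0.
++ */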
++static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
++{
++	int i, j;
++
++	for (i = 0; i <= (n - len); i++) {
++		for (j = 0; j < len; j++)
++			if (data[i + j] != str[j])
++				break;
++		if (j == len)
++			return i;
++	}
++
++	return 0;
++}
++
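++/*
++ * Note that index 0xf is the DCB convention for "no i2c port"; it is
++ * accepted and left unparsed rather than treated as an error.
++ */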
++static int
++read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
++{
++	uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
++	int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
++	int recordoffset = 0, rdofs = 1, wrofs = 0;
++	uint8_t port_type = 0;
++
++	if (!i2ctable)
++		return -EINVAL;
++
++	if (dcb_version >= 0x30) {
++		if (i2ctable[0] != dcb_version) /* necessary? */
++			NV_WARN(dev,
++				"DCB I2C table version mismatch (%02X vs %02X)\n",
++				i2ctable[0], dcb_version);
++		dcb_i2c_ver = i2ctable[0];
++		headerlen = i2ctable[1];
++		if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
++			i2c_entries = i2ctable[2];
++		else
++			NV_WARN(dev,
++				"DCB I2C table has more entries than indexable "
++				"(%d entries, max index 15)\n", i2ctable[2]);
++		entry_len = i2ctable[3];
++		/* [4] is i2c_default_indices, read in parse_dcb_table() */
++	}
++	/*
++	 * It's your own fault if you call this function on a DCB 1.1 BIOS --
++	 * the test below is for DCB 1.2
++	 */
++	if (dcb_version < 0x14) {
++		recordoffset = 2;
++		rdofs = 0;
++		wrofs = 1;
++	}
++
++	if (index == 0xf)
++		return 0;
++	if (index >= i2c_entries) {
++		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
++			 index, i2ctable[2]);
++		return -ENOENT;
++	}
++	if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
++		NV_ERROR(dev, "DCB I2C entry invalid\n");
++		return -EINVAL;
++	}
++
++	if (dcb_i2c_ver >= 0x30) {
++		port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
++
++		/*
++		 * Fixup for chips using same address offset for read and
++		 * write.
++		 */
++		if (port_type == 4)	/* seen on C51 */
++			rdofs = wrofs = 1;
++		if (port_type >= 5)	/* G80+ */
++			rdofs = wrofs = 0;
++	}
++
++	if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
++		NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
++
++	i2c->port_type = port_type;
++	i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
++	i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
++
++	return 0;
++}
++
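++/*
++ * No bounds check here; parse_dcb_gpio_table() clamps the entry count
++ * to DCB_MAX_NUM_GPIO_ENTRIES before parsing any entries.
++ */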
++static struct dcb_gpio_entry *
++new_gpio_entry(struct nvbios *bios)
++{
++	struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
++
++	return &gpio->entry[gpio->entries++];
++}
++
++struct dcb_gpio_entry *
++nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	int i;
++
++	for (i = 0; i < bios->bdcb.gpio.entries; i++) {
++		if (bios->bdcb.gpio.entry[i].tag != tag)
++			continue;
++
++		return &bios->bdcb.gpio.entry[i];
++	}
++
++	return NULL;
++}
++
++static void
++parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
++{
++	struct dcb_gpio_entry *gpio;
++	uint16_t ent = ROM16(bios->data[offset]);
++	uint8_t line = ent & 0x1f,
++		tag = ent >> 5 & 0x3f,
++		flags = ent >> 11 & 0x1f;
++
++	if (tag == 0x3f)
++		return;
++
++	gpio = new_gpio_entry(bios);
++
++	gpio->tag = tag;
++	gpio->line = line;
++	gpio->invert = flags != 4;
++}
++
++static void
++parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
++{
++	struct dcb_gpio_entry *gpio;
++	uint32_t ent = ROM32(bios->data[offset]);
++	uint8_t line = ent & 0x1f,
++		tag = ent >> 8 & 0xff;
++
++	if (tag == 0xff)
++		return;
++
++	gpio = new_gpio_entry(bios);
++
++	/* Currently unused, we may need more fields parsed at some
++	 * point. */
++	gpio->tag = tag;
++	gpio->line = line;
++}
++
++static void
++parse_dcb_gpio_table(struct nvbios *bios)
++{
++	struct drm_device *dev = bios->dev;
++	uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
++	uint8_t *gpio_table = &bios->data[gpio_table_ptr];
++	int header_len = gpio_table[1],
++	    entries = gpio_table[2],
++	    entry_len = gpio_table[3];
++	void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
++	int i;
++
++	if (bios->bdcb.version >= 0x40) {
++		if (gpio_table_ptr && entry_len != 4) {
++			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
++			return;
++		}
++
++		parse_entry = parse_dcb40_gpio_entry;
++
++	} else if (bios->bdcb.version >= 0x30) {
++		if (gpio_table_ptr && entry_len != 2) {
++			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
++			return;
++		}
++
++		parse_entry = parse_dcb30_gpio_entry;
++
++	} else if (bios->bdcb.version >= 0x22) {
++		/*
++		 * DCBs older than v3.0 don't really have a GPIO
++		 * table, instead they keep some GPIO info at fixed
++		 * locations.
++		 */
++		uint16_t dcbptr = ROM16(bios->data[0x36]);
++		uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
++
++		if (tvdac_gpio[0] & 1) {
++			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
++
++			gpio->tag = DCB_GPIO_TVDAC0;
++			gpio->line = tvdac_gpio[1] >> 4;
++			gpio->invert = tvdac_gpio[0] & 2;
++		}
++	}
++
++	if (!gpio_table_ptr)
++		return;
++
++	if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
++		NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
++		entries = DCB_MAX_NUM_GPIO_ENTRIES;
++	}
++
++	for (i = 0; i < entries; i++)
++		parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
++}
++
++struct dcb_connector_table_entry *
++nouveau_bios_connector_entry(struct drm_device *dev, int index)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	struct dcb_connector_table_entry *cte;
++
++	if (index >= bios->bdcb.connector.entries)
++		return NULL;
++
++	cte = &bios->bdcb.connector.entry[index];
++	if (cte->type == 0xff)
++		return NULL;
++
++	return cte;
++}
++
++static void
++parse_dcb_connector_table(struct nvbios *bios)
++{
++	struct drm_device *dev = bios->dev;
++	struct dcb_connector_table *ct = &bios->bdcb.connector;
++	struct dcb_connector_table_entry *cte;
++	uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
++	uint8_t *entry;
++	int i;
++
++	if (!bios->bdcb.connector_table_ptr) {
++		NV_DEBUG_KMS(dev, "No DCB connector table present\n");
++		return;
++	}
++
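++	/* VHER: version, header length, number of entries, record length */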
++	NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
++		conntab[0], conntab[1], conntab[2], conntab[3]);
++	if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
++	    (conntab[3] != 2 && conntab[3] != 4)) {
++		NV_ERROR(dev, "  Unknown!  Please report.\n");
++		return;
++	}
++
++	ct->entries = conntab[2];
++
++	entry = conntab + conntab[1];
++	cte = &ct->entry[0];
++	for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
++		if (conntab[3] == 2)
++			cte->entry = ROM16(entry[0]);
++		else
++			cte->entry = ROM32(entry[0]);
++		cte->type  = (cte->entry & 0x000000ff) >> 0;
++		cte->index = (cte->entry & 0x00000f00) >> 8;
++		switch (cte->entry & 0x00033000) {
++		case 0x00001000:
++			cte->gpio_tag = 0x07;
++			break;
++		case 0x00002000:
++			cte->gpio_tag = 0x08;
++			break;
++		case 0x00010000:
++			cte->gpio_tag = 0x51;
++			break;
++		case 0x00020000:
++			cte->gpio_tag = 0x52;
++			break;
++		default:
++			cte->gpio_tag = 0xff;
++			break;
++		}
++
++		if (cte->type == 0xff)
++			continue;
++
++		NV_INFO(dev, "  %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
++			i, cte->entry, cte->type, cte->index, cte->gpio_tag);
++	}
++}
++
++static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
++{
++	struct dcb_entry *entry = &dcb->entry[dcb->entries];
++
++	memset(entry, 0, sizeof(struct dcb_entry));
++	entry->index = dcb->entries++;
++
++	return entry;
++}
++
++static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
++{
++	struct dcb_entry *entry = new_dcb_entry(dcb);
++
++	entry->type = 0;
++	entry->i2c_index = i2c;
++	entry->heads = heads;
++	entry->location = DCB_LOC_ON_CHIP;
++	/* "or" mostly unused in early gen crt modesetting, 0 is fine */
++}
++
++static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
++{
++	struct dcb_entry *entry = new_dcb_entry(dcb);
++
++	entry->type = 2;
++	entry->i2c_index = LEGACY_I2C_PANEL;
++	entry->heads = twoHeads ? 3 : 1;
++	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
++	entry->or = 1;	/* means |0x10 gets set on CRE_LCD__INDEX */
++	entry->duallink_possible = false; /* SiI164 and co. are single link */
++
++#if 0
++	/*
++	 * For dvi-a either crtc probably works, but my card appears to only
++	 * support dvi-d.  "nvidia" still attempts to program it for dvi-a,
++	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
++	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
++	 * the monitor picks up the mode res ok and lights up, but no pixel
++	 * data appears, so the board manufacturer probably connected up the
++	 * sync lines, but missed the video traces / components
++	 *
++	 * with this introduction, dvi-a left as an exercise for the reader.
++	 */
++	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
++#endif
++}
++
++static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
++{
++	struct dcb_entry *entry = new_dcb_entry(dcb);
++
++	entry->type = 1;
++	entry->i2c_index = LEGACY_I2C_TV;
++	entry->heads = twoHeads ? 3 : 1;
++	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
++}
++
++static bool
++parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
++		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
++{
++	entry->type = conn & 0xf;
++	entry->i2c_index = (conn >> 4) & 0xf;
++	entry->heads = (conn >> 8) & 0xf;
++	if (bdcb->version >= 0x40)
++		entry->connector = (conn >> 12) & 0xf;
++	entry->bus = (conn >> 16) & 0xf;
++	entry->location = (conn >> 20) & 0x3;
++	entry->or = (conn >> 24) & 0xf;
++	/*
++	 * Normal entries consist of a single bit, but dual link has the
++	 * next most significant bit set too, e.g. "or" values 0x3 or 0xc
++	 */
++	entry->duallink_possible =
++			((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
++
++	switch (entry->type) {
++	case OUTPUT_ANALOG:
++		/*
++		 * Although the rest of a CRT conf dword is usually
++		 * zeros, mac biosen have stuff there so we must mask
++		 */
++		entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
++					 (conf & 0xffff) * 10 :
++					 (conf & 0xff) * 10000;
++		break;
++	case OUTPUT_LVDS:
++		{
++		uint32_t mask;
++		if (conf & 0x1)
++			entry->lvdsconf.use_straps_for_mode = true;
++		if (bdcb->version < 0x22) {
++			mask = ~0xd;
++			/*
++			 * The laptop in bug 14567 lies and claims to not use
++			 * straps when it does, so assume all DCB 2.0 laptops
++			 * use straps, until a broken EDID using one is produced
++			 */
++			entry->lvdsconf.use_straps_for_mode = true;
++			/*
++			 * Both 0x4 and 0x8 show up in v2.0 tables; assume they
++			 * mean the same thing (probably wrong, but might work)
++			 */
++			if (conf & 0x4 || conf & 0x8)
++				entry->lvdsconf.use_power_scripts = true;
++		} else {
++			mask = ~0x5;
++			if (conf & 0x4)
++				entry->lvdsconf.use_power_scripts = true;
++		}
++		if (conf & mask) {
++			/*
++			 * Until we even try to use these on G8x, it's
++			 * useless reporting unknown bits.  They all are.
++			 */
++			if (bdcb->version >= 0x40)
++				break;
++
++			NV_ERROR(dev, "Unknown LVDS configuration bits, "
++				      "please report\n");
++		}
++		break;
++		}
++	case OUTPUT_TV:
++	{
++		if (bdcb->version >= 0x30)
++			entry->tvconf.has_component_output = conf & (0x8 << 4);
++		else
++			entry->tvconf.has_component_output = false;
++
++		break;
++	}
++	case OUTPUT_DP:
++		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
++		entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
++		switch ((conf & 0x0f000000) >> 24) {
++		case 0xf:
++			entry->dpconf.link_nr = 4;
++			break;
++		case 0x3:
++			entry->dpconf.link_nr = 2;
++			break;
++		default:
++			entry->dpconf.link_nr = 1;
++			break;
++		}
++		break;
++	case OUTPUT_TMDS:
++		entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
++		break;
++	case 0xe:
++		/* weird g80 mobile type that "nv" treats as a terminator */
++		bdcb->dcb.entries--;
++		return false;
++	}
++
++	/* unsure what DCB version introduces this, 3.0? */
++	if (conf & 0x100000)
++		entry->i2c_upper_default = true;
++
++	return true;
++}
++
++static bool
++parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
++		  uint32_t conn, uint32_t conf, struct dcb_entry *entry)
++{
++	switch (conn & 0x0000000f) {
++	case 0:
++		entry->type = OUTPUT_ANALOG;
++		break;
++	case 1:
++		entry->type = OUTPUT_TV;
++		break;
++	case 2:
++	case 3:
++		entry->type = OUTPUT_LVDS;
++		break;
++	case 4:
++		switch ((conn & 0x000000f0) >> 4) {
++		case 0:
++			entry->type = OUTPUT_TMDS;
++			break;
++		case 1:
++			entry->type = OUTPUT_LVDS;
++			break;
++		default:
++			NV_ERROR(dev, "Unknown DCB subtype 4/%d\n",
++				 (conn & 0x000000f0) >> 4);
++			return false;
++		}
++		break;
++	default:
++		NV_ERROR(dev, "Unknown DCB type %d\n", conn & 0x0000000f);
++		return false;
++	}
++
++	entry->i2c_index = (conn & 0x0003c000) >> 14;
++	entry->heads = ((conn & 0x001c0000) >> 18) + 1;
++	entry->or = entry->heads; /* same as heads, hopefully safe enough */
++	entry->location = (conn & 0x01e00000) >> 21;
++	entry->bus = (conn & 0x0e000000) >> 25;
++	entry->duallink_possible = false;
++
++	switch (entry->type) {
++	case OUTPUT_ANALOG:
++		entry->crtconf.maxfreq = (conf & 0xffff) * 10;
++		break;
++	case OUTPUT_TV:
++		entry->tvconf.has_component_output = false;
++		break;
++	case OUTPUT_TMDS:
++		/*
++		 * Invent a DVI-A output, by copying the fields of the DVI-D
++		 * output; reported to work by math_b on an NV20(!).
++		 */
++		fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
++		break;
++	case OUTPUT_LVDS:
++		if ((conn & 0x00003f00) != 0x10)
++			entry->lvdsconf.use_straps_for_mode = true;
++		entry->lvdsconf.use_power_scripts = true;
++		break;
++	default:
++		break;
++	}
++
++	return true;
++}
++
++static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
++			    uint32_t conn, uint32_t conf)
++{
++	struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
++	bool ret;
++
++	if (bdcb->version >= 0x20)
++		ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
++	else
++		ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
++	if (!ret)
++		return ret;
++
++	read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
++			   entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
++
++	return true;
++}
++
++static
++void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
++{
++	/*
++	 * DCB v2.0 lists each output combination separately.
++	 * Here we merge compatible entries to have fewer outputs, with
++	 * more options
++	 */
++
++	int i, newentries = 0;
++
++	for (i = 0; i < dcb->entries; i++) {
++		struct dcb_entry *ient = &dcb->entry[i];
++		int j;
++
++		for (j = i + 1; j < dcb->entries; j++) {
++			struct dcb_entry *jent = &dcb->entry[j];
++
++			if (jent->type == 100) /* already merged entry */
++				continue;
++
++			/* merge heads field when all other fields the same */
++			if (jent->i2c_index == ient->i2c_index &&
++			    jent->type == ient->type &&
++			    jent->location == ient->location &&
++			    jent->or == ient->or) {
++				NV_TRACE(dev, "Merging DCB entries %d and %d\n",
++					 i, j);
++				ient->heads |= jent->heads;
++				jent->type = 100; /* dummy value */
++			}
++		}
++	}
++
++	/* Compact entries merged into others out of dcb */
++	for (i = 0; i < dcb->entries; i++) {
++		if (dcb->entry[i].type == 100)
++			continue;
++
++		if (newentries != i) {
++			dcb->entry[newentries] = dcb->entry[i];
++			dcb->entry[newentries].index = newentries;
++		}
++		newentries++;
++	}
++
++	dcb->entries = newentries;
++}
++
++static int
++parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct bios_parsed_dcb *bdcb = &bios->bdcb;
++	struct parsed_dcb *dcb;
++	uint16_t dcbptr = 0, i2ctabptr = 0;
++	uint8_t *dcbtable;
++	uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
++	bool configblock = true;
++	int recordlength = 8, confofs = 4;
++	int i;
++
++	dcb = bios->pub.dcb = &bdcb->dcb;
++	dcb->entries = 0;
++
++	/* get the offset from 0x36 */
++	if (dev_priv->card_type > NV_04) {
++		dcbptr = ROM16(bios->data[0x36]);
++		if (dcbptr == 0x0000)
++			NV_WARN(dev, "No output data (DCB) found in BIOS\n");
++	}
++
++	/* this situation likely means a really old card, pre DCB */
++	if (dcbptr == 0x0) {
++		NV_INFO(dev, "Assuming a CRT output exists\n");
++		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
++
++		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
++			fabricate_tv_output(dcb, twoHeads);
++
++		return 0;
++	}
++
++	dcbtable = &bios->data[dcbptr];
++
++	/* get DCB version */
++	bdcb->version = dcbtable[0];
++	NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
++		 bdcb->version >> 4, bdcb->version & 0xf);
++
++	if (bdcb->version >= 0x20) { /* NV17+ */
++		uint32_t sig;
++
++		if (bdcb->version >= 0x30) { /* NV40+ */
++			headerlen = dcbtable[1];
++			entries = dcbtable[2];
++			recordlength = dcbtable[3];
++			i2ctabptr = ROM16(dcbtable[4]);
++			sig = ROM32(dcbtable[6]);
++			bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
++			bdcb->connector_table_ptr = ROM16(dcbtable[20]);
++		} else {
++			i2ctabptr = ROM16(dcbtable[2]);
++			sig = ROM32(dcbtable[4]);
++			headerlen = 8;
++		}
++
++		if (sig != 0x4edcbdcb) {
++			NV_ERROR(dev, "Bad Display Configuration Block "
++					"signature (%08X)\n", sig);
++			return -EINVAL;
++		}
++	} else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
++		char sig[8] = { 0 };
++
++		strncpy(sig, (char *)&dcbtable[-7], 7);
++		i2ctabptr = ROM16(dcbtable[2]);
++		recordlength = 10;
++		confofs = 6;
++
++		if (strcmp(sig, "DEV_REC")) {
++			NV_ERROR(dev, "Bad Display Configuration Block "
++					"signature (%s)\n", sig);
++			return -EINVAL;
++		}
++	} else {
++		/*
++		 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
++		 * has the same single (crt) entry, even when tv-out present, so
++		 * the conclusion is this version cannot really be used.
++		 * v1.2 tables (some NV6/10, and NV15+) normally have the same
++		 * 5 entries, which are not specific to the card and so of no use.
++		 * v1.2 does have an I2C table that read_dcb_i2c_table can
++		 * handle, but cards exist (nv11 in #14821) with a bad i2c table
++		 * pointer, so use the indices parsed in parse_bmp_structure.
++		 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
++		 */
++		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
++				  "adding all possible outputs\n");
++		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
++
++		/*
++		 * Attempt to detect TV before DVI because the test
++		 * for the former is more accurate and it rules the
++		 * latter out.
++		 */
++		if (nv04_tv_identify(dev,
++				     bios->legacy.i2c_indices.tv) >= 0)
++			fabricate_tv_output(dcb, twoHeads);
++
++		else if (bios->tmds.output0_script_ptr ||
++			 bios->tmds.output1_script_ptr)
++			fabricate_dvi_i_output(dcb, twoHeads);
++
++		return 0;
++	}
++
++	if (!i2ctabptr)
++		NV_WARN(dev, "No pointer to DCB I2C port table\n");
++	else {
++		bdcb->i2c_table = &bios->data[i2ctabptr];
++		if (bdcb->version >= 0x30)
++			bdcb->i2c_default_indices = bdcb->i2c_table[4];
++	}
++
++	parse_dcb_gpio_table(bios);
++	parse_dcb_connector_table(bios);
++
++	if (entries > DCB_MAX_NUM_ENTRIES)
++		entries = DCB_MAX_NUM_ENTRIES;
++
++	for (i = 0; i < entries; i++) {
++		uint32_t connection, config = 0;
++
++		connection = ROM32(dcbtable[headerlen + recordlength * i]);
++		if (configblock)
++			config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
++
++		/* seen on an NV11 with DCB v1.5 */
++		if (connection == 0x00000000)
++			break;
++
++		/* seen on an NV17 with DCB v2.0 */
++		if (connection == 0xffffffff)
++			break;
++
++		if ((connection & 0x0000000f) == 0x0000000f)
++			continue;
++
++		NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
++			     dcb->entries, connection, config);
++
++		if (!parse_dcb_entry(dev, bdcb, connection, config))
++			break;
++	}
++
++	/*
++	 * apart from v2.1+ not being known to require merging, this
++	 * guarantees dcbent->index is the index of the entry in the rom image
++	 */
++	if (bdcb->version < 0x21)
++		merge_like_dcb_entries(dev, dcb);
++
++	return dcb->entries ? 0 : -ENXIO;
++}
++
++static void
++fixup_legacy_connector(struct nvbios *bios)
++{
++	struct bios_parsed_dcb *bdcb = &bios->bdcb;
++	struct parsed_dcb *dcb = &bdcb->dcb;
++	int high = 0, i;
++
++	/*
++	 * DCB 3.0 also has the table in most cases, but there are some cards
++	 * where the table is filled with stub entries, and the DCB entry
++	 * indices are all 0.  We don't need the connector indices on pre-G80
++	 * chips (yet?) so limit the use to DCB 4.0 and above.
++	 */
++	if (bdcb->version >= 0x40)
++		return;
++
++	/*
++	 * No known connector info before v3.0, so make it up.  The rule here
++	 * is: anything on the same i2c bus is considered to be on the same
++	 * connector.  Any output without an associated i2c bus is assigned
++	 * its own unique connector index.
++	 */
++	for (i = 0; i < dcb->entries; i++) {
++		if (dcb->entry[i].i2c_index == 0xf)
++			continue;
++
++		/*
++		 * Ignore the I2C index for on-chip TV-out, as there
++		 * are cards with bogus values (nv31m in bug 23212),
++		 * and it's otherwise useless.
++		 */
++		if (dcb->entry[i].type == OUTPUT_TV &&
++		    dcb->entry[i].location == DCB_LOC_ON_CHIP) {
++			dcb->entry[i].i2c_index = 0xf;
++			continue;
++		}
++
++		dcb->entry[i].connector = dcb->entry[i].i2c_index;
++		if (dcb->entry[i].connector > high)
++			high = dcb->entry[i].connector;
++	}
++
++	for (i = 0; i < dcb->entries; i++) {
++		if (dcb->entry[i].i2c_index != 0xf)
++			continue;
++
++		dcb->entry[i].connector = ++high;
++	}
++}
++
++static void
++fixup_legacy_i2c(struct nvbios *bios)
++{
++	struct parsed_dcb *dcb = &bios->bdcb.dcb;
++	int i;
++
++	for (i = 0; i < dcb->entries; i++) {
++		if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
++			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
++		if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
++			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
++		if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
++			dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
++	}
++}
++
++static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
++{
++	/*
++	 * The header following the "HWSQ" signature has the number of entries,
++	 * and the entry size
++	 *
++	 * An entry consists of a dword to write to the sequencer control reg
++	 * (0x00001304), followed by the ucode bytes, written sequentially,
++	 * starting at reg 0x00001400
++	 */
++
++	uint8_t bytes_to_write;
++	uint16_t hwsq_entry_offset;
++	int i;
++
++	if (bios->data[hwsq_offset] <= entry) {
++		NV_ERROR(dev, "Too few entries in HW sequencer table for "
++				"requested entry\n");
++		return -ENOENT;
++	}
++
++	bytes_to_write = bios->data[hwsq_offset + 1];
++
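++	/* expect one 4-byte control dword followed by 32 bytes of ucode */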
++	if (bytes_to_write != 36) {
++		NV_ERROR(dev, "Unknown HW sequencer entry size\n");
++		return -EINVAL;
++	}
++
++	NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");
++
++	hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
++
++	/* set sequencer control */
++	bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
++	bytes_to_write -= 4;
++
++	/* write ucode */
++	for (i = 0; i < bytes_to_write; i += 4)
++		bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
++
++	/* twiddle NV_PBUS_DEBUG_4 */
++	bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);
++
++	return 0;
++}
++
++static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
++					struct nvbios *bios)
++{
++	/*
++	 * BMP based cards, from NV17, need a microcode loading to correctly
++	 * control the GPIO etc for LVDS panels
++	 *
++	 * BIT based cards seem to do this directly in the init scripts
++	 *
++	 * The microcode entries are found by the "HWSQ" signature.
++	 */
++
++	const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
++	const int sz = sizeof(hwsq_signature);
++	int hwsq_offset;
++
++	hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
++	if (!hwsq_offset)
++		return 0;
++
++	/* always use entry 0? */
++	return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
++}
++
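++/*
++ * Scan the shadowed image for an embedded EDID block, identified by the
++ * standard EDID 1.x header and a zero checksum over EDID1_LEN bytes.
++ */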
++uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	const uint8_t edid_sig[] = {
++			0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
++	uint16_t offset = 0;
++	uint16_t newoffset;
++	int searchlen = NV_PROM_SIZE;
++
++	if (bios->fp.edid)
++		return bios->fp.edid;
++
++	while (searchlen) {
++		newoffset = findstr(&bios->data[offset], searchlen,
++								edid_sig, 8);
++		if (!newoffset)
++			return NULL;
++		offset += newoffset;
++		if (!nv_cksum(&bios->data[offset], EDID1_LEN))
++			break;
++
++		searchlen -= offset;
++		offset++;
++	}
++
++	NV_TRACE(dev, "Found EDID in BIOS\n");
++
++	return bios->fp.edid = &bios->data[offset];
++}
++
++void
++nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
++			    struct dcb_entry *dcbent)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	struct init_exec iexec = { true, false };
++
++	mutex_lock(&bios->lock);
++	bios->display.output = dcbent;
++	parse_init_table(bios, table, &iexec);
++	bios->display.output = NULL;
++	mutex_unlock(&bios->lock);
++}
++
++static bool NVInitVBIOS(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++
++	memset(bios, 0, sizeof(struct nvbios));
++	mutex_init(&bios->lock);
++	bios->dev = dev;
++
++	if (!NVShadowVBIOS(dev, bios->data))
++		return false;
++
++	bios->length = NV_PROM_SIZE;
++	return true;
++}
++
++static int nouveau_parse_vbios_struct(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
++	const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
++	int offset;
++
++	offset = findstr(bios->data, bios->length,
++					bit_signature, sizeof(bit_signature));
++	if (offset) {
++		NV_TRACE(dev, "BIT BIOS found\n");
++		return parse_bit_structure(bios, offset + 6);
++	}
++
++	offset = findstr(bios->data, bios->length,
++					bmp_signature, sizeof(bmp_signature));
++	if (offset) {
++		NV_TRACE(dev, "BMP BIOS found\n");
++		return parse_bmp_structure(dev, bios, offset);
++	}
++
++	NV_ERROR(dev, "No known BIOS signature found\n");
++	return -ENODEV;
++}
++
++int
++nouveau_run_vbios_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	int i, ret = 0;
++
++	NVLockVgaCrtcs(dev, false);
++	if (nv_two_heads(dev))
++		NVSetOwner(dev, bios->state.crtchead);
++
++	if (bios->major_version < 5)	/* BMP only */
++		load_nv17_hw_sequencer_ucode(dev, bios);
++
++	if (bios->execute) {
++		bios->fp.last_script_invoc = 0;
++		bios->fp.lvds_init_run = false;
++	}
++
++	parse_init_tables(bios);
++
++	/*
++	 * Runs some additional script seen on G8x VBIOSen.  The VBIOS'
++	 * parser will run this right after the init tables, the binary
++	 * driver appears to run it at some point later.
++	 */
++	if (bios->some_script_ptr) {
++		struct init_exec iexec = {true, false};
++
++		NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
++			bios->some_script_ptr);
++		parse_init_table(bios, bios->some_script_ptr, &iexec);
++	}
++
++	if (dev_priv->card_type >= NV_50) {
++		for (i = 0; i < bios->bdcb.dcb.entries; i++) {
++			nouveau_bios_run_display_table(dev,
++						       &bios->bdcb.dcb.entry[i],
++						       0, 0);
++		}
++	}
++
++	NVLockVgaCrtcs(dev, true);
++
++	return ret;
++}
++
++static void
++nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	struct dcb_i2c_entry *entry;
++	int i;
++
++	entry = &bios->bdcb.dcb.i2c[0];
++	for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
++		nouveau_i2c_fini(dev, entry);
++}
++
++int
++nouveau_bios_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint32_t saved_nv_pextdev_boot_0;
++	bool was_locked;
++	int ret;
++
++	dev_priv->vbios = &bios->pub;
++
++	if (!NVInitVBIOS(dev))
++		return -ENODEV;
++
++	ret = nouveau_parse_vbios_struct(dev);
++	if (ret)
++		return ret;
++
++	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
++	if (ret)
++		return ret;
++
++	fixup_legacy_i2c(bios);
++	fixup_legacy_connector(bios);
++
++	if (!bios->major_version)	/* we don't run version 0 bios */
++		return 0;
++
++	/* these will need remembering across a suspend */
++	saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
++	bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);
++
++	/* init script execution disabled */
++	bios->execute = false;
++
++	/* ... unless card isn't POSTed already */
++	if (dev_priv->card_type >= NV_10 &&
++	    NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
++	    NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
++		NV_INFO(dev, "Adaptor not initialised\n");
++		if (dev_priv->card_type < NV_50) {
++			NV_ERROR(dev, "Unable to POST this chipset\n");
++			return -ENODEV;
++		}
++
++		NV_INFO(dev, "Running VBIOS init tables\n");
++		bios->execute = true;
++	}
++
++	bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
++
++	ret = nouveau_run_vbios_init(dev);
++	if (ret) {
++		dev_priv->vbios = NULL;
++		return ret;
++	}
++
++	/* feature_byte on BMP is poor, but init always sets CR4B */
++	was_locked = NVLockVgaCrtcs(dev, false);
++	if (bios->major_version < 5)
++		bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
++
++	/* all BIT systems need p_f_m_t for digital_min_front_porch */
++	if (bios->is_mobile || bios->major_version >= 5)
++		ret = parse_fp_mode_table(dev, bios);
++	NVLockVgaCrtcs(dev, was_locked);
++
++	/* allow subsequent scripts to execute */
++	bios->execute = true;
++
++	return 0;
++}
++
++void
++nouveau_bios_takedown(struct drm_device *dev)
++{
++	nouveau_bios_i2c_devices_takedown(dev);
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
+new file mode 100644
+index 0000000..fd94bd6
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
+@@ -0,0 +1,292 @@
++/*
++ * Copyright 2007-2008 Nouveau Project
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_BIOS_H__
++#define __NOUVEAU_BIOS_H__
++
++#include "nvreg.h"
++#include "nouveau_i2c.h"
++
++#define DCB_MAX_NUM_ENTRIES 16
++#define DCB_MAX_NUM_I2C_ENTRIES 16
++#define DCB_MAX_NUM_GPIO_ENTRIES 32
++#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16
++
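++/* dcb_entry->location value for outputs integrated on the GPU die */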
++#define DCB_LOC_ON_CHIP 0
++
++struct dcb_entry {
++	int index;	/* may not be raw dcb index if merging has happened */
++	uint8_t type;
++	uint8_t i2c_index;
++	uint8_t heads;
++	uint8_t connector;
++	uint8_t bus;
++	uint8_t location;
++	uint8_t or;
++	bool duallink_possible;
++	union {
++		struct sor_conf {
++			int link;
++		} sorconf;
++		struct {
++			int maxfreq;
++		} crtconf;
++		struct {
++			struct sor_conf sor;
++			bool use_straps_for_mode;
++			bool use_power_scripts;
++		} lvdsconf;
++		struct {
++			bool has_component_output;
++		} tvconf;
++		struct {
++			struct sor_conf sor;
++			int link_nr;
++			int link_bw;
++		} dpconf;
++		struct {
++			struct sor_conf sor;
++		} tmdsconf;
++	};
++	bool i2c_upper_default;
++};
++
++struct dcb_i2c_entry {
++	uint8_t port_type;
++	uint8_t read, write;
++	struct nouveau_i2c_chan *chan;
++};
++
++struct parsed_dcb {
++	int entries;
++	struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
++	struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
++};
++
++enum dcb_gpio_tag {
++	DCB_GPIO_TVDAC0 = 0xc,
++	DCB_GPIO_TVDAC1 = 0x2d,
++};
++
++struct dcb_gpio_entry {
++	enum dcb_gpio_tag tag;
++	int line;
++	bool invert;
++};
++
++struct parsed_dcb_gpio {
++	int entries;
++	struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
++};
++
++struct dcb_connector_table_entry {
++	uint32_t entry;
++	uint8_t type;
++	uint8_t index;
++	uint8_t gpio_tag;
++};
++
++struct dcb_connector_table {
++	int entries;
++	struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
++};
++
++struct bios_parsed_dcb {
++	uint8_t version;
++
++	struct parsed_dcb dcb;
++
++	uint8_t *i2c_table;
++	uint8_t i2c_default_indices;
++
++	uint16_t gpio_table_ptr;
++	struct parsed_dcb_gpio gpio;
++	uint16_t connector_table_ptr;
++	struct dcb_connector_table connector;
++};
++
++enum nouveau_encoder_type {
++	OUTPUT_ANALOG = 0,
++	OUTPUT_TV = 1,
++	OUTPUT_TMDS = 2,
++	OUTPUT_LVDS = 3,
++	OUTPUT_DP = 6,
++	OUTPUT_ANY = -1
++};
++
++enum nouveau_or {
++	OUTPUT_A = (1 << 0),
++	OUTPUT_B = (1 << 1),
++	OUTPUT_C = (1 << 2)
++};
++
++enum LVDS_script {
++	/* Order *does* matter here */
++	LVDS_INIT = 1,
++	LVDS_RESET,
++	LVDS_BACKLIGHT_ON,
++	LVDS_BACKLIGHT_OFF,
++	LVDS_PANEL_ON,
++	LVDS_PANEL_OFF
++};
++
++/* changing these requires matching changes to reg tables in nv_get_clock */
++#define MAX_PLL_TYPES	4
++enum pll_types {
++	NVPLL,
++	MPLL,
++	VPLL1,
++	VPLL2
++};
++
++struct pll_lims {
++	struct {
++		int minfreq;
++		int maxfreq;
++		int min_inputfreq;
++		int max_inputfreq;
++
++		uint8_t min_m;
++		uint8_t max_m;
++		uint8_t min_n;
++		uint8_t max_n;
++	} vco1, vco2;
++
++	uint8_t max_log2p;
++	/*
++	 * For most pre-NV50 cards, setting a log2P of 7 (the common max_log2p
++	 * value) is no different to 6 (at least for vplls), so allowing the MNP
++	 * calc to use 7 causes the generated clock to be out by a factor of 2.
++	 * However, max_log2p cannot be fixed up during parsing as the
++	 * unmodified max_log2p value is still needed for setting mplls, hence
++	 * an additional max_usable_log2p member.
++	 */
++	uint8_t max_usable_log2p;
++	uint8_t log2p_bias;
++
++	uint8_t min_p;
++	uint8_t max_p;
++
++	int refclk;
++};
++
++struct nouveau_bios_info {
++	struct parsed_dcb *dcb;
++
++	uint8_t chip_version;
++
++	uint32_t dactestval;
++	uint32_t tvdactestval;
++	uint8_t digital_min_front_porch;
++	bool fp_no_ddc;
++};
++
++struct nvbios {
++	struct drm_device *dev;
++	struct nouveau_bios_info pub;
++
++	struct mutex lock;
++
++	uint8_t data[NV_PROM_SIZE];
++	unsigned int length;
++	bool execute;
++
++	uint8_t major_version;
++	uint8_t feature_byte;
++	bool is_mobile;
++
++	uint32_t fmaxvco, fminvco;
++
++	bool old_style_init;
++	uint16_t init_script_tbls_ptr;
++	uint16_t extra_init_script_tbl_ptr;
++	uint16_t macro_index_tbl_ptr;
++	uint16_t macro_tbl_ptr;
++	uint16_t condition_tbl_ptr;
++	uint16_t io_condition_tbl_ptr;
++	uint16_t io_flag_condition_tbl_ptr;
++	uint16_t init_function_tbl_ptr;
++
++	uint16_t pll_limit_tbl_ptr;
++	uint16_t ram_restrict_tbl_ptr;
++	uint8_t ram_restrict_group_count;
++
++	uint16_t some_script_ptr; /* BIT I + 14 */
++	uint16_t init96_tbl_ptr; /* BIT I + 16 */
++
++	struct bios_parsed_dcb bdcb;
++
++	struct {
++		int crtchead;
++		/* these need remembering across suspend */
++		uint32_t saved_nv_pfb_cfg0;
++	} state;
++
++	struct {
++		struct dcb_entry *output;
++		uint16_t script_table_ptr;
++		uint16_t dp_table_ptr;
++	} display;
++
++	struct {
++		uint16_t fptablepointer;	/* also used by tmds */
++		uint16_t fpxlatetableptr;
++		int xlatwidth;
++		uint16_t lvdsmanufacturerpointer;
++		uint16_t fpxlatemanufacturertableptr;
++		uint16_t mode_ptr;
++		uint16_t xlated_entry;
++		bool power_off_for_reset;
++		bool reset_after_pclk_change;
++		bool dual_link;
++		bool link_c_increment;
++		bool BITbit1;
++		bool if_is_24bit;
++		int duallink_transition_clk;
++		uint8_t strapless_is_24bit;
++		uint8_t *edid;
++
++		/* will need resetting after suspend */
++		int last_script_invoc;
++		bool lvds_init_run;
++	} fp;
++
++	struct {
++		uint16_t output0_script_ptr;
++		uint16_t output1_script_ptr;
++	} tmds;
++
++	struct {
++		uint16_t mem_init_tbl_ptr;
++		uint16_t sdr_seq_tbl_ptr;
++		uint16_t ddr_seq_tbl_ptr;
++
++		struct {
++			uint8_t crt, tv, panel;
++		} i2c_indices;
++
++		uint16_t lvds_single_a_script_ptr;
++	} legacy;
++};
++
++#endif
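
The header above reduces the BIOS DCB to fixed-size arrays that the rest
of the driver scans linearly (see find_encoder_by_type() further down).
A minimal user-space sketch of that consumption pattern, with the
structures trimmed down and the sample table contents invented:

#include <stdint.h>
#include <stdio.h>

#define DCB_MAX_NUM_ENTRIES 16

enum { OUTPUT_ANALOG = 0, OUTPUT_TV = 1, OUTPUT_TMDS = 2,
       OUTPUT_LVDS = 3, OUTPUT_DP = 6 };

/* trimmed-down stand-ins for dcb_entry/parsed_dcb */
struct entry { uint8_t type; uint8_t i2c_index; uint8_t heads; };
struct table { int entries; struct entry entry[DCB_MAX_NUM_ENTRIES]; };

/* linear scan for the first entry of the requested encoder type */
static const struct entry *find_by_type(const struct table *t, uint8_t type)
{
	int i;

	for (i = 0; i < t->entries; i++)
		if (t->entry[i].type == type)
			return &t->entry[i];
	return NULL;
}

int main(void)
{
	struct table t = {
		.entries = 2,
		.entry = {
			{ .type = OUTPUT_ANALOG, .i2c_index = 0, .heads = 3 },
			{ .type = OUTPUT_TMDS,   .i2c_index = 1, .heads = 1 },
		},
	};
	const struct entry *e = find_by_type(&t, OUTPUT_TMDS);

	if (e)
		printf("TMDS output on i2c %u, heads 0x%x\n",
		       e->i2c_index, e->heads);
	return 0;
}
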
+diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
+new file mode 100644
+index 0000000..028719f
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
+@@ -0,0 +1,773 @@
++/*
++ * Copyright 2007 Dave Airlied
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++/*
++ * Authors: Dave Airlied <airlied at linux.ie>
++ *	    Ben Skeggs   <darktama at iinet.net.au>
++ *	    Jeremy Kolb  <jkolb at brandeis.edu>
++ */
++
++#include "drmP.h"
++
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++#include <linux/log2.h>
++
++static void
++nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
++	struct drm_device *dev = dev_priv->dev;
++	struct nouveau_bo *nvbo = nouveau_bo(bo);
++
++	ttm_bo_kunmap(&nvbo->kmap);
++
++	if (unlikely(nvbo->gem))
++		DRM_ERROR("bo %p still attached to GEM object\n", bo);
++
++	if (nvbo->tile)
++		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
++
++	spin_lock(&dev_priv->ttm.bo_list_lock);
++	list_del(&nvbo->head);
++	spin_unlock(&dev_priv->ttm.bo_list_lock);
++	kfree(nvbo);
++}
++
++static void
++nouveau_bo_fixup_align(struct drm_device *dev,
++		       uint32_t tile_mode, uint32_t tile_flags,
++		       int *align, int *size)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/*
++	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
++	 * align to that as well as to the page size. Align the size to the
++	 * appropriate boundaries. This does imply that sizes are rounded up
++	 * by 3-7 pages, so be aware of this and do not waste memory by
++	 * allocating many small buffers.
++	 */
++	if (dev_priv->card_type == NV_50) {
++		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
++		int i;
++
++		switch (tile_flags) {
++		case 0x1800:
++		case 0x2800:
++		case 0x4800:
++		case 0x7a00:
++			if (is_power_of_2(block_size)) {
++				for (i = 1; i < 10; i++) {
++					*align = 12 * i * block_size;
++					if (!(*align % 65536))
++						break;
++				}
++			} else {
++				for (i = 1; i < 10; i++) {
++					*align = 8 * i * block_size;
++					if (!(*align % 65536))
++						break;
++				}
++			}
++			*size = roundup(*size, *align);
++			break;
++		default:
++			break;
++		}
++
++	} else {
++		if (tile_mode) {
++			if (dev_priv->chipset >= 0x40) {
++				*align = 65536;
++				*size = roundup(*size, 64 * tile_mode);
++
++			} else if (dev_priv->chipset >= 0x30) {
++				*align = 32768;
++				*size = roundup(*size, 64 * tile_mode);
++
++			} else if (dev_priv->chipset >= 0x20) {
++				*align = 16384;
++				*size = roundup(*size, 64 * tile_mode);
++
++			} else if (dev_priv->chipset >= 0x10) {
++				*align = 16384;
++				*size = roundup(*size, 32 * tile_mode);
++			}
++		}
++	}
++
++	/* ALIGN works only on powers of two. */
++	*size = roundup(*size, PAGE_SIZE);
++
++	if (dev_priv->card_type == NV_50) {
++		*size = roundup(*size, 65536);
++		*align = max(65536, *align);
++	}
++}
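
The pre-NV50 branch of this fixup is plain integer rounding: pad the
size to a multiple of the tiling pitch for the chipset generation, then
to the page size. A standalone sketch of that arithmetic, with invented
sample inputs:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static void fixup_align(unsigned chipset, uint32_t tile_mode,
			uint32_t *align, uint32_t *size)
{
	if (tile_mode) {
		if (chipset >= 0x40) {
			*align = 65536;
			*size = ROUNDUP(*size, 64 * tile_mode);
		} else if (chipset >= 0x30) {
			*align = 32768;
			*size = ROUNDUP(*size, 64 * tile_mode);
		} else if (chipset >= 0x20) {
			*align = 16384;
			*size = ROUNDUP(*size, 64 * tile_mode);
		} else if (chipset >= 0x10) {
			*align = 16384;
			*size = ROUNDUP(*size, 32 * tile_mode);
		}
	}
	*size = ROUNDUP(*size, PAGE_SIZE);	/* always page-aligned */
}

int main(void)
{
	uint32_t align = 0, size = 10000;

	fixup_align(0x44, 2, &align, &size);
	printf("align=%u size=%u\n", align, size);	/* align=65536 size=12288 */
	return 0;
}
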
++
++int
++nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
++	       int size, int align, uint32_t flags, uint32_t tile_mode,
++	       uint32_t tile_flags, bool no_vm, bool mappable,
++	       struct nouveau_bo **pnvbo)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_bo *nvbo;
++	int ret = 0;
++
++	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
++	if (!nvbo)
++		return -ENOMEM;
++	INIT_LIST_HEAD(&nvbo->head);
++	INIT_LIST_HEAD(&nvbo->entry);
++	nvbo->mappable = mappable;
++	nvbo->no_vm = no_vm;
++	nvbo->tile_mode = tile_mode;
++	nvbo->tile_flags = tile_flags;
++
++	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
++	align >>= PAGE_SHIFT;
++
++	nvbo->placement.fpfn = 0;
++	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
++	nouveau_bo_placement_set(nvbo, flags);
++
++	nvbo->channel = chan;
++	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
++			  ttm_bo_type_device, &nvbo->placement, align, 0,
++			  false, NULL, size, nouveau_bo_del_ttm);
++	nvbo->channel = NULL;
++	if (ret) {
++		/* ttm will call nouveau_bo_del_ttm if it fails. */
++		return ret;
++	}
++
++	spin_lock(&dev_priv->ttm.bo_list_lock);
++	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
++	spin_unlock(&dev_priv->ttm.bo_list_lock);
++	*pnvbo = nvbo;
++	return 0;
++}
++
++void
++nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
++{
++	int n = 0;
++
++	if (memtype & TTM_PL_FLAG_VRAM)
++		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
++	if (memtype & TTM_PL_FLAG_TT)
++		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
++	if (memtype & TTM_PL_FLAG_SYSTEM)
++		nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
++	nvbo->placement.placement = nvbo->placements;
++	nvbo->placement.busy_placement = nvbo->placements;
++	nvbo->placement.num_placement = n;
++	nvbo->placement.num_busy_placement = n;
++
++	if (nvbo->pin_refcnt) {
++		while (n--)
++			nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
++	}
++}
++
++int
++nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
++	struct ttm_buffer_object *bo = &nvbo->bo;
++	int ret, i;
++
++	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
++		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
++			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
++			 1 << bo->mem.mem_type, memtype);
++		return -EINVAL;
++	}
++
++	if (nvbo->pin_refcnt++)
++		return 0;
++
++	ret = ttm_bo_reserve(bo, false, false, false, 0);
++	if (ret)
++		goto out;
++
++	nouveau_bo_placement_set(nvbo, memtype);
++	for (i = 0; i < nvbo->placement.num_placement; i++)
++		nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
++
++	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
++	if (ret == 0) {
++		switch (bo->mem.mem_type) {
++		case TTM_PL_VRAM:
++			dev_priv->fb_aper_free -= bo->mem.size;
++			break;
++		case TTM_PL_TT:
++			dev_priv->gart_info.aper_free -= bo->mem.size;
++			break;
++		default:
++			break;
++		}
++	}
++	ttm_bo_unreserve(bo);
++out:
++	if (unlikely(ret))
++		nvbo->pin_refcnt--;
++	return ret;
++}
++
++int
++nouveau_bo_unpin(struct nouveau_bo *nvbo)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
++	struct ttm_buffer_object *bo = &nvbo->bo;
++	int ret, i;
++
++	if (--nvbo->pin_refcnt)
++		return 0;
++
++	ret = ttm_bo_reserve(bo, false, false, false, 0);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < nvbo->placement.num_placement; i++)
++		nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
++
++	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
++	if (ret == 0) {
++		switch (bo->mem.mem_type) {
++		case TTM_PL_VRAM:
++			dev_priv->fb_aper_free += bo->mem.size;
++			break;
++		case TTM_PL_TT:
++			dev_priv->gart_info.aper_free += bo->mem.size;
++			break;
++		default:
++			break;
++		}
++	}
++
++	ttm_bo_unreserve(bo);
++	return ret;
++}
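
Pinning here is a plain reference count: only the first pin sets
TTM_PL_FLAG_NO_EVICT and adjusts the aperture accounting, and only the
last unpin undoes it; intermediate calls just count. A stripped-down
sketch of the pattern, with an invented object standing in for the TTM
machinery:

#include <stdbool.h>
#include <stdio.h>

struct obj { int pin_refcnt; bool no_evict; };

static void obj_pin(struct obj *o)
{
	if (o->pin_refcnt++)
		return;			/* already pinned, just count */
	o->no_evict = true;		/* first pin: lock placement */
}

static void obj_unpin(struct obj *o)
{
	if (--o->pin_refcnt)
		return;			/* still pinned elsewhere */
	o->no_evict = false;		/* last unpin: evictable again */
}

int main(void)
{
	struct obj o = { 0, false };

	obj_pin(&o);
	obj_pin(&o);
	obj_unpin(&o);
	printf("refcnt=%d no_evict=%d\n", o.pin_refcnt, o.no_evict); /* 1 1 */
	obj_unpin(&o);
	printf("refcnt=%d no_evict=%d\n", o.pin_refcnt, o.no_evict); /* 0 0 */
	return 0;
}
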
++
++int
++nouveau_bo_map(struct nouveau_bo *nvbo)
++{
++	int ret;
++
++	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
++	if (ret)
++		return ret;
++
++	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
++	ttm_bo_unreserve(&nvbo->bo);
++	return ret;
++}
++
++void
++nouveau_bo_unmap(struct nouveau_bo *nvbo)
++{
++	ttm_bo_kunmap(&nvbo->kmap);
++}
++
++u16
++nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
++{
++	bool is_iomem;
++	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
++	mem = &mem[index];
++	if (is_iomem)
++		return ioread16_native((void __force __iomem *)mem);
++	else
++		return *mem;
++}
++
++void
++nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
++{
++	bool is_iomem;
++	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
++	mem = &mem[index];
++	if (is_iomem)
++		iowrite16_native(val, (void __force __iomem *)mem);
++	else
++		*mem = val;
++}
++
++u32
++nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
++{
++	bool is_iomem;
++	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
++	mem = &mem[index];
++	if (is_iomem)
++		return ioread32_native((void __force __iomem *)mem);
++	else
++		return *mem;
++}
++
++void
++nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
++{
++	bool is_iomem;
++	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
++	mem = &mem[index];
++	if (is_iomem)
++		iowrite32_native(val, (void __force __iomem *)mem);
++	else
++		*mem = val;
++}
++
++static struct ttm_backend *
++nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
++	struct drm_device *dev = dev_priv->dev;
++
++	switch (dev_priv->gart_info.type) {
++#if __OS_HAS_AGP
++	case NOUVEAU_GART_AGP:
++		return ttm_agp_backend_init(bdev, dev->agp->bridge);
++#endif
++	case NOUVEAU_GART_SGDMA:
++		return nouveau_sgdma_init_ttm(dev);
++	default:
++		NV_ERROR(dev, "Unknown GART type %d\n",
++			 dev_priv->gart_info.type);
++		break;
++	}
++
++	return NULL;
++}
++
++static int
++nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
++{
++	/* We'll do this from user space. */
++	return 0;
++}
++
++static int
++nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
++			 struct ttm_mem_type_manager *man)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
++	struct drm_device *dev = dev_priv->dev;
++
++	switch (type) {
++	case TTM_PL_SYSTEM:
++		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
++		man->available_caching = TTM_PL_MASK_CACHING;
++		man->default_caching = TTM_PL_FLAG_CACHED;
++		break;
++	case TTM_PL_VRAM:
++		man->flags = TTM_MEMTYPE_FLAG_FIXED |
++			     TTM_MEMTYPE_FLAG_MAPPABLE |
++			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++		man->available_caching = TTM_PL_FLAG_UNCACHED |
++					 TTM_PL_FLAG_WC;
++		man->default_caching = TTM_PL_FLAG_WC;
++
++		man->io_addr = NULL;
++		man->io_offset = drm_get_resource_start(dev, 1);
++		man->io_size = drm_get_resource_len(dev, 1);
++		if (man->io_size > nouveau_mem_fb_amount(dev))
++			man->io_size = nouveau_mem_fb_amount(dev);
++
++		man->gpu_offset = dev_priv->vm_vram_base;
++		break;
++	case TTM_PL_TT:
++		switch (dev_priv->gart_info.type) {
++		case NOUVEAU_GART_AGP:
++			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
++			man->available_caching = TTM_PL_FLAG_UNCACHED;
++			man->default_caching = TTM_PL_FLAG_UNCACHED;
++			break;
++		case NOUVEAU_GART_SGDMA:
++			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
++				     TTM_MEMTYPE_FLAG_CMA;
++			man->available_caching = TTM_PL_MASK_CACHING;
++			man->default_caching = TTM_PL_FLAG_CACHED;
++			break;
++		default:
++			NV_ERROR(dev, "Unknown GART type: %d\n",
++				 dev_priv->gart_info.type);
++			return -EINVAL;
++		}
++
++		man->io_offset  = dev_priv->gart_info.aper_base;
++		man->io_size    = dev_priv->gart_info.aper_size;
++		man->io_addr   = NULL;
++		man->gpu_offset = dev_priv->vm_gart_base;
++		break;
++	default:
++		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++static void
++nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
++{
++	struct nouveau_bo *nvbo = nouveau_bo(bo);
++
++	switch (bo->mem.mem_type) {
++	case TTM_PL_VRAM:
++		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
++					 TTM_PL_FLAG_SYSTEM);
++		break;
++	default:
++		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
++		break;
++	}
++
++	*pl = nvbo->placement;
++}
++
++
++/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
++ * TTM_PL_{VRAM,TT} directly.
++ */
++
++static int
++nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
++			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
++			      struct ttm_mem_reg *new_mem)
++{
++	struct nouveau_fence *fence = NULL;
++	int ret;
++
++	ret = nouveau_fence_new(chan, &fence, true);
++	if (ret)
++		return ret;
++
++	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
++					evict, no_wait, new_mem);
++	if (nvbo->channel && nvbo->channel != chan)
++		ret = nouveau_fence_wait(fence, NULL, false, false);
++	nouveau_fence_unref((void *)&fence);
++	return ret;
++}
++
++static inline uint32_t
++nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
++		      struct ttm_mem_reg *mem)
++{
++	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
++		if (mem->mem_type == TTM_PL_TT)
++			return NvDmaGART;
++		return NvDmaVRAM;
++	}
++
++	if (mem->mem_type == TTM_PL_TT)
++		return chan->gart_handle;
++	return chan->vram_handle;
++}
++
++static int
++nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
++		     int no_wait, struct ttm_mem_reg *new_mem)
++{
++	struct nouveau_bo *nvbo = nouveau_bo(bo);
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
++	struct ttm_mem_reg *old_mem = &bo->mem;
++	struct nouveau_channel *chan;
++	uint64_t src_offset, dst_offset;
++	uint32_t page_count;
++	int ret;
++
++	chan = nvbo->channel;
++	if (!chan || nvbo->tile_flags || nvbo->no_vm)
++		chan = dev_priv->channel;
++
++	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
++	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
++	if (chan != dev_priv->channel) {
++		if (old_mem->mem_type == TTM_PL_TT)
++			src_offset += dev_priv->vm_gart_base;
++		else
++			src_offset += dev_priv->vm_vram_base;
++
++		if (new_mem->mem_type == TTM_PL_TT)
++			dst_offset += dev_priv->vm_gart_base;
++		else
++			dst_offset += dev_priv->vm_vram_base;
++	}
++
++	ret = RING_SPACE(chan, 3);
++	if (ret)
++		return ret;
++	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
++	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
++	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
++
++	if (dev_priv->card_type >= NV_50) {
++		ret = RING_SPACE(chan, 4);
++		if (ret)
++			return ret;
++		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
++		OUT_RING(chan, 1);
++		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
++		OUT_RING(chan, 1);
++	}
++
++	page_count = new_mem->num_pages;
++	while (page_count) {
++		int line_count = (page_count > 2047) ? 2047 : page_count;
++
++		if (dev_priv->card_type >= NV_50) {
++			ret = RING_SPACE(chan, 3);
++			if (ret)
++				return ret;
++			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
++			OUT_RING(chan, upper_32_bits(src_offset));
++			OUT_RING(chan, upper_32_bits(dst_offset));
++		}
++		ret = RING_SPACE(chan, 11);
++		if (ret)
++			return ret;
++		BEGIN_RING(chan, NvSubM2MF,
++				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
++		OUT_RING(chan, lower_32_bits(src_offset));
++		OUT_RING(chan, lower_32_bits(dst_offset));
++		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
++		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
++		OUT_RING(chan, PAGE_SIZE); /* line_length */
++		OUT_RING(chan, line_count);
++		OUT_RING(chan, (1<<8)|(1<<0));
++		OUT_RING(chan, 0);
++		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
++		OUT_RING(chan, 0);
++
++		page_count -= line_count;
++		src_offset += (PAGE_SIZE * line_count);
++		dst_offset += (PAGE_SIZE * line_count);
++	}
++
++	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
++}
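
The copy loop is bounded by the M2MF method accepting at most 2047
lines per request, one page per line, so a large buffer is sliced into
chunks and both offsets advance by PAGE_SIZE * line_count per round.
The batching arithmetic in isolation, with an invented page count:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define MAX_LINES 2047u

int main(void)
{
	uint32_t page_count = 5000;		/* invented buffer size */
	uint64_t src = 0, dst = 0x10000000ull;	/* invented offsets */

	while (page_count) {
		uint32_t lines = page_count > MAX_LINES ?
				 MAX_LINES : page_count;

		printf("copy %4u pages: src=0x%09llx dst=0x%09llx\n",
		       lines, (unsigned long long)src,
		       (unsigned long long)dst);

		page_count -= lines;
		src += (uint64_t)PAGE_SIZE * lines;
		dst += (uint64_t)PAGE_SIZE * lines;
	}
	return 0;
}
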
++
++static int
++nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
++		      bool no_wait, struct ttm_mem_reg *new_mem)
++{
++	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
++	struct ttm_placement placement;
++	struct ttm_mem_reg tmp_mem;
++	int ret;
++
++	placement.fpfn = placement.lpfn = 0;
++	placement.num_placement = placement.num_busy_placement = 1;
++	placement.placement = placement.busy_placement = &placement_memtype;
++
++	tmp_mem = *new_mem;
++	tmp_mem.mm_node = NULL;
++	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
++	if (ret)
++		return ret;
++
++	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
++	if (ret)
++		goto out;
++
++	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
++	if (ret)
++		goto out;
++
++	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
++out:
++	if (tmp_mem.mm_node) {
++		spin_lock(&bo->bdev->glob->lru_lock);
++		drm_mm_put_block(tmp_mem.mm_node);
++		spin_unlock(&bo->bdev->glob->lru_lock);
++	}
++
++	return ret;
++}
++
++static int
++nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
++		      bool no_wait, struct ttm_mem_reg *new_mem)
++{
++	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
++	struct ttm_placement placement;
++	struct ttm_mem_reg tmp_mem;
++	int ret;
++
++	placement.fpfn = placement.lpfn = 0;
++	placement.num_placement = placement.num_busy_placement = 1;
++	placement.placement = placement.busy_placement = &placement_memtype;
++
++	tmp_mem = *new_mem;
++	tmp_mem.mm_node = NULL;
++	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
++	if (ret)
++		return ret;
++
++	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
++	if (ret)
++		goto out;
++
++	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
++	if (ret)
++		goto out;
++
++out:
++	if (tmp_mem.mm_node) {
++		spin_lock(&bo->bdev->glob->lru_lock);
++		drm_mm_put_block(tmp_mem.mm_node);
++		spin_unlock(&bo->bdev->glob->lru_lock);
++	}
++
++	return ret;
++}
++
++static int
++nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
++		   struct nouveau_tile_reg **new_tile)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
++	struct drm_device *dev = dev_priv->dev;
++	struct nouveau_bo *nvbo = nouveau_bo(bo);
++	uint64_t offset;
++	int ret;
++
++	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
++		/* Nothing to do. */
++		*new_tile = NULL;
++		return 0;
++	}
++
++	offset = new_mem->mm_node->start << PAGE_SHIFT;
++
++	if (dev_priv->card_type == NV_50) {
++		ret = nv50_mem_vm_bind_linear(dev,
++					      offset + dev_priv->vm_vram_base,
++					      new_mem->size, nvbo->tile_flags,
++					      offset);
++		if (ret)
++			return ret;
++
++	} else if (dev_priv->card_type >= NV_10) {
++		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
++						nvbo->tile_mode);
++	}
++
++	return 0;
++}
++
++static void
++nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
++		      struct nouveau_tile_reg *new_tile,
++		      struct nouveau_tile_reg **old_tile)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
++	struct drm_device *dev = dev_priv->dev;
++
++	if (dev_priv->card_type >= NV_10 &&
++	    dev_priv->card_type < NV_50) {
++		if (*old_tile)
++			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
++
++		*old_tile = new_tile;
++	}
++}
++
++static int
++nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
++		bool no_wait, struct ttm_mem_reg *new_mem)
++{
++	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
++	struct nouveau_bo *nvbo = nouveau_bo(bo);
++	struct ttm_mem_reg *old_mem = &bo->mem;
++	struct nouveau_tile_reg *new_tile = NULL;
++	int ret = 0;
++
++	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
++	if (ret)
++		return ret;
++
++	/* Software copy if the card isn't up and running yet. */
++	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
++	    !dev_priv->channel) {
++		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++		goto out;
++	}
++
++	/* Fake bo copy. */
++	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
++		BUG_ON(bo->mem.mm_node != NULL);
++		bo->mem = *new_mem;
++		new_mem->mm_node = NULL;
++		goto out;
++	}
++
++	/* Hardware assisted copy. */
++	if (new_mem->mem_type == TTM_PL_SYSTEM)
++		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
++	else if (old_mem->mem_type == TTM_PL_SYSTEM)
++		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
++	else
++		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
++
++	if (!ret)
++		goto out;
++
++	/* Fallback to software copy. */
++	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
++
++out:
++	if (ret)
++		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
++	else
++		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
++
++	return ret;
++}
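
The strategy selection reads straight off the memory types: moves
touching system memory bounce through a TT placement (flipd on the way
out of VRAM, flips on the way in), VRAM/TT moves go directly through
M2MF, and an uninitialised card or a failed accelerated copy falls back
to a CPU memcpy. A toy sketch of that dispatch, with invented enums and
stand-in actions:

#include <stdio.h>

enum mem { MEM_SYSTEM, MEM_TT, MEM_VRAM };

static void move(enum mem from, enum mem to, int hw_ok)
{
	if (!hw_ok) {
		puts("memcpy (card not up yet)");
		return;
	}
	if (to == MEM_SYSTEM)
		puts("flipd: GPU copy into TT, then TTM move to system");
	else if (from == MEM_SYSTEM)
		puts("flips: TTM move into TT, then GPU copy");
	else
		puts("m2mf: direct GPU copy");
}

int main(void)
{
	move(MEM_VRAM, MEM_SYSTEM, 1);
	move(MEM_SYSTEM, MEM_VRAM, 1);
	move(MEM_VRAM, MEM_TT, 0);
	return 0;
}
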
++
++static int
++nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
++{
++	return 0;
++}
++
++struct ttm_bo_driver nouveau_bo_driver = {
++	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
++	.invalidate_caches = nouveau_bo_invalidate_caches,
++	.init_mem_type = nouveau_bo_init_mem_type,
++	.evict_flags = nouveau_bo_evict_flags,
++	.move = nouveau_bo_move,
++	.verify_access = nouveau_bo_verify_access,
++	.sync_obj_signaled = nouveau_fence_signalled,
++	.sync_obj_wait = nouveau_fence_wait,
++	.sync_obj_flush = nouveau_fence_flush,
++	.sync_obj_unref = nouveau_fence_unref,
++	.sync_obj_ref = nouveau_fence_ref,
++};
++
+diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
+new file mode 100644
+index 0000000..ee2b845
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
+@@ -0,0 +1,478 @@
++/*
++ * Copyright 1993-2003 NVIDIA, Corporation
++ * Copyright 2007-2009 Stuart Bennett
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
++ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_hw.h"
++
++/****************************************************************************\
++*                                                                            *
++* The video arbitration routines calculate some "magic" numbers.  Fixes      *
++* the snow seen when accessing the framebuffer without it.                   *
++* It just works (I hope).                                                    *
++*                                                                            *
++\****************************************************************************/
++
++struct nv_fifo_info {
++	int lwm;
++	int burst;
++};
++
++struct nv_sim_state {
++	int pclk_khz;
++	int mclk_khz;
++	int nvclk_khz;
++	int bpp;
++	int mem_page_miss;
++	int mem_latency;
++	int memory_type;
++	int memory_width;
++	int two_heads;
++};
++
++static void
++nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
++{
++	int pagemiss, cas, width, bpp;
++	int nvclks, mclks, pclks, crtpagemiss;
++	int found, mclk_extra, mclk_loop, cbs, m1, p1;
++	int mclk_freq, pclk_freq, nvclk_freq;
++	int us_m, us_n, us_p, crtc_drain_rate;
++	int cpm_us, us_crt, clwm;
++
++	pclk_freq = arb->pclk_khz;
++	mclk_freq = arb->mclk_khz;
++	nvclk_freq = arb->nvclk_khz;
++	pagemiss = arb->mem_page_miss;
++	cas = arb->mem_latency;
++	width = arb->memory_width >> 6;
++	bpp = arb->bpp;
++	cbs = 128;
++
++	pclks = 2;
++	nvclks = 10;
++	mclks = 13 + cas;
++	mclk_extra = 3;
++	found = 0;
++
++	while (!found) {
++		found = 1;
++
++		mclk_loop = mclks + mclk_extra;
++		us_m = mclk_loop * 1000 * 1000 / mclk_freq;
++		us_n = nvclks * 1000 * 1000 / nvclk_freq;
++		us_p = nvclks * 1000 * 1000 / pclk_freq;
++
++		crtc_drain_rate = pclk_freq * bpp / 8;
++		crtpagemiss = 2;
++		crtpagemiss += 1;
++		cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
++		us_crt = cpm_us + us_m + us_n + us_p;
++		clwm = us_crt * crtc_drain_rate / (1000 * 1000);
++		clwm++;
++
++		m1 = clwm + cbs - 512;
++		p1 = m1 * pclk_freq / mclk_freq;
++		p1 = p1 * bpp / 8;
++		if ((p1 < m1 && m1 > 0) || clwm > 519) {
++			found = !mclk_extra;
++			mclk_extra--;
++		}
++		if (clwm < 384)
++			clwm = 384;
++
++		fifo->lwm = clwm;
++		fifo->burst = cbs;
++	}
++}
++
++static void
++nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
++{
++	int fill_rate, drain_rate;
++	int pclks, nvclks, mclks, xclks;
++	int pclk_freq, nvclk_freq, mclk_freq;
++	int fill_lat, extra_lat;
++	int max_burst_o, max_burst_l;
++	int fifo_len, min_lwm, max_lwm;
++	const int burst_lat = 80; /* Maximum allowable latency due
++				   * to the CRTC FIFO burst. (ns) */
++
++	pclk_freq = arb->pclk_khz;
++	nvclk_freq = arb->nvclk_khz;
++	mclk_freq = arb->mclk_khz;
++
++	fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
++	drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */
++
++	fifo_len = arb->two_heads ? 1536 : 1024; /* B */
++
++	/* Fixed FIFO refill latency. */
++
++	pclks = 4;	/* lwm detect. */
++
++	nvclks = 3	/* lwm -> sync. */
++		+ 2	/* fbi bus cycles (1 req + 1 busy) */
++		+ 1	/* 2 edge sync.  may be very close to edge so
++			 * just put one. */
++		+ 1	/* fbi_d_rdv_n */
++		+ 1	/* Fbi_d_rdata */
++		+ 1;	/* crtfifo load */
++
++	mclks = 1	/* 2 edge sync.  may be very close to edge so
++			 * just put one. */
++		+ 1	/* arb_hp_req */
++		+ 5	/* tiling pipeline */
++		+ 2	/* latency fifo */
++		+ 2	/* memory request to fbio block */
++		+ 7;	/* data returned from fbio block */
++
++	/* Need to accumulate 256 bits for read */
++	mclks += (arb->memory_type == 0 ? 2 : 1)
++		* arb->memory_width / 32;
++
++	fill_lat = mclks * 1000 * 1000 / mclk_freq   /* minimum mclk latency */
++		+ nvclks * 1000 * 1000 / nvclk_freq  /* nvclk latency */
++		+ pclks * 1000 * 1000 / pclk_freq;   /* pclk latency */
++
++	/* Conditional FIFO refill latency. */
++
++	xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
++						* the overlay. */
++		+ 2 * arb->mem_page_miss       /* Extra pagemiss latency. */
++		+ (arb->bpp == 32 ? 8 : 4);    /* Margin of error. */
++
++	extra_lat = xclks * 1000 * 1000 / mclk_freq;
++
++	if (arb->two_heads)
++		/* Account for another CRTC. */
++		extra_lat += fill_lat + extra_lat + burst_lat;
++
++	/* FIFO burst */
++
++	/* Max burst not leading to overflows. */
++	max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
++		* (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
++	fifo->burst = min(max_burst_o, 1024);
++
++	/* Max burst value with an acceptable latency. */
++	max_burst_l = burst_lat * fill_rate / (1000 * 1000);
++	fifo->burst = min(max_burst_l, fifo->burst);
++
++	fifo->burst = rounddown_pow_of_two(fifo->burst);
++
++	/* FIFO low watermark */
++
++	min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
++	max_lwm = fifo_len - fifo->burst
++		+ fill_lat * drain_rate / (1000 * 1000)
++		+ fifo->burst * drain_rate / fill_rate;
++
++	fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
++}
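
The derivation boils down to a few rate/latency products: fill and
drain rates in kB/s, latencies in ns, the burst clamped to the largest
power of two inside the latency budget, and the low watermark placed
10% of the way between its bounds. A standalone sketch with invented
clock inputs and stand-in latencies (the real ones are accumulated from
the clock-cycle counts above):

#include <stdio.h>

static int rounddown_pow2(int v)
{
	int r = 1;

	while (r * 2 <= v)
		r *= 2;
	return r;
}

int main(void)
{
	int mclk_khz = 100000, pclk_khz = 25000, width = 128, bpp = 32;
	int fifo_len = 1024;				/* single head */
	const int burst_lat = 80;			/* ns */
	int fill_lat = 500, extra_lat = 300;		/* ns, stand-ins */

	int fill_rate = mclk_khz * width / 8;		/* kB/s */
	int drain_rate = pclk_khz * bpp / 8;		/* kB/s */

	/* largest burst that stays within the latency budget */
	int burst = burst_lat * fill_rate / (1000 * 1000);
	burst = rounddown_pow2(burst > 1024 ? 1024 : burst);

	int min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
	int max_lwm = fifo_len - burst
		+ fill_lat * drain_rate / (1000 * 1000)
		+ burst * drain_rate / fill_rate;
	int lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100;

	printf("burst=%d lwm=%d\n", burst, lwm);	/* burst=128 lwm=168 */
	return 0;
}
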
++
++static void
++nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
++		int *burst, int *lwm)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv_fifo_info fifo_data;
++	struct nv_sim_state sim_data;
++	int MClk = nouveau_hw_get_clock(dev, MPLL);
++	int NVClk = nouveau_hw_get_clock(dev, NVPLL);
++	uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
++
++	sim_data.pclk_khz = VClk;
++	sim_data.mclk_khz = MClk;
++	sim_data.nvclk_khz = NVClk;
++	sim_data.bpp = bpp;
++	sim_data.two_heads = nv_two_heads(dev);
++	if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
++	    (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
++		uint32_t type;
++
++		pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
++
++		sim_data.memory_type = (type >> 12) & 1;
++		sim_data.memory_width = 64;
++		sim_data.mem_latency = 3;
++		sim_data.mem_page_miss = 10;
++	} else {
++		sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
++		sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
++		sim_data.mem_latency = cfg1 & 0xf;
++		sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
++	}
++
++	if (dev_priv->card_type == NV_04)
++		nv04_calc_arb(&fifo_data, &sim_data);
++	else
++		nv10_calc_arb(&fifo_data, &sim_data);
++
++	*burst = ilog2(fifo_data.burst >> 4);
++	*lwm = fifo_data.lwm >> 3;
++}
++
++static void
++nv30_update_arb(int *burst, int *lwm)
++{
++	unsigned int fifo_size, burst_size, graphics_lwm;
++
++	fifo_size = 2048;
++	burst_size = 512;
++	graphics_lwm = fifo_size - burst_size;
++
++	*burst = ilog2(burst_size >> 5);
++	*lwm = graphics_lwm >> 3;
++}
++
++void
++nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->card_type < NV_30)
++		nv04_update_arb(dev, vclk, bpp, burst, lwm);
++	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
++		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
++		*burst = 128;
++		*lwm = 0x0480;
++	} else
++		nv30_update_arb(burst, lwm);
++}
++
++static int
++getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
++	      struct nouveau_pll_vals *bestpv)
++{
++	/* Find M, N and P for a single stage PLL
++	 *
++	 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
++	 * values, but we're too lazy to use those atm
++	 *
++	 * "clk" parameter in kHz
++	 * returns calculated clock
++	 */
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int cv = dev_priv->vbios->chip_version;
++	int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
++	int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
++	int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
++	int minU = pll_lim->vco1.min_inputfreq;
++	int maxU = pll_lim->vco1.max_inputfreq;
++	int minP = pll_lim->max_p ? pll_lim->min_p : 0;
++	int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
++	int crystal = pll_lim->refclk;
++	int M, N, thisP, P;
++	int clkP, calcclk;
++	int delta, bestdelta = INT_MAX;
++	int bestclk = 0;
++
++	/* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
++	/* possibly correlated with introduction of 27MHz crystal */
++	if (dev_priv->card_type < NV_50) {
++		if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
++			if (clk > 250000)
++				maxM = 6;
++			if (clk > 340000)
++				maxM = 2;
++		} else if (cv < 0x40) {
++			if (clk > 150000)
++				maxM = 6;
++			if (clk > 200000)
++				maxM = 4;
++			if (clk > 340000)
++				maxM = 2;
++		}
++	}
++
++	P = pll_lim->max_p ? maxP : (1 << maxP);
++	if ((clk * P) < minvco) {
++		minvco = clk * maxP;
++		maxvco = minvco * 2;
++	}
++
++	if (clk + clk/200 > maxvco)	/* +0.5% */
++		maxvco = clk + clk/200;
++
++	/* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
++	for (thisP = minP; thisP <= maxP; thisP++) {
++		P = pll_lim->max_p ? thisP : (1 << thisP);
++		clkP = clk * P;
++
++		if (clkP < minvco)
++			continue;
++		if (clkP > maxvco)
++			return bestclk;
++
++		for (M = minM; M <= maxM; M++) {
++			if (crystal/M < minU)
++				return bestclk;
++			if (crystal/M > maxU)
++				continue;
++
++			/* add crystal/2 to round better */
++			N = (clkP * M + crystal/2) / crystal;
++
++			if (N < minN)
++				continue;
++			if (N > maxN)
++				break;
++
++			/* more rounding additions */
++			calcclk = ((N * crystal + P/2) / P + M/2) / M;
++			delta = abs(calcclk - clk);
++			/* we do an exhaustive search rather than terminating
++			 * on an optimality condition...
++			 */
++			if (delta < bestdelta) {
++				bestdelta = delta;
++				bestclk = calcclk;
++				bestpv->N1 = N;
++				bestpv->M1 = M;
++				bestpv->log2P = thisP;
++				if (delta == 0)	/* except this one */
++					return bestclk;
++			}
++		}
++	}
++
++	return bestclk;
++}
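
Stripped of the VCO and input-frequency limits, the search is a
brute-force scan for the (N, M, log2P) triple whose
crystal * N / M >> log2P lands closest to the requested clock. A
minimal sketch with invented limits (the real bounds come from the
BIOS pll_lims):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int crystal = 27000;			/* kHz, typical 27MHz ref */
	int target = 250000;			/* kHz, requested clock */
	int bestN = 0, bestM = 0, bestP = 0, bestdelta = INT_MAX;
	int N, M, P;

	for (P = 0; P <= 6; P++)
		for (M = 1; M <= 13; M++)
			for (N = 1; N <= 255; N++) {
				int calc = crystal * N / M >> P;
				int delta = abs(calc - target);

				if (delta < bestdelta) {
					bestdelta = delta;
					bestN = N;
					bestM = M;
					bestP = P;
				}
			}

	printf("N=%d M=%d log2P=%d -> %d kHz (target %d)\n", bestN, bestM,
	       bestP, crystal * bestN / bestM >> bestP, target);
	return 0;
}
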
++
++static int
++getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
++	      struct nouveau_pll_vals *bestpv)
++{
++	/* Find M, N and P for a two stage PLL
++	 *
++	 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
++	 * values, but we're too lazy to use those atm
++	 *
++	 * "clk" parameter in kHz
++	 * returns calculated clock
++	 */
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int chip_version = dev_priv->vbios->chip_version;
++	int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
++	int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
++	int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
++	int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
++	int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
++	int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
++	int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
++	int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
++	int maxlog2P = pll_lim->max_usable_log2p;
++	int crystal = pll_lim->refclk;
++	bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
++	int M1, N1, M2, N2, log2P;
++	int clkP, calcclk1, calcclk2, calcclkout;
++	int delta, bestdelta = INT_MAX;
++	int bestclk = 0;
++
++	int vco2 = (maxvco2 - maxvco2/200) / 2;
++	for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
++		;
++	clkP = clk << log2P;
++
++	if (maxvco2 < clk + clk/200)	/* +0.5% */
++		maxvco2 = clk + clk/200;
++
++	for (M1 = minM1; M1 <= maxM1; M1++) {
++		if (crystal/M1 < minU1)
++			return bestclk;
++		if (crystal/M1 > maxU1)
++			continue;
++
++		for (N1 = minN1; N1 <= maxN1; N1++) {
++			calcclk1 = crystal * N1 / M1;
++			if (calcclk1 < minvco1)
++				continue;
++			if (calcclk1 > maxvco1)
++				break;
++
++			for (M2 = minM2; M2 <= maxM2; M2++) {
++				if (calcclk1/M2 < minU2)
++					break;
++				if (calcclk1/M2 > maxU2)
++					continue;
++
++				/* add calcclk1/2 to round better */
++				N2 = (clkP * M2 + calcclk1/2) / calcclk1;
++				if (N2 < minN2)
++					continue;
++				if (N2 > maxN2)
++					break;
++
++				if (!fixedgain2) {
++					if (chip_version < 0x60)
++						if (N2/M2 < 4 || N2/M2 > 10)
++							continue;
++
++					calcclk2 = calcclk1 * N2 / M2;
++					if (calcclk2 < minvco2)
++						break;
++					if (calcclk2 > maxvco2)
++						continue;
++				} else
++					calcclk2 = calcclk1;
++
++				calcclkout = calcclk2 >> log2P;
++				delta = abs(calcclkout - clk);
++				/* we do an exhaustive search rather than terminating
++				 * on an optimality condition...
++				 */
++				if (delta < bestdelta) {
++					bestdelta = delta;
++					bestclk = calcclkout;
++					bestpv->N1 = N1;
++					bestpv->M1 = M1;
++					bestpv->N2 = N2;
++					bestpv->M2 = M2;
++					bestpv->log2P = log2P;
++					if (delta == 0)	/* except this one */
++						return bestclk;
++				}
++			}
++		}
++	}
++
++	return bestclk;
++}
++
++int
++nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
++		     struct nouveau_pll_vals *pv)
++{
++	int outclk;
++
++	if (!pll_lim->vco2.maxfreq)
++		outclk = getMNP_single(dev, pll_lim, clk, pv);
++	else
++		outclk = getMNP_double(dev, pll_lim, clk, pv);
++
++	if (!outclk)
++		NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
++
++	return outclk;
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
+new file mode 100644
+index 0000000..2281f99
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
+@@ -0,0 +1,430 @@
++/*
++ * Copyright 2005-2006 Stephane Marchesin
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++#include "nouveau_dma.h"
++
++static int
++nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_bo *pb = chan->pushbuf_bo;
++	struct nouveau_gpuobj *pushbuf = NULL;
++	uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
++	int ret;
++
++	if (pb->bo.mem.mem_type == TTM_PL_TT) {
++		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
++						  dev_priv->gart_info.aper_size,
++						  NV_DMA_ACCESS_RO, &pushbuf,
++						  NULL);
++		chan->pushbuf_base = start;
++	} else
++	if (dev_priv->card_type != NV_04) {
++		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
++					     dev_priv->fb_available_size,
++					     NV_DMA_ACCESS_RO,
++					     NV_DMA_TARGET_VIDMEM, &pushbuf);
++		chan->pushbuf_base = start;
++	} else {
++		/* NV04 cmdbuf hack, from original ddx.. not sure of its
++		 * exact reason for existing :)  PCI access to cmdbuf in
++		 * VRAM.
++		 */
++		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++					     drm_get_resource_start(dev, 1),
++					     dev_priv->fb_available_size,
++					     NV_DMA_ACCESS_RO,
++					     NV_DMA_TARGET_PCI, &pushbuf);
++		chan->pushbuf_base = start;
++	}
++
++	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
++	if (ret) {
++		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
++		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
++			nouveau_gpuobj_del(dev, &pushbuf);
++		return ret;
++	}
++
++	return 0;
++}
++
++static struct nouveau_bo *
++nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
++{
++	struct nouveau_bo *pushbuf = NULL;
++	int location, ret;
++
++	if (nouveau_vram_pushbuf)
++		location = TTM_PL_FLAG_VRAM;
++	else
++		location = TTM_PL_FLAG_TT;
++
++	ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
++			     true, &pushbuf);
++	if (ret) {
++		NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
++		return NULL;
++	}
++
++	ret = nouveau_bo_pin(pushbuf, location);
++	if (ret) {
++		NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
++		nouveau_bo_ref(NULL, &pushbuf);
++		return NULL;
++	}
++
++	return pushbuf;
++}
++
++/* allocates and initializes a fifo for user space consumption */
++int
++nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
++		      struct drm_file *file_priv,
++		      uint32_t vram_handle, uint32_t tt_handle)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	struct nouveau_channel *chan;
++	int channel, user;
++	int ret;
++
++	/*
++	 * Alright, here is the full story.
++	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
++	 * no complicated crash-prone context switches).
++	 * We allocate a new context for each app and let it write to it
++	 * directly (woo, full userspace command submission!).
++	 * When there are no more contexts, you lose.
++	 */
++	for (channel = 0; channel < pfifo->channels; channel++) {
++		if (dev_priv->fifos[channel] == NULL)
++			break;
++	}
++
++	/* no more fifos. you lose. */
++	if (channel == pfifo->channels)
++		return -EINVAL;
++
++	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
++					   GFP_KERNEL);
++	if (!dev_priv->fifos[channel])
++		return -ENOMEM;
++	dev_priv->fifo_alloc_count++;
++	chan = dev_priv->fifos[channel];
++	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
++	INIT_LIST_HEAD(&chan->fence.pending);
++	chan->dev = dev;
++	chan->id = channel;
++	chan->file_priv = file_priv;
++	chan->vram_handle = vram_handle;
++	chan->gart_handle = tt_handle;
++
++	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
++
++	/* Allocate DMA push buffer */
++	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
++	if (!chan->pushbuf_bo) {
++		ret = -ENOMEM;
++		NV_ERROR(dev, "pushbuf %d\n", ret);
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	nouveau_dma_pre_init(chan);
++
++	/* Locate channel's user control regs */
++	if (dev_priv->card_type < NV_40)
++		user = NV03_USER(channel);
++	else
++	if (dev_priv->card_type < NV_50)
++		user = NV40_USER(channel);
++	else
++		user = NV50_USER(channel);
++
++	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
++								PAGE_SIZE);
++	if (!chan->user) {
++		NV_ERROR(dev, "ioremap of regs failed.\n");
++		nouveau_channel_free(chan);
++		return -ENOMEM;
++	}
++	chan->user_put = 0x40;
++	chan->user_get = 0x44;
++
++	/* Allocate space for per-channel fixed notifier memory */
++	ret = nouveau_notifier_init_channel(chan);
++	if (ret) {
++		NV_ERROR(dev, "ntfy %d\n", ret);
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	/* Setup channel's default objects */
++	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
++	if (ret) {
++		NV_ERROR(dev, "gpuobj %d\n", ret);
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	/* Create a dma object for the push buffer */
++	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
++	if (ret) {
++		NV_ERROR(dev, "pbctxdma %d\n", ret);
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	/* disable the fifo caches */
++	pfifo->reassign(dev, false);
++
++	/* Create a graphics context for new channel */
++	ret = pgraph->create_context(chan);
++	if (ret) {
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	/* Construct initial RAMFC for new channel */
++	ret = pfifo->create_context(chan);
++	if (ret) {
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	pfifo->reassign(dev, true);
++
++	ret = nouveau_dma_init(chan);
++	if (!ret)
++		ret = nouveau_fence_init(chan);
++	if (ret) {
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	nouveau_debugfs_channel_init(chan);
++
++	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
++	*chan_ret = chan;
++	return 0;
++}
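
Channel IDs come from a first-free scan of a fixed table, with failure
when every slot is taken and the slot cleared again on free. The
allocation pattern in miniature (slot count invented, no locking):

#include <stdio.h>
#include <stdlib.h>

#define CHANNELS 8

static void *fifos[CHANNELS];

static int channel_alloc(void)
{
	int i;

	for (i = 0; i < CHANNELS; i++)
		if (fifos[i] == NULL)
			break;
	if (i == CHANNELS)
		return -1;		/* no more fifos */

	fifos[i] = malloc(16);		/* stand-in for the channel struct */
	return fifos[i] ? i : -1;
}

static void channel_free(int id)
{
	free(fifos[id]);
	fifos[id] = NULL;
}

int main(void)
{
	int a = channel_alloc(), b = channel_alloc();

	printf("got channels %d and %d\n", a, b);	/* 0 and 1 */
	channel_free(a);
	printf("reused channel %d\n", channel_alloc());	/* 0 again */
	return 0;
}
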
++
++/* stops a fifo */
++void
++nouveau_channel_free(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	unsigned long flags;
++	int ret;
++
++	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
++
++	nouveau_debugfs_channel_fini(chan);
++
++	/* Give outstanding push buffers a chance to complete */
++	spin_lock_irqsave(&chan->fence.lock, flags);
++	nouveau_fence_update(chan);
++	spin_unlock_irqrestore(&chan->fence.lock, flags);
++	if (chan->fence.sequence != chan->fence.sequence_ack) {
++		struct nouveau_fence *fence = NULL;
++
++		ret = nouveau_fence_new(chan, &fence, true);
++		if (ret == 0) {
++			ret = nouveau_fence_wait(fence, NULL, false, false);
++			nouveau_fence_unref((void *)&fence);
++		}
++
++		if (ret)
++			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
++	}
++
++	/* Ensure all outstanding fences are signaled.  They should be if the
++	 * above attempts at idling were OK, but if we failed this'll tell TTM
++	 * we're done with the buffers.
++	 */
++	nouveau_fence_fini(chan);
++
++	/* Ensure the channel is no longer active on the GPU */
++	pfifo->reassign(dev, false);
++
++	pgraph->fifo_access(dev, false);
++	if (pgraph->channel(dev) == chan)
++		pgraph->unload_context(dev);
++	pgraph->destroy_context(chan);
++	pgraph->fifo_access(dev, true);
++
++	if (pfifo->channel_id(dev) == chan->id) {
++		pfifo->disable(dev);
++		pfifo->unload_context(dev);
++		pfifo->enable(dev);
++	}
++	pfifo->destroy_context(chan);
++
++	pfifo->reassign(dev, true);
++
++	/* Release the channel's resources */
++	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
++	if (chan->pushbuf_bo) {
++		nouveau_bo_unpin(chan->pushbuf_bo);
++		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
++	}
++	nouveau_gpuobj_channel_takedown(chan);
++	nouveau_notifier_takedown_channel(chan);
++	if (chan->user)
++		iounmap(chan->user);
++
++	dev_priv->fifos[chan->id] = NULL;
++	dev_priv->fifo_alloc_count--;
++	kfree(chan);
++}
++
++/* cleans up all the fifos from file_priv */
++void
++nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++	int i;
++
++	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
++	for (i = 0; i < engine->fifo.channels; i++) {
++		struct nouveau_channel *chan = dev_priv->fifos[i];
++
++		if (chan && chan->file_priv == file_priv)
++			nouveau_channel_free(chan);
++	}
++}
++
++int
++nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
++		      int channel)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++
++	if (channel >= engine->fifo.channels)
++		return 0;
++	if (dev_priv->fifos[channel] == NULL)
++		return 0;
++
++	return (dev_priv->fifos[channel]->file_priv == file_priv);
++}
++
++/***********************************
++ * ioctls wrapping the functions
++ ***********************************/
++
++static int
++nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
++			 struct drm_file *file_priv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_nouveau_channel_alloc *init = data;
++	struct nouveau_channel *chan;
++	int ret;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	if (dev_priv->engine.graph.accel_blocked)
++		return -ENODEV;
++
++	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
++		return -EINVAL;
++
++	ret = nouveau_channel_alloc(dev, &chan, file_priv,
++				    init->fb_ctxdma_handle,
++				    init->tt_ctxdma_handle);
++	if (ret)
++		return ret;
++	init->channel  = chan->id;
++
++	init->subchan[0].handle = NvM2MF;
++	if (dev_priv->card_type < NV_50)
++		init->subchan[0].grclass = 0x0039;
++	else
++		init->subchan[0].grclass = 0x5039;
++	init->subchan[1].handle = NvSw;
++	init->subchan[1].grclass = NV_SW;
++	init->nr_subchan = 2;
++
++	/* Named memory object area */
++	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
++				    &init->notifier_handle);
++	if (ret) {
++		nouveau_channel_free(chan);
++		return ret;
++	}
++
++	return 0;
++}
++
++static int
++nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_nouveau_channel_free *cfree = data;
++	struct nouveau_channel *chan;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
++
++	nouveau_channel_free(chan);
++	return 0;
++}
++
++/***********************************
++ * finally, the ioctl table
++ ***********************************/
++
++struct drm_ioctl_desc nouveau_ioctls[] = {
++	DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
++	DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
++};
++
++int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+new file mode 100644
+index 0000000..d2f6335
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -0,0 +1,846 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include <acpi/button.h>
++
++#include "drmP.h"
++#include "drm_edid.h"
++#include "drm_crtc_helper.h"
++
++#include "nouveau_reg.h"
++#include "nouveau_drv.h"
++#include "nouveau_encoder.h"
++#include "nouveau_crtc.h"
++#include "nouveau_connector.h"
++#include "nouveau_hw.h"
++
++static inline struct drm_encoder_slave_funcs *
++get_slave_funcs(struct nouveau_encoder *enc)
++{
++	return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
++}
++
++static struct nouveau_encoder *
++find_encoder_by_type(struct drm_connector *connector, int type)
++{
++	struct drm_device *dev = connector->dev;
++	struct nouveau_encoder *nv_encoder;
++	struct drm_mode_object *obj;
++	int i, id;
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++		id = connector->encoder_ids[i];
++		if (!id)
++			break;
++
++		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
++		if (!obj)
++			continue;
++		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
++
++		if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
++			return nv_encoder;
++	}
++
++	return NULL;
++}
++
++struct nouveau_connector *
++nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
++{
++	struct drm_device *dev = to_drm_encoder(encoder)->dev;
++	struct drm_connector *drm_connector;
++
++	list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
++		if (drm_connector->encoder == to_drm_encoder(encoder))
++			return nouveau_connector(drm_connector);
++	}
++
++	return NULL;
++}
++
++
++static void
++nouveau_connector_destroy(struct drm_connector *drm_connector)
++{
++	struct nouveau_connector *nv_connector =
++		nouveau_connector(drm_connector);
++	struct drm_device *dev;
++
++	if (!nv_connector)
++		return;
++
++	dev = nv_connector->base.dev;
++	NV_DEBUG_KMS(dev, "\n");
++
++	kfree(nv_connector->edid);
++	drm_sysfs_connector_remove(drm_connector);
++	drm_connector_cleanup(drm_connector);
++	kfree(drm_connector);
++}
++
++static void
++nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
++{
++	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
++
++	if (dev_priv->card_type >= NV_50)
++		return;
++
++	*flags = 0;
++	if (NVLockVgaCrtcs(dev_priv->dev, false))
++		*flags |= 1;
++	if (nv_heads_tied(dev_priv->dev))
++		*flags |= 2;
++
++	if (*flags & 2)
++		NVSetOwner(dev_priv->dev, 0); /* necessary? */
++}
++
++static void
++nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
++{
++	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
++
++	if (dev_priv->card_type >= NV_50)
++		return;
++
++	if (flags & 2)
++		NVSetOwner(dev_priv->dev, 4);
++	if (flags & 1)
++		NVLockVgaCrtcs(dev_priv->dev, true);
++}
++
++static struct nouveau_i2c_chan *
++nouveau_connector_ddc_detect(struct drm_connector *connector,
++			     struct nouveau_encoder **pnv_encoder)
++{
++	struct drm_device *dev = connector->dev;
++	uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
++	int ret, flags, i;
++
++	struct i2c_msg msgs[] = {
++		{
++			.addr = 0x50,
++			.flags = 0,
++			.len = 1,
++			.buf = out_buf,
++		},
++		{
++			.addr = 0x50,
++			.flags = I2C_M_RD,
++			.len = 1,
++			.buf = buf,
++		}
++	};
++
++	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
++		struct nouveau_i2c_chan *i2c = NULL;
++		struct nouveau_encoder *nv_encoder;
++		struct drm_mode_object *obj;
++		int id;
++
++		id = connector->encoder_ids[i];
++		if (!id)
++			break;
++
++		obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
++		if (!obj)
++			continue;
++		nv_encoder = nouveau_encoder(obj_to_encoder(obj));
++
++		if (nv_encoder->dcb->i2c_index < 0xf)
++			i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
++		if (!i2c)
++			continue;
++
++		nouveau_connector_ddc_prepare(connector, &flags);
++		ret = i2c_transfer(&i2c->adapter, msgs, 2);
++		nouveau_connector_ddc_finish(connector, flags);
++
++		if (ret == 2) {
++			*pnv_encoder = nv_encoder;
++			return i2c;
++		}
++	}
++
++	return NULL;
++}
++
++static void
++nouveau_connector_set_encoder(struct drm_connector *connector,
++			      struct nouveau_encoder *nv_encoder)
++{
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
++	struct drm_device *dev = connector->dev;
++
++	if (nv_connector->detected_encoder == nv_encoder)
++		return;
++	nv_connector->detected_encoder = nv_encoder;
++
++	if (nv_encoder->dcb->type == OUTPUT_LVDS ||
++	    nv_encoder->dcb->type == OUTPUT_TMDS) {
++		connector->doublescan_allowed = false;
++		connector->interlace_allowed = false;
++	} else {
++		connector->doublescan_allowed = true;
++		if (dev_priv->card_type == NV_20 ||
++		   (dev_priv->card_type == NV_10 &&
++		    (dev->pci_device & 0x0ff0) != 0x0100 &&
++		    (dev->pci_device & 0x0ff0) != 0x0150))
++			/* HW is broken */
++			connector->interlace_allowed = false;
++		else
++			connector->interlace_allowed = true;
++	}
++
++	if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
++		drm_connector_property_set_value(connector,
++			dev->mode_config.dvi_i_subconnector_property,
++			nv_encoder->dcb->type == OUTPUT_TMDS ?
++			DRM_MODE_SUBCONNECTOR_DVID :
++			DRM_MODE_SUBCONNECTOR_DVIA);
++	}
++}
++
++static enum drm_connector_status
++nouveau_connector_detect(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++	struct nouveau_encoder *nv_encoder = NULL;
++	struct nouveau_i2c_chan *i2c;
++	int type, flags;
++
++	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++		nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
++	if (nv_encoder && nv_connector->native_mode) {
++#ifdef CONFIG_ACPI
++		if (!nouveau_ignorelid && !acpi_lid_open())
++			return connector_status_disconnected;
++#endif
++		nouveau_connector_set_encoder(connector, nv_encoder);
++		return connector_status_connected;
++	}
++
++	/* Clean up the previous EDID block. */
++	if (nv_connector->edid) {
++		drm_mode_connector_update_edid_property(connector, NULL);
++		kfree(nv_connector->edid);
++		nv_connector->edid = NULL;
++	}
++
++	i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
++	if (i2c) {
++		nouveau_connector_ddc_prepare(connector, &flags);
++		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
++		nouveau_connector_ddc_finish(connector, flags);
++		drm_mode_connector_update_edid_property(connector,
++							nv_connector->edid);
++		if (!nv_connector->edid) {
++			NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
++				 drm_get_connector_name(connector));
++			goto detect_analog;
++		}
++
++		if (nv_encoder->dcb->type == OUTPUT_DP &&
++		    !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
++			NV_ERROR(dev, "Detected %s, but failed init\n",
++				 drm_get_connector_name(connector));
++			return connector_status_disconnected;
++		}
++
++		/* Override the encoder type for DVI-I based on whether the
++		 * EDID says the display is digital or analog; both use the
++		 * same i2c channel, so the value returned from ddc_detect
++		 * isn't necessarily correct.
++		 */
++		if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
++			if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
++				type = OUTPUT_TMDS;
++			else
++				type = OUTPUT_ANALOG;
++
++			nv_encoder = find_encoder_by_type(connector, type);
++			if (!nv_encoder) {
++				NV_ERROR(dev, "Detected %d encoder on %s, "
++					      "but no object!\n", type,
++					 drm_get_connector_name(connector));
++				return connector_status_disconnected;
++			}
++		}
++
++		nouveau_connector_set_encoder(connector, nv_encoder);
++		return connector_status_connected;
++	}
++
++detect_analog:
++	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
++	if (!nv_encoder)
++		nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
++	if (nv_encoder) {
++		struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
++		struct drm_encoder_helper_funcs *helper =
++						encoder->helper_private;
++
++		if (helper->detect(encoder, connector) ==
++						connector_status_connected) {
++			nouveau_connector_set_encoder(connector, nv_encoder);
++			return connector_status_connected;
++		}
++
++	}
++
++	return connector_status_disconnected;
++}
++
++static void
++nouveau_connector_force(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct nouveau_encoder *nv_encoder;
++	int type;
++
++	if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
++		if (connector->force == DRM_FORCE_ON_DIGITAL)
++			type = OUTPUT_TMDS;
++		else
++			type = OUTPUT_ANALOG;
++	} else
++		type = OUTPUT_ANY;
++
++	nv_encoder = find_encoder_by_type(connector, type);
++	if (!nv_encoder) {
++		NV_ERROR(dev, "can't find encoder to force %s on!\n",
++			 drm_get_connector_name(connector));
++		connector->status = connector_status_disconnected;
++		return;
++	}
++
++	nouveau_connector_set_encoder(connector, nv_encoder);
++}
++
++static int
++nouveau_connector_set_property(struct drm_connector *connector,
++			       struct drm_property *property, uint64_t value)
++{
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
++	struct drm_device *dev = connector->dev;
++	int ret;
++
++	/* Scaling mode */
++	if (property == dev->mode_config.scaling_mode_property) {
++		struct nouveau_crtc *nv_crtc = NULL;
++		bool modeset = false;
++
++		switch (value) {
++		case DRM_MODE_SCALE_NONE:
++		case DRM_MODE_SCALE_FULLSCREEN:
++		case DRM_MODE_SCALE_CENTER:
++		case DRM_MODE_SCALE_ASPECT:
++			break;
++		default:
++			return -EINVAL;
++		}
++
++		/* LVDS always needs gpu scaling */
++		if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
++		    value == DRM_MODE_SCALE_NONE)
++			return -EINVAL;
++
++		/* Changing between GPU and panel scaling requires a full
++		 * modeset
++		 */
++		if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
++		    (value == DRM_MODE_SCALE_NONE))
++			modeset = true;
++		nv_connector->scaling_mode = value;
++
++		if (connector->encoder && connector->encoder->crtc)
++			nv_crtc = nouveau_crtc(connector->encoder->crtc);
++		if (!nv_crtc)
++			return 0;
++
++		if (modeset || !nv_crtc->set_scale) {
++			ret = drm_crtc_helper_set_mode(&nv_crtc->base,
++							&nv_crtc->base.mode,
++							nv_crtc->base.x,
++							nv_crtc->base.y, NULL);
++			if (!ret)
++				return -EINVAL;
++		} else {
++			ret = nv_crtc->set_scale(nv_crtc, value, true);
++			if (ret)
++				return ret;
++		}
++
++		return 0;
++	}
++
++	/* Dithering */
++	if (property == dev->mode_config.dithering_mode_property) {
++		struct nouveau_crtc *nv_crtc = NULL;
++
++		if (value == DRM_MODE_DITHERING_ON)
++			nv_connector->use_dithering = true;
++		else
++			nv_connector->use_dithering = false;
++
++		if (connector->encoder && connector->encoder->crtc)
++			nv_crtc = nouveau_crtc(connector->encoder->crtc);
++
++		if (!nv_crtc || !nv_crtc->set_dither)
++			return 0;
++
++		return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
++					   true);
++	}
++
++	if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
++		return get_slave_funcs(nv_encoder)->
++			set_property(to_drm_encoder(nv_encoder), connector, property, value);
++
++	return -EINVAL;
++}
++
++static struct drm_display_mode *
++nouveau_connector_native_mode(struct nouveau_connector *connector)
++{
++	struct drm_device *dev = connector->base.dev;
++	struct drm_display_mode *mode, *largest = NULL;
++	int high_w = 0, high_h = 0, high_v = 0;
++
++	/* Use preferred mode if there is one.. */
++	list_for_each_entry(mode, &connector->base.probed_modes, head) {
++		if (mode->type & DRM_MODE_TYPE_PREFERRED) {
++			NV_DEBUG_KMS(dev, "native mode from preferred\n");
++			return drm_mode_duplicate(dev, mode);
++		}
++	}
++
++	/* Otherwise, take the resolution with the largest width, then height,
++	 * then vertical refresh
++	 */
++	list_for_each_entry(mode, &connector->base.probed_modes, head) {
++		if (mode->hdisplay < high_w)
++			continue;
++
++		if (mode->hdisplay == high_w && mode->vdisplay < high_h)
++			continue;
++
++		if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
++		    mode->vrefresh < high_v)
++			continue;
++
++		high_w = mode->hdisplay;
++		high_h = mode->vdisplay;
++		high_v = mode->vrefresh;
++		largest = mode;
++	}
++
++	NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
++		      high_w, high_h, high_v);
++	return largest ? drm_mode_duplicate(dev, largest) : NULL;
++}
++
++struct moderec {
++	int hdisplay;
++	int vdisplay;
++};
++
++static struct moderec scaler_modes[] = {
++	{ 1920, 1200 },
++	{ 1920, 1080 },
++	{ 1680, 1050 },
++	{ 1600, 1200 },
++	{ 1400, 1050 },
++	{ 1280, 1024 },
++	{ 1280, 960 },
++	{ 1152, 864 },
++	{ 1024, 768 },
++	{ 800, 600 },
++	{ 720, 400 },
++	{ 640, 480 },
++	{ 640, 400 },
++	{ 640, 350 },
++	{}
++};
++
++static int
++nouveau_connector_scaler_modes_add(struct drm_connector *connector)
++{
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++	struct drm_display_mode *native = nv_connector->native_mode, *m;
++	struct drm_device *dev = connector->dev;
++	struct moderec *mode = &scaler_modes[0];
++	int modes = 0;
++
++	if (!native)
++		return 0;
++
++	while (mode->hdisplay) {
++		if (mode->hdisplay <= native->hdisplay &&
++		    mode->vdisplay <= native->vdisplay) {
++			m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
++					 drm_mode_vrefresh(native), false,
++					 false, false);
++			if (!m)
++				continue;
++
++			m->type |= DRM_MODE_TYPE_DRIVER;
++
++			drm_mode_probed_add(connector, m);
++			modes++;
++		}
++
++		mode++;
++	}
++
++	return modes;
++}
++
++static int
++nouveau_connector_get_modes(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
++	int ret = 0;
++
++	/* If we're not LVDS, destroy the previous native mode; the attached
++	 * monitor could have changed.
++	 */
++	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
++	    nv_connector->native_mode) {
++		drm_mode_destroy(dev, nv_connector->native_mode);
++		nv_connector->native_mode = NULL;
++	}
++
++	if (nv_connector->edid)
++		ret = drm_add_edid_modes(connector, nv_connector->edid);
++
++	/* Find the native mode if this is a digital panel; if we didn't
++	 * find any modes through DDC previously, add the native mode to
++	 * the list of modes.
++	 */
++	if (!nv_connector->native_mode)
++		nv_connector->native_mode =
++			nouveau_connector_native_mode(nv_connector);
++	if (ret == 0 && nv_connector->native_mode) {
++		struct drm_display_mode *mode;
++
++		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
++		drm_mode_probed_add(connector, mode);
++		ret = 1;
++	}
++
++	if (nv_encoder->dcb->type == OUTPUT_TV)
++		ret = get_slave_funcs(nv_encoder)->
++			get_modes(to_drm_encoder(nv_encoder), connector);
++
++	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
++		ret += nouveau_connector_scaler_modes_add(connector);
++
++	return ret;
++}
++
++static int
++nouveau_connector_mode_valid(struct drm_connector *connector,
++			     struct drm_display_mode *mode)
++{
++	struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++	struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
++	unsigned min_clock = 25000, max_clock = min_clock;
++	unsigned clock = mode->clock;
++
++	switch (nv_encoder->dcb->type) {
++	case OUTPUT_LVDS:
++		BUG_ON(!nv_connector->native_mode);
++		if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
++		    mode->vdisplay > nv_connector->native_mode->vdisplay)
++			return MODE_PANEL;
++
++		min_clock = 0;
++		max_clock = 400000;
++		break;
++	case OUTPUT_TMDS:
++		if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
++		    (dev_priv->card_type < NV_50 &&
++		     !nv_encoder->dcb->duallink_possible))
++			max_clock = 165000;
++		else
++			max_clock = 330000;
++		break;
++	case OUTPUT_ANALOG:
++		max_clock = nv_encoder->dcb->crtconf.maxfreq;
++		if (!max_clock)
++			max_clock = 350000;
++		break;
++	case OUTPUT_TV:
++		return get_slave_funcs(nv_encoder)->
++			mode_valid(to_drm_encoder(nv_encoder), mode);
++	case OUTPUT_DP:
++		if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
++			max_clock = nv_encoder->dp.link_nr * 270000;
++		else
++			max_clock = nv_encoder->dp.link_nr * 162000;
++
++		clock *= 3;
++		break;
++	}
++
++	if (clock < min_clock)
++		return MODE_CLOCK_LOW;
++
++	if (clock > max_clock)
++		return MODE_CLOCK_HIGH;
++
++	return MODE_OK;
++}
++
++static struct drm_encoder *
++nouveau_connector_best_encoder(struct drm_connector *connector)
++{
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++
++	if (nv_connector->detected_encoder)
++		return to_drm_encoder(nv_connector->detected_encoder);
++
++	return NULL;
++}
++
++static const struct drm_connector_helper_funcs
++nouveau_connector_helper_funcs = {
++	.get_modes = nouveau_connector_get_modes,
++	.mode_valid = nouveau_connector_mode_valid,
++	.best_encoder = nouveau_connector_best_encoder,
++};
++
++static const struct drm_connector_funcs
++nouveau_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.save = NULL,
++	.restore = NULL,
++	.detect = nouveau_connector_detect,
++	.destroy = nouveau_connector_destroy,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = nouveau_connector_set_property,
++	.force = nouveau_connector_force
++};
++
++static int
++nouveau_connector_create_lvds(struct drm_device *dev,
++			      struct drm_connector *connector)
++{
++	struct nouveau_connector *nv_connector = nouveau_connector(connector);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_i2c_chan *i2c = NULL;
++	struct nouveau_encoder *nv_encoder;
++	struct drm_display_mode native, *mode, *temp;
++	bool dummy, if_is_24bit = false;
++	int ret, flags;
++
++	nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
++	if (!nv_encoder)
++		return -ENODEV;
++
++	ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
++	if (ret) {
++		NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
++		return ret;
++	}
++	nv_connector->use_dithering = !if_is_24bit;
++
++	/* First, try getting an EDID over DDC, if allowed and an I2C
++	 * channel is available.
++	 */
++	if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
++		i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
++
++	if (i2c) {
++		nouveau_connector_ddc_prepare(connector, &flags);
++		nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
++		nouveau_connector_ddc_finish(connector, flags);
++	}
++
++	/* If no EDID was found above, and the VBIOS indicates a hardcoded
++	 * modeline is available for the panel, set it as the panel's
++	 * native mode and exit.
++	 */
++	if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
++	     (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
++	      dev_priv->VBIOS.pub.fp_no_ddc)) {
++		nv_connector->native_mode = drm_mode_duplicate(dev, &native);
++		goto out;
++	}
++
++	/* Still nothing; some VBIOS images have a hardcoded EDID block
++	 * for the panel stored in them.
++	 */
++	if (!nv_connector->edid && !nv_connector->native_mode &&
++	    !dev_priv->VBIOS.pub.fp_no_ddc) {
++		struct edid *edid =
++			(struct edid *)nouveau_bios_embedded_edid(dev);
++		if (edid) {
++			nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
++			*(nv_connector->edid) = *edid;
++		}
++	}
++
++	if (!nv_connector->edid)
++		goto out;
++
++	/* We didn't find/use a panel mode from the VBIOS, so parse the EDID
++	 * block and look for the preferred mode there.
++	 */
++	ret = drm_add_edid_modes(connector, nv_connector->edid);
++	if (ret == 0)
++		goto out;
++	nv_connector->detected_encoder = nv_encoder;
++	nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
++	list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
++		drm_mode_remove(connector, mode);
++
++out:
++	if (!nv_connector->native_mode) {
++		NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
++			      "determine its native mode.  Disabling.\n");
++		return -ENODEV;
++	}
++
++	drm_mode_connector_update_edid_property(connector, nv_connector->edid);
++	return 0;
++}
++
++int
++nouveau_connector_create(struct drm_device *dev, int index, int type)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_connector *nv_connector = NULL;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
++	if (!nv_connector)
++		return -ENOMEM;
++	nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
++	connector = &nv_connector->base;
++
++	switch (type) {
++	case DRM_MODE_CONNECTOR_VGA:
++		NV_INFO(dev, "Detected a VGA connector\n");
++		break;
++	case DRM_MODE_CONNECTOR_DVID:
++		NV_INFO(dev, "Detected a DVI-D connector\n");
++		break;
++	case DRM_MODE_CONNECTOR_DVII:
++		NV_INFO(dev, "Detected a DVI-I connector\n");
++		break;
++	case DRM_MODE_CONNECTOR_LVDS:
++		NV_INFO(dev, "Detected a LVDS connector\n");
++		break;
++	case DRM_MODE_CONNECTOR_TV:
++		NV_INFO(dev, "Detected a TV connector\n");
++		break;
++	case DRM_MODE_CONNECTOR_DisplayPort:
++		NV_INFO(dev, "Detected a DisplayPort connector\n");
++		break;
++	default:
++		NV_ERROR(dev, "Unknown connector, this is not good.\n");
++		break;
++	}
++
++	/* defaults, will get overridden in detect() */
++	connector->interlace_allowed = false;
++	connector->doublescan_allowed = false;
++
++	drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
++	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
++
++	/* Init DVI-I specific properties */
++	if (type == DRM_MODE_CONNECTOR_DVII) {
++		drm_mode_create_dvi_i_properties(dev);
++		drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
++		drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
++	}
++
++	if (type != DRM_MODE_CONNECTOR_LVDS)
++		nv_connector->use_dithering = false;
++
++	if (type == DRM_MODE_CONNECTOR_DVID ||
++	    type == DRM_MODE_CONNECTOR_DVII ||
++	    type == DRM_MODE_CONNECTOR_LVDS ||
++	    type == DRM_MODE_CONNECTOR_DisplayPort) {
++		nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
++
++		drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
++					      nv_connector->scaling_mode);
++		drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
++					      nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
++					      : DRM_MODE_DITHERING_OFF);
++
++	} else {
++		nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
++
++		if (type == DRM_MODE_CONNECTOR_VGA  &&
++				dev_priv->card_type >= NV_50) {
++			drm_connector_attach_property(connector,
++					dev->mode_config.scaling_mode_property,
++					nv_connector->scaling_mode);
++		}
++	}
++
++	/* attach encoders */
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++		if (nv_encoder->dcb->connector != index)
++			continue;
++
++		if (get_slave_funcs(nv_encoder))
++			get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
++
++		drm_mode_connector_attach_encoder(connector, encoder);
++	}
++
++	drm_sysfs_connector_add(connector);
++
++	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
++		ret = nouveau_connector_create_lvds(dev, connector);
++		if (ret) {
++			connector->funcs->destroy(connector);
++			return ret;
++		}
++	}
++
++	return 0;
++}
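
A note on the DDC probe in nouveau_connector_ddc_detect() above: it does not
fetch a full EDID at this stage, it only issues a one-byte write plus a
one-byte read at i2c address 0x50 (the standard EDID slave address) and treats
a fully completed two-message transfer as evidence that a sink is attached.
The same probe can be reproduced from userspace through i2c-dev; the sketch
below is illustrative only, and the bus node /dev/i2c-0 is an assumption
about the local setup.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	uint8_t out = 0x00, in = 0x00;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &in  },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-0", O_RDWR);	/* bus number is an assumption */

	if (fd < 0)
		return 1;
	/* As in the driver: a completed two-message transfer at 0x50
	 * means something is listening on the DDC bus. */
	if (ioctl(fd, I2C_RDWR, &xfer) == 2)
		printf("DDC device present (first EDID byte: 0x%02x)\n", in);
	else
		printf("no DDC response\n");
	close(fd);
	return 0;
}
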
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
+new file mode 100644
+index 0000000..728b809
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
+@@ -0,0 +1,54 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_CONNECTOR_H__
++#define __NOUVEAU_CONNECTOR_H__
++
++#include "drm_edid.h"
++#include "nouveau_i2c.h"
++
++struct nouveau_connector {
++	struct drm_connector base;
++
++	struct dcb_connector_table_entry *dcb;
++
++	int scaling_mode;
++	bool use_dithering;
++
++	struct nouveau_encoder *detected_encoder;
++	struct edid *edid;
++	struct drm_display_mode *native_mode;
++};
++
++static inline struct nouveau_connector *nouveau_connector(
++						struct drm_connector *con)
++{
++	return container_of(con, struct nouveau_connector, base);
++}
++
++int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
++
++#endif /* __NOUVEAU_CONNECTOR_H__ */
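
The nouveau_connector() helper above is the usual kernel upcast idiom:
struct drm_connector is embedded inside struct nouveau_connector, and
container_of() recovers the enclosing object from a pointer to the embedded
base that core DRM code passes around. A standalone sketch of the pattern
follows; struct wrapper and to_wrapper() are hypothetical stand-ins.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

/* stands in for struct nouveau_connector embedding struct drm_connector */
struct wrapper {
	struct base base;
	int extra;
};

static struct wrapper *to_wrapper(struct base *b)
{
	return container_of(b, struct wrapper, base);
}

int main(void)
{
	struct wrapper w = { .base = { .id = 7 }, .extra = 42 };
	struct base *b = &w.base;	/* what core code hands back to us */

	printf("extra = %d\n", to_wrapper(b)->extra);	/* prints 42 */
	return 0;
}
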
+diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
+new file mode 100644
+index 0000000..49fa7b2
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
+@@ -0,0 +1,95 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_CRTC_H__
++#define __NOUVEAU_CRTC_H__
++
++struct nouveau_crtc {
++	struct drm_crtc base;
++
++	int index;
++
++	struct drm_display_mode *mode;
++
++	uint32_t dpms_saved_fp_control;
++	uint32_t fp_users;
++	int saturation;
++	int sharpness;
++	int last_dpms;
++
++	struct {
++		int cpp;
++		bool blanked;
++		uint32_t offset;
++		uint32_t tile_flags;
++	} fb;
++
++	struct {
++		struct nouveau_bo *nvbo;
++		bool visible;
++		uint32_t offset;
++		void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
++		void (*set_pos)(struct nouveau_crtc *, int x, int y);
++		void (*hide)(struct nouveau_crtc *, bool update);
++		void (*show)(struct nouveau_crtc *, bool update);
++	} cursor;
++
++	struct {
++		struct nouveau_bo *nvbo;
++		uint16_t r[256];
++		uint16_t g[256];
++		uint16_t b[256];
++		int depth;
++	} lut;
++
++	int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
++	int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
++};
++
++static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
++{
++	return container_of(crtc, struct nouveau_crtc, base);
++}
++
++static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
++{
++	return &crtc->base;
++}
++
++int nv50_crtc_create(struct drm_device *dev, int index);
++int nv50_cursor_init(struct nouveau_crtc *);
++void nv50_cursor_fini(struct nouveau_crtc *);
++int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
++			 uint32_t buffer_handle, uint32_t width,
++			 uint32_t height);
++int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
++
++int nv04_cursor_init(struct nouveau_crtc *);
++
++struct nouveau_connector *
++nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
++
++#endif /* __NOUVEAU_CRTC_H__ */
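
Note the per-generation hooks in struct nouveau_crtc above (set_dither,
set_scale, the cursor callbacks): backends fill in only what the hardware
supports, and callers such as nouveau_connector_set_property() NULL-check
before dispatching. A minimal standalone sketch of that optional-ops pattern;
struct my_crtc and the backend function are hypothetical.

#include <stdio.h>

struct my_crtc {
	int (*set_dither)(struct my_crtc *crtc, int on, int update);
};

static int nv50_style_set_dither(struct my_crtc *crtc, int on, int update)
{
	printf("dither %s%s\n", on ? "on" : "off", update ? " (update)" : "");
	return 0;
}

static int crtc_try_dither(struct my_crtc *crtc, int on)
{
	/* some CRTCs may leave the hook unset; treat that as a no-op,
	 * mirroring the NULL check in nouveau_connector_set_property() */
	if (!crtc || !crtc->set_dither)
		return 0;
	return crtc->set_dither(crtc, on, 1);
}

int main(void)
{
	struct my_crtc plain = { 0 };
	struct my_crtc nv50 = { .set_dither = nv50_style_set_dither };

	crtc_try_dither(&plain, 1);	/* no-op */
	crtc_try_dither(&nv50, 1);	/* dispatches to the backend */
	return 0;
}
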
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+new file mode 100644
+index 0000000..d79db36
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -0,0 +1,155 @@
++/*
++ * Copyright (C) 2009 Red Hat <bskeggs at redhat.com>
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *  Ben Skeggs <bskeggs at redhat.com>
++ */
++
++#include <linux/debugfs.h>
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++
++static int
++nouveau_debugfs_channel_info(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct nouveau_channel *chan = node->info_ent->data;
++
++	seq_printf(m, "channel id    : %d\n", chan->id);
++
++	seq_printf(m, "cpu fifo state:\n");
++	seq_printf(m, "          base: 0x%08x\n", chan->pushbuf_base);
++	seq_printf(m, "           max: 0x%08x\n", chan->dma.max << 2);
++	seq_printf(m, "           cur: 0x%08x\n", chan->dma.cur << 2);
++	seq_printf(m, "           put: 0x%08x\n", chan->dma.put << 2);
++	seq_printf(m, "          free: 0x%08x\n", chan->dma.free << 2);
++
++	seq_printf(m, "gpu fifo state:\n");
++	seq_printf(m, "           get: 0x%08x\n",
++					nvchan_rd32(chan, chan->user_get));
++	seq_printf(m, "           put: 0x%08x\n",
++					nvchan_rd32(chan, chan->user_put));
++
++	seq_printf(m, "last fence    : %d\n", chan->fence.sequence);
++	seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
++	return 0;
++}
++
++int
++nouveau_debugfs_channel_init(struct nouveau_channel *chan)
++{
++	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
++	struct drm_minor *minor = chan->dev->primary;
++	int ret;
++
++	if (!dev_priv->debugfs.channel_root) {
++		dev_priv->debugfs.channel_root =
++			debugfs_create_dir("channel", minor->debugfs_root);
++		if (!dev_priv->debugfs.channel_root)
++			return -ENOENT;
++	}
++
++	snprintf(chan->debugfs.name, 32, "%d", chan->id);
++	chan->debugfs.info.name = chan->debugfs.name;
++	chan->debugfs.info.show = nouveau_debugfs_channel_info;
++	chan->debugfs.info.driver_features = 0;
++	chan->debugfs.info.data = chan;
++
++	ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
++				       dev_priv->debugfs.channel_root,
++				       chan->dev->primary);
++	if (ret == 0)
++		chan->debugfs.active = true;
++	return ret;
++}
++
++void
++nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
++{
++	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
++
++	if (!chan->debugfs.active)
++		return;
++
++	drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
++	chan->debugfs.active = false;
++
++	if (chan == dev_priv->channel) {
++		debugfs_remove(dev_priv->debugfs.channel_root);
++		dev_priv->debugfs.channel_root = NULL;
++	}
++}
++
++static int
++nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_minor *minor = node->minor;
++	struct drm_device *dev = minor->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t ppci_0;
++
++	ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
++
++	seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
++	seq_printf(m, "PCI ID    : 0x%04x:0x%04x\n",
++		   ppci_0 & 0xffff, ppci_0 >> 16);
++	return 0;
++}
++
++static int
++nouveau_debugfs_memory_info(struct seq_file *m, void *data)
++{
++	struct drm_info_node *node = (struct drm_info_node *) m->private;
++	struct drm_minor *minor = node->minor;
++	struct drm_device *dev = minor->dev;
++
++	seq_printf(m, "VRAM total: %dKiB\n",
++		   (int)(nouveau_mem_fb_amount(dev) >> 10));
++	return 0;
++}
++
++static struct drm_info_list nouveau_debugfs_list[] = {
++	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
++	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
++};
++#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
++
++int
++nouveau_debugfs_init(struct drm_minor *minor)
++{
++	drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
++				 minor->debugfs_root, minor);
++	return 0;
++}
++
++void
++nouveau_debugfs_takedown(struct drm_minor *minor)
++{
++	drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
++				 minor);
++}
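
For reference, the entries registered above land under the DRM minor's
debugfs directory, so the per-channel state exported by
nouveau_debugfs_channel_info() can be read like any other file. A small
illustrative reader follows; the /sys/kernel/debug mount point and the
dri/0 minor are assumptions about the local setup.

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/dri/0/channel/0", "r");

	if (!f) {
		perror("open channel info");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* echoes the seq_printf() output */
	fclose(f);
	return 0;
}
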
+diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
+new file mode 100644
+index 0000000..dfc9439
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_display.c
+@@ -0,0 +1,115 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++#include "nouveau_drv.h"
++#include "nouveau_fb.h"
++#include "nouveau_fbcon.h"
++
++static void
++nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
++{
++	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
++	struct drm_device *dev = drm_fb->dev;
++
++	if (drm_fb->fbdev)
++		nouveau_fbcon_remove(dev, drm_fb);
++
++	if (fb->nvbo) {
++		mutex_lock(&dev->struct_mutex);
++		drm_gem_object_unreference(fb->nvbo->gem);
++		mutex_unlock(&dev->struct_mutex);
++	}
++
++	drm_framebuffer_cleanup(drm_fb);
++	kfree(fb);
++}
++
++static int
++nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
++				       struct drm_file *file_priv,
++				       unsigned int *handle)
++{
++	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
++
++	return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
++}
++
++static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
++	.destroy = nouveau_user_framebuffer_destroy,
++	.create_handle = nouveau_user_framebuffer_create_handle,
++};
++
++struct drm_framebuffer *
++nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
++			   struct drm_mode_fb_cmd *mode_cmd)
++{
++	struct nouveau_framebuffer *fb;
++	int ret;
++
++	fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
++	if (!fb)
++		return NULL;
++
++	ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
++	if (ret) {
++		kfree(fb);
++		return NULL;
++	}
++
++	drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
++
++	fb->nvbo = nvbo;
++	return &fb->base;
++}
++
++static struct drm_framebuffer *
++nouveau_user_framebuffer_create(struct drm_device *dev,
++				struct drm_file *file_priv,
++				struct drm_mode_fb_cmd *mode_cmd)
++{
++	struct drm_framebuffer *fb;
++	struct drm_gem_object *gem;
++
++	gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
++	if (!gem)
++		return NULL;
++
++	fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
++	if (!fb) {
++		drm_gem_object_unreference(gem);
++		return NULL;
++	}
++
++	return fb;
++}
++
++const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
++	.fb_create = nouveau_user_framebuffer_create,
++	.fb_changed = nouveau_fbcon_probe,
++};
++
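
One subtlety in nouveau_user_framebuffer_create() above is reference
ownership: drm_gem_object_lookup() returns the object with a reference held,
and that reference is either handed over to the new framebuffer or dropped on
the failure path. A toy standalone sketch of the hand-off; obj, fb and their
helpers are hypothetical stand-ins, not DRM API.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for a GEM buffer object. */
struct obj { int refs; };

static struct obj *obj_lookup(void)		/* takes a reference */
{
	struct obj *o = malloc(sizeof(*o));
	if (o)
		o->refs = 1;
	return o;
}

static void obj_unref(struct obj *o)
{
	if (o && --o->refs == 0)
		free(o);
}

struct fb { struct obj *backing; };

/* Mirrors the flow above: on success the lookup reference belongs to
 * the framebuffer; on failure it must be dropped before returning. */
static struct fb *fb_create(void)
{
	struct obj *o = obj_lookup();
	struct fb *f;

	if (!o)
		return NULL;
	f = calloc(1, sizeof(*f));
	if (!f) {
		obj_unref(o);	/* failure path: give the reference back */
		return NULL;
	}
	f->backing = o;		/* success: fb now owns the reference */
	return f;
}

int main(void)
{
	struct fb *f = fb_create();

	printf("created: %s\n", f ? "yes" : "no");
	if (f) {
		obj_unref(f->backing);
		free(f);
	}
	return 0;
}
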
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
+new file mode 100644
+index 0000000..50d9e67
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
+@@ -0,0 +1,244 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
++void
++nouveau_dma_pre_init(struct nouveau_channel *chan)
++{
++	chan->dma.max  = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
++	chan->dma.put  = 0;
++	chan->dma.cur  = chan->dma.put;
++	chan->dma.free = chan->dma.max - chan->dma.cur;
++}
++
++int
++nouveau_dma_init(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *m2mf = NULL;
++	struct nouveau_gpuobj *nvsw = NULL;
++	int ret, i;
++
++	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
++	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
++				    0x0039 : 0x5039, &m2mf);
++	if (ret)
++		return ret;
++
++	ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
++	if (ret)
++		return ret;
++
++	/* Create an NV_SW object for various sync purposes */
++	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
++	if (ret)
++		return ret;
++
++	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
++	if (ret)
++		return ret;
++
++	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
++	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
++	if (ret)
++		return ret;
++
++	/* Map push buffer */
++	ret = nouveau_bo_map(chan->pushbuf_bo);
++	if (ret)
++		return ret;
++
++	/* Map M2MF notifier object - fbcon. */
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		ret = nouveau_bo_map(chan->notifier_bo);
++		if (ret)
++			return ret;
++	}
++
++	/* Insert NOPs for NOUVEAU_DMA_SKIPS */
++	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
++		OUT_RING(chan, 0);
++
++	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
++	ret = RING_SPACE(chan, 4);
++	if (ret)
++		return ret;
++	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
++	OUT_RING(chan, NvM2MF);
++	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
++	OUT_RING(chan, NvNotify0);
++
++	/* Initialise NV_SW */
++	ret = RING_SPACE(chan, 2);
++	if (ret)
++		return ret;
++	BEGIN_RING(chan, NvSubSw, 0, 1);
++	OUT_RING(chan, NvSw);
++
++	/* Sit back and pray the channel works.. */
++	FIRE_RING(chan);
++
++	return 0;
++}
++
++void
++OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
++{
++	bool is_iomem;
++	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
++	mem = &mem[chan->dma.cur];
++	if (is_iomem)
++		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
++	else
++		memcpy(mem, data, nr_dwords * 4);
++	chan->dma.cur += nr_dwords;
++}
++
++/* Fetch and adjust GPU GET pointer
++ *
++ * Returns:
++ *  value >= 0, the adjusted GET pointer
++ *  -EINVAL if GET pointer currently outside main push buffer
++ *  -EBUSY if timeout exceeded
++ */
++static inline int
++READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
++{
++	uint32_t val;
++
++	val = nvchan_rd32(chan, chan->user_get);
++
++	/* reset the counter as long as GET is still advancing; this
++	 * avoids misdetecting a GPU lockup if the GPU happens to
++	 * just be processing an operation that takes a long time
++	 */
++	if (val != *prev_get) {
++		*prev_get = val;
++		*timeout = 0;
++	}
++
++	if ((++*timeout & 0xff) == 0) {
++		DRM_UDELAY(1);
++		if (*timeout > 100000)
++			return -EBUSY;
++	}
++
++	if (val < chan->pushbuf_base ||
++	    val > chan->pushbuf_base + (chan->dma.max << 2))
++		return -EINVAL;
++
++	return (val - chan->pushbuf_base) >> 2;
++}
++
++int
++nouveau_dma_wait(struct nouveau_channel *chan, int size)
++{
++	uint32_t prev_get = 0, cnt = 0;
++	int get;
++
++	while (chan->dma.free < size) {
++		get = READ_GET(chan, &prev_get, &cnt);
++		if (unlikely(get == -EBUSY))
++			return -EBUSY;
++
++		/* loop until we have a usable GET pointer.  the value
++		 * we read from the GPU may be outside the main ring if
++		 * PFIFO is processing a buffer called from the main ring;
++		 * discard these values until something sensible is seen.
++		 *
++		 * the other case in which we discard GET is while the GPU
++		 * is fetching from the SKIPS area, so the code below doesn't
++		 * have to deal with some fun corner cases.
++		 */
++		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
++			continue;
++
++		if (get <= chan->dma.cur) {
++			/* engine is fetching behind us, or is completely
++			 * idle (GET == PUT) so we have free space up until
++			 * the end of the push buffer
++			 *
++			 * we can only hit that path once per call: after
++			 * looping back to the beginning of the push buffer,
++			 * we'll hit the fetching-ahead-of-us path from that
++			 * point on.
++			 *
++			 * the *one* exception to that rule is if we read
++			 * GET==PUT, in which case the below conditional will
++			 * always succeed and break us out of the wait loop.
++			 */
++			chan->dma.free = chan->dma.max - chan->dma.cur;
++			if (chan->dma.free >= size)
++				break;
++
++			/* not enough space left at the end of the push buffer,
++			 * instruct the GPU to jump back to the start right
++			 * after processing the currently pending commands.
++			 */
++			OUT_RING(chan, chan->pushbuf_base | 0x20000000);
++
++			/* wait for GET to depart from the skips area.
++			 * this prevents writing GET==PUT, which would
++			 * trigger a race that makes us think the GPU is
++			 * idle when it's not.
++			 */
++			do {
++				get = READ_GET(chan, &prev_get, &cnt);
++				if (unlikely(get == -EBUSY))
++					return -EBUSY;
++				if (unlikely(get == -EINVAL))
++					continue;
++			} while (get <= NOUVEAU_DMA_SKIPS);
++			WRITE_PUT(NOUVEAU_DMA_SKIPS);
++
++			/* we're now submitting commands at the start of
++			 * the push buffer.
++			 */
++			chan->dma.cur  =
++			chan->dma.put  = NOUVEAU_DMA_SKIPS;
++		}
++
++		/* the engine is fetching ahead of us, so we have space up
++		 * until the current GET pointer.  the "- 1" ensures there's
++		 * space left to emit a jump back to the beginning of the
++		 * push buffer if we require it.  we can never get GET == PUT
++		 * here, so this is safe.
++		 */
++		chan->dma.free = get - chan->dma.cur - 1;
++	}
++
++	return 0;
++}
++
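
The heart of nouveau_dma_wait() above is the two-case free-space rule for a
ring shared with the GPU: with GET at or behind the CPU's write cursor, the
writable region runs to the end of the buffer; with GET ahead, it runs to one
word short of GET so a jump back to the start can always be emitted. A
standalone sketch of just that arithmetic; the buffer sizes are arbitrary.

#include <stdio.h>

#define RING_MAX 1024	/* entries, stands in for chan->dma.max */

/* Free-space rule from nouveau_dma_wait(): if the engine's GET is at or
 * behind our cursor we own everything up to the end of the buffer;
 * otherwise we own up to one word short of GET, leaving room for the
 * jump-to-start command. */
static int ring_free(int get, int cur)
{
	if (get <= cur)
		return RING_MAX - cur;
	return get - cur - 1;
}

int main(void)
{
	printf("get=100 cur=500 -> free=%d\n", ring_free(100, 500)); /* 524 */
	printf("get=500 cur=100 -> free=%d\n", ring_free(500, 100)); /* 399 */
	return 0;
}
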
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
+new file mode 100644
+index 0000000..dabfd65
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
+@@ -0,0 +1,159 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_DMA_H__
++#define __NOUVEAU_DMA_H__
++
++#ifndef NOUVEAU_DMA_DEBUG
++#define NOUVEAU_DMA_DEBUG 0
++#endif
++
++/*
++ * There's a hw race condition where you can't jump to your PUT offset;
++ * to avoid this we jump to offset + SKIPS and fill the difference with
++ * NOPs.
++ *
++ * xf86-video-nv configures the DMA fetch size to 32 bytes, and uses
++ * a SKIPS value of 8.  Let's assume the race condition has to do with
++ * writing into the fetch area; since we configure a fetch size of 128
++ * bytes, we need a larger SKIPS value.
++ */
++#define NOUVEAU_DMA_SKIPS (128 / 4)
++
++/* Hardcoded object assignments to subchannels (subchannel id). */
++enum {
++	NvSubM2MF	= 0,
++	NvSubSw		= 1,
++	NvSub2D		= 2,
++	NvSubCtxSurf2D  = 2,
++	NvSubGdiRect    = 3,
++	NvSubImageBlit  = 4
++};
++
++/* Object handles. */
++enum {
++	NvM2MF		= 0x80000001,
++	NvDmaFB		= 0x80000002,
++	NvDmaTT		= 0x80000003,
++	NvDmaVRAM	= 0x80000004,
++	NvDmaGART	= 0x80000005,
++	NvNotify0       = 0x80000006,
++	Nv2D		= 0x80000007,
++	NvCtxSurf2D	= 0x80000008,
++	NvRop		= 0x80000009,
++	NvImagePatt	= 0x8000000a,
++	NvClipRect	= 0x8000000b,
++	NvGdiRect	= 0x8000000c,
++	NvImageBlit	= 0x8000000d,
++	NvSw		= 0x8000000e,
++
++	/* G80+ display objects */
++	NvEvoVRAM	= 0x01000000,
++	NvEvoFB16	= 0x01000001,
++	NvEvoFB32	= 0x01000002
++};
++
++#define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
++#define NV_MEMORY_TO_MEMORY_FORMAT_NAME                               0x00000000
++#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF                            0x00000050
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOP                                0x00000100
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY                             0x00000104
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE                 0x00000000
++#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN       0x00000001
++#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY                         0x00000180
++#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE                         0x00000184
++#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN                          0x0000030c
++
++#define NV50_MEMORY_TO_MEMORY_FORMAT                                  0x00005039
++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200                           0x00000200
++#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C                           0x0000021c
++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH                   0x00000238
++#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH                  0x0000023c
++
++static __must_check inline int
++RING_SPACE(struct nouveau_channel *chan, int size)
++{
++	if (chan->dma.free < size) {
++		int ret;
++
++		ret = nouveau_dma_wait(chan, size);
++		if (ret)
++			return ret;
++	}
++
++	chan->dma.free -= size;
++	return 0;
++}
++
++static inline void
++OUT_RING(struct nouveau_channel *chan, int data)
++{
++	if (NOUVEAU_DMA_DEBUG) {
++		NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
++			chan->id, chan->dma.cur << 2, data);
++	}
++
++	nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
++}
++
++extern void
++OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
++
++static inline void
++BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
++{
++	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
++}
++
++#define WRITE_PUT(val) do {                                                    \
++	DRM_MEMORYBARRIER();                                                   \
++	nouveau_bo_rd32(chan->pushbuf_bo, 0);                                  \
++	nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base);  \
++} while (0)
++
++static inline void
++FIRE_RING(struct nouveau_channel *chan)
++{
++	if (NOUVEAU_DMA_DEBUG) {
++		NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
++			chan->id, chan->dma.cur << 2);
++	}
++
++	if (chan->dma.cur == chan->dma.put)
++		return;
++	chan->accel_done = true;
++
++	WRITE_PUT(chan->dma.cur);
++	chan->dma.put = chan->dma.cur;
++}
++
++static inline void
++WIND_RING(struct nouveau_channel *chan)
++{
++	chan->dma.cur = chan->dma.put;
++}
++
++#endif
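
BEGIN_RING() above packs a command header as (subc << 13) | (size << 18) |
mthd. The sketch below packs and unpacks one header to make the layout
concrete; the field widths used for decoding are inferred from the shifts,
not something the patch spells out.

#include <stdio.h>
#include <stdint.h>

/* Header layout as used by BEGIN_RING(): method in the low bits,
 * subchannel at bit 13, dword count at bit 18. */
static uint32_t pack(int subc, int mthd, int size)
{
	return ((uint32_t)subc << 13) | ((uint32_t)size << 18) | (uint32_t)mthd;
}

int main(void)
{
	/* e.g. BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1) */
	uint32_t hdr = pack(0, 0x0000, 1);

	printf("header 0x%08x: subc=%u mthd=0x%04x size=%u\n", hdr,
	       (unsigned)((hdr >> 13) & 0x7),
	       (unsigned)(hdr & 0x1fff),
	       (unsigned)((hdr >> 18) & 0x7ff));
	return 0;
}
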
+diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
+new file mode 100644
+index 0000000..f954ad9
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
+@@ -0,0 +1,575 @@
++/*
++ * Copyright 2009 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_i2c.h"
++#include "nouveau_encoder.h"
++
++static int
++auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_i2c_chan *auxch;
++	int ret;
++
++	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
++	if (!auxch)
++		return -ENODEV;
++
++	ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++static int
++auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_i2c_chan *auxch;
++	int ret;
++
++	auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
++	if (!auxch)
++		return -ENODEV;
++
++	ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
++	return ret;
++}
++
++static int
++nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	uint32_t tmp;
++	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
++
++	tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
++	tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
++		 NV50_SOR_DP_CTRL_LANE_MASK);
++	tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
++	if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
++		tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
++	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
++
++	return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
++}
++
++static int
++nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	uint32_t tmp;
++	int reg = 0x614300 + (nv_encoder->or * 0x800);
++
++	tmp  = nv_rd32(dev, reg);
++	tmp &= 0xfff3ffff;
++	if (cmd == DP_LINK_BW_2_7)
++		tmp |= 0x00040000;
++	nv_wr32(dev, reg, tmp);
++
++	return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
++}
++
++static int
++nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	uint32_t tmp;
++	uint8_t cmd;
++	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
++	int ret;
++
++	tmp  = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
++	tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
++	tmp |= (pattern << 24);
++	nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
++
++	ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
++	if (ret)
++		return ret;
++	cmd &= ~DP_TRAINING_PATTERN_MASK;
++	cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
++	return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
++}
++
++static int
++nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct bit_displayport_encoder_table_entry *dpse;
++	struct bit_displayport_encoder_table *dpe;
++	int i, dpe_headerlen, max_vs = 0;
++
++	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
++	if (!dpe)
++		return false;
++	dpse = (void *)((char *)dpe + dpe_headerlen);
++
++	for (i = 0; i < dpe_headerlen; i++, dpse++) {
++		if (dpse->vs_level > max_vs)
++			max_vs = dpse->vs_level;
++	}
++
++	return max_vs;
++}
++
++static int
++nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct bit_displayport_encoder_table_entry *dpse;
++	struct bit_displayport_encoder_table *dpe;
++	int i, dpe_headerlen, max_pre = 0;
++
++	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
++	if (!dpe)
++		return false;
++	dpse = (void *)((char *)dpe + dpe_headerlen);
++
++	for (i = 0; i < dpe_headerlen; i++, dpse++) {
++		if (dpse->vs_level != vs)
++			continue;
++
++		if (dpse->pre_level > max_pre)
++			max_pre = dpse->pre_level;
++	}
++
++	return max_pre;
++}
++
++static bool
++nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct bit_displayport_encoder_table_entry *dpse;
++	struct bit_displayport_encoder_table *dpe;
++	int ret, i, dpe_headerlen, vs = 0, pre = 0;
++	uint8_t request[2];
++
++	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
++	if (!dpe)
++		return false;
++	dpse = (void *)((char *)dpe + dpe_headerlen);
++
++	ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
++	if (ret)
++		return false;
++
++	NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
++
++	/* Keep all lanes at the same level.. */
++	for (i = 0; i < nv_encoder->dp.link_nr; i++) {
++		int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
++		int lane_vs = lane_req & 3;
++		int lane_pre = (lane_req >> 2) & 3;
++
++		if (lane_vs > vs)
++			vs = lane_vs;
++		if (lane_pre > pre)
++			pre = lane_pre;
++	}
++
++	if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
++		vs  = nouveau_dp_max_voltage_swing(encoder);
++		vs |= 4;
++	}
++
++	if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
++		pre  = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
++		pre |= 4;
++	}
++
++	/* Update the configuration for all lanes.. */
++	for (i = 0; i < nv_encoder->dp.link_nr; i++)
++		config[i] = (pre << 3) | vs;
++
++	return true;
++}
++
++static bool
++nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct bit_displayport_encoder_table_entry *dpse;
++	struct bit_displayport_encoder_table *dpe;
++	int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
++	int dpe_headerlen, ret, i;
++
++	NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
++		 config[0], config[1], config[2], config[3]);
++
++	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
++	if (!dpe)
++		return false;
++	dpse = (void *)((char *)dpe + dpe_headerlen);
++
++	for (i = 0; i < dpe->record_nr; i++, dpse++) {
++		if (dpse->vs_level == (config[0] & 3) &&
++		    dpse->pre_level == ((config[0] >> 3) & 3))
++			break;
++	}
++	BUG_ON(i == dpe->record_nr);
++
++	for (i = 0; i < nv_encoder->dp.link_nr; i++) {
++		const int shift[4] = { 16, 8, 0, 24 };
++		uint32_t mask = 0xff << shift[i];
++		uint32_t reg0, reg1, reg2;
++
++		reg0  = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
++		reg0 |= (dpse->reg0 << shift[i]);
++		reg1  = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
++		reg1 |= (dpse->reg1 << shift[i]);
++		reg2  = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
++		reg2 |= (dpse->reg2 << 8);
++		nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
++		nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
++		nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
++	}
++
++	ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
++	if (ret)
++		return false;
++
++	return true;
++}
++
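++/*
++ * Full link training as laid out in the DisplayPort spec: a clock
++ * recovery phase using training pattern 1, then channel equalisation
++ * using pattern 2, with the drive levels adjusted between attempts.
++ * If either phase fails outright, drop to the 1.62Gbps link rate and
++ * retrain before giving up.
++ */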
++bool
++nouveau_dp_link_train(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	uint8_t config[4];
++	uint8_t status[3];
++	bool cr_done, cr_max_vs, eq_done;
++	int ret = 0, i, tries, voltage;
++
++	NV_DEBUG_KMS(dev, "link training!!\n");
++train:
++	cr_done = eq_done = false;
++
++	/* set link configuration */
++	NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
++		 nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
++
++	ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
++	if (ret)
++		return false;
++
++	config[0] = nv_encoder->dp.link_nr;
++	if (nv_encoder->dp.dpcd_version >= 0x11)
++		config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
++
++	ret = nouveau_dp_lane_count_set(encoder, config[0]);
++	if (ret)
++		return false;
++
++	/* clock recovery */
++	NV_DEBUG_KMS(dev, "\tbegin cr\n");
++	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
++	if (ret)
++		goto stop;
++
++	tries = 0;
++	voltage = -1;
++	memset(config, 0x00, sizeof(config));
++	for (;;) {
++		if (!nouveau_dp_link_train_commit(encoder, config))
++			break;
++
++		udelay(100);
++
++		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
++		if (ret)
++			break;
++		NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
++			 status[0], status[1]);
++
++		cr_done = true;
++		cr_max_vs = false;
++		for (i = 0; i < nv_encoder->dp.link_nr; i++) {
++			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
++
++			if (!(lane & DP_LANE_CR_DONE)) {
++				cr_done = false;
++				if (config[i] & DP_TRAIN_MAX_SWING_REACHED)
++					cr_max_vs = true;
++				break;
++			}
++		}
++
++		if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
++			voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
++			tries = 0;
++		}
++
++		if (cr_done || cr_max_vs || (++tries == 5))
++			break;
++
++		if (!nouveau_dp_link_train_adjust(encoder, config))
++			break;
++	}
++
++	if (!cr_done)
++		goto stop;
++
++	/* channel equalisation */
++	NV_DEBUG_KMS(dev, "\tbegin eq\n");
++	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
++	if (ret)
++		goto stop;
++
++	for (tries = 0; tries <= 5; tries++) {
++		udelay(400);
++
++		ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
++		if (ret)
++			break;
++		NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x 0x%02x\n",
++			 status[0], status[1], status[2]);
++
++		eq_done = true;
++		if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
++			eq_done = false;
++
++		for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
++			int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
++
++			if (!(lane & DP_LANE_CR_DONE)) {
++				cr_done = false;
++				break;
++			}
++
++			if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
++			    !(lane & DP_LANE_SYMBOL_LOCKED)) {
++				eq_done = false;
++				break;
++			}
++		}
++
++		if (eq_done || !cr_done)
++			break;
++
++		if (!nouveau_dp_link_train_adjust(encoder, config) ||
++		    !nouveau_dp_link_train_commit(encoder, config))
++			break;
++	}
++
++stop:
++	/* end link training */
++	ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
++	if (ret)
++		return false;
++
++	/* retry at a lower setting, if possible */
++	if (!(eq_done && cr_done)) {
++		NV_DEBUG_KMS(dev, "\twe failed\n");
++		if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
++			NV_DEBUG_KMS(dev, "retry link training at low rate\n");
++			nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
++			goto train;
++		}
++	}
++
++	return eq_done;
++}
++
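++/*
++ * Read the first four bytes of the sink's DPCD to discover its DP
++ * revision, maximum link rate and lane count, clamping the lane
++ * count to the DCB limit and falling back to the 1.62Gbps rate when
++ * the DCB doesn't claim support for anything faster.
++ */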
++bool
++nouveau_dp_detect(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	uint8_t dpcd[4];
++	int ret;
++
++	ret = auxch_rd(encoder, 0x0000, dpcd, 4);
++	if (ret)
++		return false;
++
++	NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
++		      "display: link_bw %d, link_nr %d version 0x%02x\n",
++		 nv_encoder->dcb->dpconf.link_bw,
++		 nv_encoder->dcb->dpconf.link_nr,
++		 dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
++
++	nv_encoder->dp.dpcd_version = dpcd[0];
++
++	nv_encoder->dp.link_bw = dpcd[1];
++	if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
++	    !nv_encoder->dcb->dpconf.link_bw)
++		nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
++
++	nv_encoder->dp.link_nr = dpcd[2] & 0xf;
++	if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
++		nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
++
++	return true;
++}
++
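++/*
++ * Perform a single AUX channel transaction: enable the channel, wait
++ * for it to become ready, load up to 16 bytes of write data, program
++ * the address and control words, kick the transfer, and retry for as
++ * long as the sink answers with AUX_DEFER.  Bits 20 and 24 of the
++ * control register are undocumented; the checks below encode the
++ * behaviour this code expects of them.
++ */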
++int
++nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
++		 uint8_t *data, int data_nr)
++{
++	struct drm_device *dev = auxch->dev;
++	uint32_t tmp, ctrl, stat = 0, data32[4] = {};
++	int ret = 0, i, index = auxch->rd;
++
++	NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
++
++	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
++	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
++	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
++	if (!(tmp & 0x01000000)) {
++		NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
++		ret = -EIO;
++		goto out;
++	}
++
++	for (i = 0; i < 3; i++) {
++		tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
++		if (tmp & NV50_AUXCH_STAT_STATE_READY)
++			break;
++		udelay(100);
++	}
++
++	if (i == 3) {
++		ret = -EBUSY;
++		goto out;
++	}
++
++	if (!(cmd & 1)) {
++		memcpy(data32, data, data_nr);
++		for (i = 0; i < 4; i++) {
++			NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
++			nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
++		}
++	}
++
++	nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
++	ctrl  = nv_rd32(dev, NV50_AUXCH_CTRL(index));
++	ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
++	ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
++	ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
++
++	for (;;) {
++		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
++		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
++		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
++		if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
++			NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
++				 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
++			ret = -EBUSY;
++			goto out;
++		}
++
++		udelay(400);
++
++		stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
++		if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
++			    NV50_AUXCH_STAT_REPLY_AUX_DEFER)
++			break;
++	}
++
++	if (cmd & 1) {
++		if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
++			ret = -EREMOTEIO;
++			goto out;
++		}
++
++		for (i = 0; i < 4; i++) {
++			data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
++			NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
++		}
++		memcpy(data, data32, data_nr);
++	}
++
++out:
++	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
++	nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
++	tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
++	if (tmp & 0x01000000) {
++		NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
++		ret = -EIO;
++	}
++
++	udelay(400);
++
++	return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
++}
++
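++/*
++ * i2c-over-AUX hook for the DRM DP i2c helper: translates the
++ * helper's MODE_I2C_* requests into native AUX_I2C_* commands,
++ * keeping the Middle-Of-Transaction bit set until the helper signals
++ * a STOP, and retrying indefinitely while the sink defers.
++ */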
++int
++nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
++		      uint8_t write_byte, uint8_t *read_byte)
++{
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
++	struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
++	struct drm_device *dev = auxch->dev;
++	int ret = 0, cmd, addr = algo_data->address;
++	uint8_t *buf;
++
++	if (mode == MODE_I2C_READ) {
++		cmd = AUX_I2C_READ;
++		buf = read_byte;
++	} else {
++		cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
++		buf = &write_byte;
++	}
++
++	if (!(mode & MODE_I2C_STOP))
++		cmd |= AUX_I2C_MOT;
++
++	if (mode & MODE_I2C_START)
++		return 1;
++
++	for (;;) {
++		ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
++		if (ret < 0)
++			return ret;
++
++		switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
++		case NV50_AUXCH_STAT_REPLY_I2C_ACK:
++			return 1;
++		case NV50_AUXCH_STAT_REPLY_I2C_NACK:
++			return -EREMOTEIO;
++		case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
++			udelay(100);
++			break;
++		default:
++			NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
++			return -EREMOTEIO;
++		}
++	}
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
+new file mode 100644
+index 0000000..da3b93b
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
+@@ -0,0 +1,421 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/console.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_crtc_helper.h"
++#include "nouveau_drv.h"
++#include "nouveau_hw.h"
++#include "nouveau_fb.h"
++#include "nouveau_fbcon.h"
++#include "nv50_display.h"
++
++#include "drm_pciids.h"
++
++MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
++int nouveau_ctxfw = 0;
++module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
++
++MODULE_PARM_DESC(noagp, "Disable AGP");
++int nouveau_noagp;
++module_param_named(noagp, nouveau_noagp, int, 0400);
++
++MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
++static int nouveau_modeset = -1; /* kms */
++module_param_named(modeset, nouveau_modeset, int, 0400);
++
++MODULE_PARM_DESC(vbios, "Override default VBIOS location");
++char *nouveau_vbios;
++module_param_named(vbios, nouveau_vbios, charp, 0400);
++
++MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
++int nouveau_vram_pushbuf;
++module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
++
++MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
++int nouveau_vram_notify = 1;
++module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
++
++MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
++int nouveau_duallink = 1;
++module_param_named(duallink, nouveau_duallink, int, 0400);
++
++MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
++int nouveau_uscript_lvds = -1;
++module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
++
++MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
++int nouveau_uscript_tmds = -1;
++module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
++
++MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
++int nouveau_ignorelid = 0;
++module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
++
++MODULE_PARM_DESC(noaccel, "Disable all acceleration");
++int nouveau_noaccel = 0;
++module_param_named(noaccel, nouveau_noaccel, int, 0400);
++
++MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
++int nouveau_nofbaccel = 0;
++module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
++
++MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
++		 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
++		 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
++		 "\t\tDefault: PAL\n"
++		 "\t\t*NOTE* Ignored for cards with external TV encoders.");
++char *nouveau_tv_norm;
++module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
++
++MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
++		"\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
++		"\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
++		"\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
++int nouveau_reg_debug;
++module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
++
++int nouveau_fbpercrtc;
++#if 0
++module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
++#endif
++
++static struct pci_device_id pciidlist[] = {
++	{
++		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
++		.class = PCI_BASE_CLASS_DISPLAY << 16,
++		.class_mask  = 0xff << 16,
++	},
++	{
++		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
++		.class = PCI_BASE_CLASS_DISPLAY << 16,
++		.class_mask  = 0xff << 16,
++	},
++	{}
++};
++
++MODULE_DEVICE_TABLE(pci, pciidlist);
++
++static struct drm_driver driver;
++
++static int __devinit
++nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	return drm_get_dev(pdev, ent, &driver);
++}
++
++static void
++nouveau_pci_remove(struct pci_dev *pdev)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++
++	drm_put_dev(dev);
++}
++
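++/*
++ * Suspend is only supported under KMS.  Unpin the scanout buffers,
++ * evict everything else from VRAM, idle each channel behind a fence,
++ * halt PFIFO/PGRAPH and unload their contexts, then snapshot GPU
++ * objects and instance memory before handing the device to the PCI
++ * layer.
++ */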
++static int
++nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	struct nouveau_channel *chan;
++	struct drm_crtc *crtc;
++	uint32_t fbdev_flags;
++	int ret, i;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -ENODEV;
++
++	if (pm_state.event == PM_EVENT_PRETHAW)
++		return 0;
++
++	fbdev_flags = dev_priv->fbdev_info->flags;
++	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct nouveau_framebuffer *nouveau_fb;
++
++		nouveau_fb = nouveau_framebuffer(crtc->fb);
++		if (!nouveau_fb || !nouveau_fb->nvbo)
++			continue;
++
++		nouveau_bo_unpin(nouveau_fb->nvbo);
++	}
++
++	NV_INFO(dev, "Evicting buffers...\n");
++	ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
++
++	NV_INFO(dev, "Idling channels...\n");
++	for (i = 0; i < pfifo->channels; i++) {
++		struct nouveau_fence *fence = NULL;
++
++		chan = dev_priv->fifos[i];
++		if (!chan || (dev_priv->card_type >= NV_50 &&
++			      chan == dev_priv->fifos[0]))
++			continue;
++
++		ret = nouveau_fence_new(chan, &fence, true);
++		if (ret == 0) {
++			ret = nouveau_fence_wait(fence, NULL, false, false);
++			nouveau_fence_unref((void *)&fence);
++		}
++
++		if (ret) {
++			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
++				 chan->id);
++		}
++	}
++
++	pgraph->fifo_access(dev, false);
++	nouveau_wait_for_idle(dev);
++	pfifo->reassign(dev, false);
++	pfifo->disable(dev);
++	pfifo->unload_context(dev);
++	pgraph->unload_context(dev);
++
++	NV_INFO(dev, "Suspending GPU objects...\n");
++	ret = nouveau_gpuobj_suspend(dev);
++	if (ret) {
++		NV_ERROR(dev, "... failed: %d\n", ret);
++		goto out_abort;
++	}
++
++	ret = pinstmem->suspend(dev);
++	if (ret) {
++		NV_ERROR(dev, "... failed: %d\n", ret);
++		nouveau_gpuobj_suspend_cleanup(dev);
++		goto out_abort;
++	}
++
++	NV_INFO(dev, "And we're gone!\n");
++	pci_save_state(pdev);
++	if (pm_state.event == PM_EVENT_SUSPEND) {
++		pci_disable_device(pdev);
++		pci_set_power_state(pdev, PCI_D3hot);
++	}
++
++	acquire_console_sem();
++	fb_set_suspend(dev_priv->fbdev_info, 1);
++	release_console_sem();
++	dev_priv->fbdev_info->flags = fbdev_flags;
++	return 0;
++
++out_abort:
++	NV_INFO(dev, "Re-enabling acceleration...\n");
++	pfifo->enable(dev);
++	pfifo->reassign(dev, true);
++	pgraph->fifo_access(dev, true);
++	return ret;
++}
++
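++/*
++ * Resume reverses the suspend path: re-enable the PCI device, re-run
++ * the VBIOS init tables to POST the card, bring the engines back up,
++ * restore GPU objects, re-pin the scanout buffers and force a
++ * modeset so display state (including the CLUTs) is reloaded.
++ */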
++static int
++nouveau_pci_resume(struct pci_dev *pdev)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++	struct drm_crtc *crtc;
++	uint32_t fbdev_flags;
++	int ret, i;
++
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -ENODEV;
++
++	fbdev_flags = dev_priv->fbdev_info->flags;
++	dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
++
++	NV_INFO(dev, "We're back, enabling device...\n");
++	pci_set_power_state(pdev, PCI_D0);
++	pci_restore_state(pdev);
++	ret = pci_enable_device(pdev);
++	if (ret)
++		return ret;
++	pci_set_master(dev->pdev);
++
++	NV_INFO(dev, "POSTing device...\n");
++	ret = nouveau_run_vbios_init(dev);
++	if (ret)
++		return ret;
++
++	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
++		ret = nouveau_mem_init_agp(dev);
++		if (ret) {
++			NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
++			return ret;
++		}
++	}
++
++	NV_INFO(dev, "Reinitialising engines...\n");
++	engine->instmem.resume(dev);
++	engine->mc.init(dev);
++	engine->timer.init(dev);
++	engine->fb.init(dev);
++	engine->graph.init(dev);
++	engine->fifo.init(dev);
++
++	NV_INFO(dev, "Restoring GPU objects...\n");
++	nouveau_gpuobj_resume(dev);
++
++	nouveau_irq_postinstall(dev);
++
++	/* Re-write SKIPS, they'll have been lost over the suspend */
++	if (nouveau_vram_pushbuf) {
++		struct nouveau_channel *chan;
++		int j;
++
++		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++			chan = dev_priv->fifos[i];
++			if (!chan || !chan->pushbuf_bo)
++				continue;
++
++			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
++				nouveau_bo_wr32(chan->pushbuf_bo, j, 0);
++		}
++	}
++
++	NV_INFO(dev, "Restoring mode...\n");
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct nouveau_framebuffer *nouveau_fb;
++
++		nouveau_fb = nouveau_framebuffer(crtc->fb);
++		if (!nouveau_fb || !nouveau_fb->nvbo)
++			continue;
++
++		nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
++	}
++
++	if (dev_priv->card_type < NV_50) {
++		nv04_display_restore(dev);
++		NVLockVgaCrtcs(dev, false);
++	} else {
++		nv50_display_init(dev);
++	}
++
++	/* Force CLUT to get re-loaded during modeset */
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++
++		nv_crtc->lut.depth = 0;
++	}
++
++	acquire_console_sem();
++	fb_set_suspend(dev_priv->fbdev_info, 0);
++	release_console_sem();
++
++	nouveau_fbcon_zfill(dev);
++
++	drm_helper_resume_force_mode(dev);
++	dev_priv->fbdev_info->flags = fbdev_flags;
++	return 0;
++}
++
++static struct drm_driver driver = {
++	.driver_features =
++		DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
++		DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
++	.load = nouveau_load,
++	.firstopen = nouveau_firstopen,
++	.lastclose = nouveau_lastclose,
++	.unload = nouveau_unload,
++	.preclose = nouveau_preclose,
++#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
++	.debugfs_init = nouveau_debugfs_init,
++	.debugfs_cleanup = nouveau_debugfs_takedown,
++#endif
++	.irq_preinstall = nouveau_irq_preinstall,
++	.irq_postinstall = nouveau_irq_postinstall,
++	.irq_uninstall = nouveau_irq_uninstall,
++	.irq_handler = nouveau_irq_handler,
++	.reclaim_buffers = drm_core_reclaim_buffers,
++	.get_map_ofs = drm_core_get_map_ofs,
++	.get_reg_ofs = drm_core_get_reg_ofs,
++	.ioctls = nouveau_ioctls,
++	.fops = {
++		.owner = THIS_MODULE,
++		.open = drm_open,
++		.release = drm_release,
++		.unlocked_ioctl = drm_ioctl,
++		.mmap = nouveau_ttm_mmap,
++		.poll = drm_poll,
++		.fasync = drm_fasync,
++#if defined(CONFIG_COMPAT)
++		.compat_ioctl = nouveau_compat_ioctl,
++#endif
++	},
++	.pci_driver = {
++		.name = DRIVER_NAME,
++		.id_table = pciidlist,
++		.probe = nouveau_pci_probe,
++		.remove = nouveau_pci_remove,
++		.suspend = nouveau_pci_suspend,
++		.resume = nouveau_pci_resume
++	},
++
++	.gem_init_object = nouveau_gem_object_new,
++	.gem_free_object = nouveau_gem_object_del,
++
++	.name = DRIVER_NAME,
++	.desc = DRIVER_DESC,
++#ifdef GIT_REVISION
++	.date = GIT_REVISION,
++#else
++	.date = DRIVER_DATE,
++#endif
++	.major = DRIVER_MAJOR,
++	.minor = DRIVER_MINOR,
++	.patchlevel = DRIVER_PATCHLEVEL,
++};
++
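++/*
++ * modeset=-1 (the default) means auto-detect: enable KMS unless the
++ * user forced a text console with the nomodeset boot parameter.
++ */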
++static int __init nouveau_init(void)
++{
++	driver.num_ioctls = nouveau_max_ioctl;
++
++	if (nouveau_modeset == -1) {
++#ifdef CONFIG_VGA_CONSOLE
++		if (vgacon_text_force())
++			nouveau_modeset = 0;
++		else
++#endif
++			nouveau_modeset = 1;
++	}
++
++	if (nouveau_modeset == 1)
++		driver.driver_features |= DRIVER_MODESET;
++
++	return drm_init(&driver);
++}
++
++static void __exit nouveau_exit(void)
++{
++	drm_exit(&driver);
++}
++
++module_init(nouveau_init);
++module_exit(nouveau_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+new file mode 100644
+index 0000000..1c15ef3
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -0,0 +1,1351 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_DRV_H__
++#define __NOUVEAU_DRV_H__
++
++#define DRIVER_AUTHOR		"Stephane Marchesin"
++#define DRIVER_EMAIL		"dri-devel at lists.sourceforge.net"
++
++#define DRIVER_NAME		"nouveau"
++#define DRIVER_DESC		"nVidia Riva/TNT/GeForce"
++#define DRIVER_DATE		"20090420"
++
++#define DRIVER_MAJOR		0
++#define DRIVER_MINOR		0
++#define DRIVER_PATCHLEVEL	15
++
++#define NOUVEAU_FAMILY   0x0000FFFF
++#define NOUVEAU_FLAGS    0xFFFF0000
++
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement.h"
++#include "ttm/ttm_memory.h"
++#include "ttm/ttm_module.h"
++
++struct nouveau_fpriv {
++	struct ttm_object_file *tfile;
++};
++
++#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
++
++#include "nouveau_drm.h"
++#include "nouveau_reg.h"
++#include "nouveau_bios.h"
++struct nouveau_grctx;
++
++#define MAX_NUM_DCB_ENTRIES 16
++
++#define NOUVEAU_MAX_CHANNEL_NR 128
++#define NOUVEAU_MAX_TILE_NR 15
++
++#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
++#define NV50_VM_BLOCK    (512*1024*1024ULL)
++#define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
++
++struct nouveau_tile_reg {
++	struct nouveau_fence *fence;
++	uint32_t addr;
++	uint32_t size;
++	bool used;
++};
++
++struct nouveau_bo {
++	struct ttm_buffer_object bo;
++	struct ttm_placement placement;
++	u32 placements[3];
++	struct ttm_bo_kmap_obj kmap;
++	struct list_head head;
++
++	/* protected by ttm_bo_reserve() */
++	struct drm_file *reserved_by;
++	struct list_head entry;
++	int pbbo_index;
++
++	struct nouveau_channel *channel;
++
++	bool mappable;
++	bool no_vm;
++
++	uint32_t tile_mode;
++	uint32_t tile_flags;
++	struct nouveau_tile_reg *tile;
++
++	struct drm_gem_object *gem;
++	struct drm_file *cpu_filp;
++	int pin_refcnt;
++};
++
++static inline struct nouveau_bo *
++nouveau_bo(struct ttm_buffer_object *bo)
++{
++	return container_of(bo, struct nouveau_bo, bo);
++}
++
++static inline struct nouveau_bo *
++nouveau_gem_object(struct drm_gem_object *gem)
++{
++	return gem ? gem->driver_private : NULL;
++}
++
++/* TODO: submit equivalent to TTM generic API upstream? */
++static inline void __iomem *
++nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
++{
++	bool is_iomem;
++	void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
++						&nvbo->kmap, &is_iomem);
++	WARN_ON_ONCE(ioptr && !is_iomem);
++	return ioptr;
++}
++
++struct mem_block {
++	struct mem_block *next;
++	struct mem_block *prev;
++	uint64_t start;
++	uint64_t size;
++	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
++};
++
++enum nouveau_flags {
++	NV_NFORCE   = 0x10000000,
++	NV_NFORCE2  = 0x20000000
++};
++
++#define NVOBJ_ENGINE_SW		0
++#define NVOBJ_ENGINE_GR		1
++#define NVOBJ_ENGINE_DISPLAY	2
++#define NVOBJ_ENGINE_INT	0xdeadbeef
++
++#define NVOBJ_FLAG_ALLOW_NO_REFS	(1 << 0)
++#define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
++#define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
++#define NVOBJ_FLAG_FAKE			(1 << 3)
++struct nouveau_gpuobj {
++	struct list_head list;
++
++	struct nouveau_channel *im_channel;
++	struct mem_block *im_pramin;
++	struct nouveau_bo *im_backing;
++	uint32_t im_backing_start;
++	uint32_t *im_backing_suspend;
++	int im_bound;
++
++	uint32_t flags;
++	int refcount;
++
++	uint32_t engine;
++	uint32_t class;
++
++	void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
++	void *priv;
++};
++
++struct nouveau_gpuobj_ref {
++	struct list_head list;
++
++	struct nouveau_gpuobj *gpuobj;
++	uint32_t instance;
++
++	struct nouveau_channel *channel;
++	int handle;
++};
++
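++/*
++ * Per-channel state: one of these exists for each hardware FIFO
++ * channel, whether owned by a userspace client or used internally by
++ * the kernel, tying together its push buffer, notifier memory,
++ * fencing state and PFIFO/PGRAPH contexts.
++ */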
++struct nouveau_channel {
++	struct drm_device *dev;
++	int id;
++
++	/* owner of this fifo */
++	struct drm_file *file_priv;
++	/* mapping of the fifo itself */
++	struct drm_local_map *map;
++
++	/* mapping of the regs controlling the fifo */
++	void __iomem *user;
++	uint32_t user_get;
++	uint32_t user_put;
++
++	/* Fencing */
++	struct {
++		/* lock protects the pending list only */
++		spinlock_t lock;
++		struct list_head pending;
++		uint32_t sequence;
++		uint32_t sequence_ack;
++		uint32_t last_sequence_irq;
++	} fence;
++
++	/* DMA push buffer */
++	struct nouveau_gpuobj_ref *pushbuf;
++	struct nouveau_bo         *pushbuf_bo;
++	uint32_t                   pushbuf_base;
++
++	/* Notifier memory */
++	struct nouveau_bo *notifier_bo;
++	struct mem_block *notifier_heap;
++
++	/* PFIFO context */
++	struct nouveau_gpuobj_ref *ramfc;
++	struct nouveau_gpuobj_ref *cache;
++
++	/* PGRAPH context */
++	/* XXX maybe merge these two pointers as private data? */
++	struct nouveau_gpuobj_ref *ramin_grctx;
++	void *pgraph_ctx;
++
++	/* NV50 VM */
++	struct nouveau_gpuobj     *vm_pd;
++	struct nouveau_gpuobj_ref *vm_gart_pt;
++	struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
++
++	/* Objects */
++	struct nouveau_gpuobj_ref *ramin; /* Private instmem */
++	struct mem_block          *ramin_heap; /* Private PRAMIN heap */
++	struct nouveau_gpuobj_ref *ramht; /* Hash table */
++	struct list_head           ramht_refs; /* Objects referenced by RAMHT */
++
++	/* GPU object info for stuff used in-kernel (mm_enabled) */
++	uint32_t m2mf_ntfy;
++	uint32_t vram_handle;
++	uint32_t gart_handle;
++	bool accel_done;
++
++	/* Push buffer state (only for drm's channel on !mm_enabled) */
++	struct {
++		int max;
++		int free;
++		int cur;
++		int put;
++		/* access via pushbuf_bo */
++	} dma;
++
++	uint32_t sw_subchannel[8];
++
++	struct {
++		struct nouveau_gpuobj *vblsem;
++		uint32_t vblsem_offset;
++		uint32_t vblsem_rval;
++		struct list_head vbl_wait;
++	} nvsw;
++
++	struct {
++		bool active;
++		char name[32];
++		struct drm_info_list info;
++	} debugfs;
++};
++
++struct nouveau_instmem_engine {
++	void	*priv;
++
++	int	(*init)(struct drm_device *dev);
++	void	(*takedown)(struct drm_device *dev);
++	int	(*suspend)(struct drm_device *dev);
++	void	(*resume)(struct drm_device *dev);
++
++	int	(*populate)(struct drm_device *, struct nouveau_gpuobj *,
++			    uint32_t *size);
++	void	(*clear)(struct drm_device *, struct nouveau_gpuobj *);
++	int	(*bind)(struct drm_device *, struct nouveau_gpuobj *);
++	int	(*unbind)(struct drm_device *, struct nouveau_gpuobj *);
++	void	(*prepare_access)(struct drm_device *, bool write);
++	void	(*finish_access)(struct drm_device *);
++};
++
++struct nouveau_mc_engine {
++	int  (*init)(struct drm_device *dev);
++	void (*takedown)(struct drm_device *dev);
++};
++
++struct nouveau_timer_engine {
++	int      (*init)(struct drm_device *dev);
++	void     (*takedown)(struct drm_device *dev);
++	uint64_t (*read)(struct drm_device *dev);
++};
++
++struct nouveau_fb_engine {
++	int num_tiles;
++
++	int  (*init)(struct drm_device *dev);
++	void (*takedown)(struct drm_device *dev);
++
++	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
++				 uint32_t size, uint32_t pitch);
++};
++
++struct nouveau_fifo_engine {
++	void *priv;
++
++	int  channels;
++
++	int  (*init)(struct drm_device *);
++	void (*takedown)(struct drm_device *);
++
++	void (*disable)(struct drm_device *);
++	void (*enable)(struct drm_device *);
++	bool (*reassign)(struct drm_device *, bool enable);
++	bool (*cache_flush)(struct drm_device *dev);
++	bool (*cache_pull)(struct drm_device *dev, bool enable);
++
++	int  (*channel_id)(struct drm_device *);
++
++	int  (*create_context)(struct nouveau_channel *);
++	void (*destroy_context)(struct nouveau_channel *);
++	int  (*load_context)(struct nouveau_channel *);
++	int  (*unload_context)(struct drm_device *);
++};
++
++struct nouveau_pgraph_object_method {
++	int id;
++	int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
++		      uint32_t data);
++};
++
++struct nouveau_pgraph_object_class {
++	int id;
++	bool software;
++	struct nouveau_pgraph_object_method *methods;
++};
++
++struct nouveau_pgraph_engine {
++	struct nouveau_pgraph_object_class *grclass;
++	bool accel_blocked;
++	void *ctxprog;
++	void *ctxvals;
++	int grctx_size;
++
++	int  (*init)(struct drm_device *);
++	void (*takedown)(struct drm_device *);
++
++	void (*fifo_access)(struct drm_device *, bool);
++
++	struct nouveau_channel *(*channel)(struct drm_device *);
++	int  (*create_context)(struct nouveau_channel *);
++	void (*destroy_context)(struct nouveau_channel *);
++	int  (*load_context)(struct nouveau_channel *);
++	int  (*unload_context)(struct drm_device *);
++
++	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
++				  uint32_t size, uint32_t pitch);
++};
++
++struct nouveau_engine {
++	struct nouveau_instmem_engine instmem;
++	struct nouveau_mc_engine      mc;
++	struct nouveau_timer_engine   timer;
++	struct nouveau_fb_engine      fb;
++	struct nouveau_pgraph_engine  graph;
++	struct nouveau_fifo_engine    fifo;
++};
++
++struct nouveau_pll_vals {
++	union {
++		struct {
++#ifdef __BIG_ENDIAN
++			uint8_t N1, M1, N2, M2;
++#else
++			uint8_t M1, N1, M2, N2;
++#endif
++		};
++		struct {
++			uint16_t NM1, NM2;
++		} __attribute__((packed));
++	};
++	int log2P;
++
++	int refclk;
++};
++
++enum nv04_fp_display_regs {
++	FP_DISPLAY_END,
++	FP_TOTAL,
++	FP_CRTC,
++	FP_SYNC_START,
++	FP_SYNC_END,
++	FP_VALID_START,
++	FP_VALID_END
++};
++
++struct nv04_crtc_reg {
++	unsigned char MiscOutReg;     /* */
++	uint8_t CRTC[0x9f];
++	uint8_t CR58[0x10];
++	uint8_t Sequencer[5];
++	uint8_t Graphics[9];
++	uint8_t Attribute[21];
++	unsigned char DAC[768];       /* Internal color lookup table */
++
++	/* PCRTC regs */
++	uint32_t fb_start;
++	uint32_t crtc_cfg;
++	uint32_t cursor_cfg;
++	uint32_t gpio_ext;
++	uint32_t crtc_830;
++	uint32_t crtc_834;
++	uint32_t crtc_850;
++	uint32_t crtc_eng_ctrl;
++
++	/* PRAMDAC regs */
++	uint32_t nv10_cursync;
++	struct nouveau_pll_vals pllvals;
++	uint32_t ramdac_gen_ctrl;
++	uint32_t ramdac_630;
++	uint32_t ramdac_634;
++	uint32_t tv_setup;
++	uint32_t tv_vtotal;
++	uint32_t tv_vskew;
++	uint32_t tv_vsync_delay;
++	uint32_t tv_htotal;
++	uint32_t tv_hskew;
++	uint32_t tv_hsync_delay;
++	uint32_t tv_hsync_delay2;
++	uint32_t fp_horiz_regs[7];
++	uint32_t fp_vert_regs[7];
++	uint32_t dither;
++	uint32_t fp_control;
++	uint32_t dither_regs[6];
++	uint32_t fp_debug_0;
++	uint32_t fp_debug_1;
++	uint32_t fp_debug_2;
++	uint32_t fp_margin_color;
++	uint32_t ramdac_8c0;
++	uint32_t ramdac_a20;
++	uint32_t ramdac_a24;
++	uint32_t ramdac_a34;
++	uint32_t ctv_regs[38];
++};
++
++struct nv04_output_reg {
++	uint32_t output;
++	int head;
++};
++
++struct nv04_mode_state {
++	uint32_t bpp;
++	uint32_t width;
++	uint32_t height;
++	uint32_t interlace;
++	uint32_t repaint0;
++	uint32_t repaint1;
++	uint32_t screen;
++	uint32_t scale;
++	uint32_t dither;
++	uint32_t extra;
++	uint32_t fifo;
++	uint32_t pixel;
++	uint32_t horiz;
++	int arbitration0;
++	int arbitration1;
++	uint32_t pll;
++	uint32_t pllB;
++	uint32_t vpll;
++	uint32_t vpll2;
++	uint32_t vpllB;
++	uint32_t vpll2B;
++	uint32_t pllsel;
++	uint32_t sel_clk;
++	uint32_t general;
++	uint32_t crtcOwner;
++	uint32_t head;
++	uint32_t head2;
++	uint32_t cursorConfig;
++	uint32_t cursor0;
++	uint32_t cursor1;
++	uint32_t cursor2;
++	uint32_t timingH;
++	uint32_t timingV;
++	uint32_t displayV;
++	uint32_t crtcSync;
++
++	struct nv04_crtc_reg crtc_reg[2];
++};
++
++enum nouveau_card_type {
++	NV_04      = 0x00,
++	NV_10      = 0x10,
++	NV_20      = 0x20,
++	NV_30      = 0x30,
++	NV_40      = 0x40,
++	NV_50      = 0x50,
++};
++
++struct drm_nouveau_private {
++	struct drm_device *dev;
++	enum {
++		NOUVEAU_CARD_INIT_DOWN,
++		NOUVEAU_CARD_INIT_DONE,
++		NOUVEAU_CARD_INIT_FAILED
++	} init_state;
++
++	/* the card type, takes NV_* as values */
++	enum nouveau_card_type card_type;
++	/* exact chipset, derived from NV_PMC_BOOT_0 */
++	int chipset;
++	int flags;
++
++	void __iomem *mmio;
++	void __iomem *ramin;
++	uint32_t ramin_size;
++
++	struct nouveau_bo *vga_ram;
++
++	struct workqueue_struct *wq;
++	struct work_struct irq_work;
++
++	struct list_head vbl_waiting;
++
++	struct {
++		struct ttm_global_reference mem_global_ref;
++		struct ttm_bo_global_ref bo_global_ref;
++		struct ttm_bo_device bdev;
++		spinlock_t bo_list_lock;
++		struct list_head bo_list;
++		atomic_t validate_sequence;
++	} ttm;
++
++	struct fb_info *fbdev_info;
++
++	int fifo_alloc_count;
++	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
++
++	struct nouveau_engine engine;
++	struct nouveau_channel *channel;
++
++	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
++	struct nouveau_gpuobj *ramht;
++	uint32_t ramin_rsvd_vram;
++	uint32_t ramht_offset;
++	uint32_t ramht_size;
++	uint32_t ramht_bits;
++	uint32_t ramfc_offset;
++	uint32_t ramfc_size;
++	uint32_t ramro_offset;
++	uint32_t ramro_size;
++
++	/* base physical addresses */
++	uint64_t fb_phys;
++	uint64_t fb_available_size;
++	uint64_t fb_mappable_pages;
++	uint64_t fb_aper_free;
++
++	struct {
++		enum {
++			NOUVEAU_GART_NONE = 0,
++			NOUVEAU_GART_AGP,
++			NOUVEAU_GART_SGDMA
++		} type;
++		uint64_t aper_base;
++		uint64_t aper_size;
++		uint64_t aper_free;
++
++		struct nouveau_gpuobj *sg_ctxdma;
++		struct page *sg_dummy_page;
++		dma_addr_t sg_dummy_bus;
++
++		/* nottm hack */
++		struct drm_ttm_backend *sg_be;
++		unsigned long sg_handle;
++	} gart_info;
++
++	/* nv10-nv40 tiling regions */
++	struct {
++		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
++		spinlock_t lock;
++	} tile;
++
++	/* G8x/G9x virtual address space */
++	uint64_t vm_gart_base;
++	uint64_t vm_gart_size;
++	uint64_t vm_vram_base;
++	uint64_t vm_vram_size;
++	uint64_t vm_end;
++	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
++	int vm_vram_pt_nr;
++	uint64_t vram_sys_base;
++
++	/* the mtrr covering the FB */
++	int fb_mtrr;
++
++	struct mem_block *ramin_heap;
++
++	/* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
++	uint32_t ctx_table_size;
++	struct nouveau_gpuobj_ref *ctx_table;
++
++	struct list_head gpuobj_list;
++
++	struct nvbios VBIOS;
++	struct nouveau_bios_info *vbios;
++
++	struct nv04_mode_state mode_reg;
++	struct nv04_mode_state saved_reg;
++	uint32_t saved_vga_font[4][16384];
++	uint32_t crtc_owner;
++	uint32_t dac_users[4];
++
++	struct nouveau_suspend_resume {
++		uint32_t fifo_mode;
++		uint32_t graph_ctx_control;
++		uint32_t graph_state;
++		uint32_t *ramin_copy;
++		uint64_t ramin_size;
++	} susres;
++
++	struct backlight_device *backlight;
++	bool acpi_dsm;
++
++	struct nouveau_channel *evo;
++
++	struct {
++		struct dentry *channel_root;
++	} debugfs;
++};
++
++static inline struct drm_nouveau_private *
++nouveau_bdev(struct ttm_bo_device *bd)
++{
++	return container_of(bd, struct drm_nouveau_private, ttm.bdev);
++}
++
++static inline int
++nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
++{
++	struct nouveau_bo *prev;
++
++	if (!pnvbo)
++		return -EINVAL;
++	prev = *pnvbo;
++
++	*pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
++	if (prev) {
++		struct ttm_buffer_object *bo = &prev->bo;
++
++		ttm_bo_unref(&bo);
++	}
++
++	return 0;
++}
++
++#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do {            \
++	struct drm_nouveau_private *nv = dev->dev_private;    \
++	if (nv->init_state != NOUVEAU_CARD_INIT_DONE) {       \
++		NV_ERROR(dev, "called without init\n");       \
++		return -EINVAL;                               \
++	}                                                     \
++} while (0)
++
++#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
++	struct drm_nouveau_private *nv = dev->dev_private;       \
++	if (!nouveau_channel_owner(dev, (cl), (id))) {           \
++		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
++			 DRM_CURRENTPID, (id));                  \
++		return -EPERM;                                   \
++	}                                                        \
++	(ch) = nv->fifos[(id)];                                  \
++} while (0)
++
++/* nouveau_drv.c */
++extern int nouveau_noagp;
++extern int nouveau_duallink;
++extern int nouveau_uscript_lvds;
++extern int nouveau_uscript_tmds;
++extern int nouveau_vram_pushbuf;
++extern int nouveau_vram_notify;
++extern int nouveau_fbpercrtc;
++extern char *nouveau_tv_norm;
++extern int nouveau_reg_debug;
++extern char *nouveau_vbios;
++extern int nouveau_ctxfw;
++extern int nouveau_ignorelid;
++extern int nouveau_nofbaccel;
++extern int nouveau_noaccel;
++
++/* nouveau_state.c */
++extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
++extern int  nouveau_load(struct drm_device *, unsigned long flags);
++extern int  nouveau_firstopen(struct drm_device *);
++extern void nouveau_lastclose(struct drm_device *);
++extern int  nouveau_unload(struct drm_device *);
++extern int  nouveau_ioctl_getparam(struct drm_device *, void *data,
++				   struct drm_file *);
++extern int  nouveau_ioctl_setparam(struct drm_device *, void *data,
++				   struct drm_file *);
++extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
++			       uint32_t reg, uint32_t mask, uint32_t val);
++extern bool nouveau_wait_for_idle(struct drm_device *);
++extern int  nouveau_card_init(struct drm_device *);
++extern int  nouveau_ioctl_card_init(struct drm_device *, void *data,
++				    struct drm_file *);
++extern int  nouveau_ioctl_suspend(struct drm_device *, void *data,
++				  struct drm_file *);
++extern int  nouveau_ioctl_resume(struct drm_device *, void *data,
++				 struct drm_file *);
++
++/* nouveau_mem.c */
++extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
++				 uint64_t size);
++extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
++						 uint64_t size, int align2,
++						 struct drm_file *, int tail);
++extern void nouveau_mem_takedown(struct mem_block **heap);
++extern void nouveau_mem_free_block(struct mem_block *);
++extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
++extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
++extern int  nouveau_mem_init(struct drm_device *);
++extern int  nouveau_mem_init_agp(struct drm_device *);
++extern void nouveau_mem_close(struct drm_device *);
++extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
++						    uint32_t addr,
++						    uint32_t size,
++						    uint32_t pitch);
++extern void nv10_mem_expire_tiling(struct drm_device *dev,
++				   struct nouveau_tile_reg *tile,
++				   struct nouveau_fence *fence);
++extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
++				    uint32_t size, uint32_t flags,
++				    uint64_t phys);
++extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
++			       uint32_t size);
++
++/* nouveau_notifier.c */
++extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
++extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
++extern int  nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
++				   int count, uint32_t *offset);
++extern int  nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
++extern int  nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
++					 struct drm_file *);
++extern int  nouveau_ioctl_notifier_free(struct drm_device *, void *data,
++					struct drm_file *);
++
++/* nouveau_channel.c */
++extern struct drm_ioctl_desc nouveau_ioctls[];
++extern int nouveau_max_ioctl;
++extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
++extern int  nouveau_channel_owner(struct drm_device *, struct drm_file *,
++				  int channel);
++extern int  nouveau_channel_alloc(struct drm_device *dev,
++				  struct nouveau_channel **chan,
++				  struct drm_file *file_priv,
++				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
++extern void nouveau_channel_free(struct nouveau_channel *);
++
++/* nouveau_object.c */
++extern int  nouveau_gpuobj_early_init(struct drm_device *);
++extern int  nouveau_gpuobj_init(struct drm_device *);
++extern void nouveau_gpuobj_takedown(struct drm_device *);
++extern void nouveau_gpuobj_late_takedown(struct drm_device *);
++extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
++extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
++extern void nouveau_gpuobj_resume(struct drm_device *dev);
++extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
++				       uint32_t vram_h, uint32_t tt_h);
++extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
++extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
++			      uint32_t size, int align, uint32_t flags,
++			      struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
++				  uint32_t handle, struct nouveau_gpuobj *,
++				  struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_ref_del(struct drm_device *,
++				  struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
++				   struct nouveau_gpuobj_ref **ref_ret);
++extern int nouveau_gpuobj_new_ref(struct drm_device *,
++				  struct nouveau_channel *alloc_chan,
++				  struct nouveau_channel *ref_chan,
++				  uint32_t handle, uint32_t size, int align,
++				  uint32_t flags, struct nouveau_gpuobj_ref **);
++extern int nouveau_gpuobj_new_fake(struct drm_device *,
++				   uint32_t p_offset, uint32_t b_offset,
++				   uint32_t size, uint32_t flags,
++				   struct nouveau_gpuobj **,
++				   struct nouveau_gpuobj_ref**);
++extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
++				  uint64_t offset, uint64_t size, int access,
++				  int target, struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
++				       uint64_t offset, uint64_t size,
++				       int access, struct nouveau_gpuobj **,
++				       uint32_t *o_ret);
++extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
++				 struct nouveau_gpuobj **);
++extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
++				 struct nouveau_gpuobj **);
++extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
++				     struct drm_file *);
++extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
++				     struct drm_file *);
++
++/* nouveau_irq.c */
++extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
++extern void        nouveau_irq_preinstall(struct drm_device *);
++extern int         nouveau_irq_postinstall(struct drm_device *);
++extern void        nouveau_irq_uninstall(struct drm_device *);
++
++/* nouveau_sgdma.c */
++extern int nouveau_sgdma_init(struct drm_device *);
++extern void nouveau_sgdma_takedown(struct drm_device *);
++extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
++				  uint32_t *page);
++extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
++
++/* nouveau_debugfs.c */
++#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
++extern int  nouveau_debugfs_init(struct drm_minor *);
++extern void nouveau_debugfs_takedown(struct drm_minor *);
++extern int  nouveau_debugfs_channel_init(struct nouveau_channel *);
++extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
++#else
++static inline int
++nouveau_debugfs_init(struct drm_minor *minor)
++{
++	return 0;
++}
++
++static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
++{
++}
++
++static inline int
++nouveau_debugfs_channel_init(struct nouveau_channel *chan)
++{
++	return 0;
++}
++
++static inline void
++nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
++{
++}
++#endif
++
++/* nouveau_dma.c */
++extern void nouveau_dma_pre_init(struct nouveau_channel *);
++extern int  nouveau_dma_init(struct nouveau_channel *);
++extern int  nouveau_dma_wait(struct nouveau_channel *, int size);
++
++/* nouveau_acpi.c */
++#ifdef CONFIG_ACPI
++extern int nouveau_hybrid_setup(struct drm_device *dev);
++extern bool nouveau_dsm_probe(struct drm_device *dev);
++#else
++static inline int nouveau_hybrid_setup(struct drm_device *dev)
++{
++	return 0;
++}
++static inline bool nouveau_dsm_probe(struct drm_device *dev)
++{
++	return false;
++}
++#endif
++
++/* nouveau_backlight.c */
++#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
++extern int nouveau_backlight_init(struct drm_device *);
++extern void nouveau_backlight_exit(struct drm_device *);
++#else
++static inline int nouveau_backlight_init(struct drm_device *dev)
++{
++	return 0;
++}
++
++static inline void nouveau_backlight_exit(struct drm_device *dev) { }
++#endif
++
++/* nouveau_bios.c */
++extern int nouveau_bios_init(struct drm_device *);
++extern void nouveau_bios_takedown(struct drm_device *dev);
++extern int nouveau_run_vbios_init(struct drm_device *);
++extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
++					struct dcb_entry *);
++extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
++						      enum dcb_gpio_tag);
++extern struct dcb_connector_table_entry *
++nouveau_bios_connector_entry(struct drm_device *, int index);
++extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
++			  struct pll_lims *);
++extern int nouveau_bios_run_display_table(struct drm_device *,
++					  struct dcb_entry *,
++					  uint32_t script, int pxclk);
++extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
++				   int *length);
++extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
++extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
++extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
++					 bool *dl, bool *if_is_24bit);
++extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
++			  int head, int pxclk);
++extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
++			    enum LVDS_script, int pxclk);
++
++/* nouveau_ttm.c */
++int nouveau_ttm_global_init(struct drm_nouveau_private *);
++void nouveau_ttm_global_release(struct drm_nouveau_private *);
++int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
++
++/* nouveau_dp.c */
++int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
++		     uint8_t *data, int data_nr);
++bool nouveau_dp_detect(struct drm_encoder *);
++bool nouveau_dp_link_train(struct drm_encoder *);
++
++/* nv04_fb.c */
++extern int  nv04_fb_init(struct drm_device *);
++extern void nv04_fb_takedown(struct drm_device *);
++
++/* nv10_fb.c */
++extern int  nv10_fb_init(struct drm_device *);
++extern void nv10_fb_takedown(struct drm_device *);
++extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
++				      uint32_t, uint32_t);
++
++/* nv40_fb.c */
++extern int  nv40_fb_init(struct drm_device *);
++extern void nv40_fb_takedown(struct drm_device *);
++extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
++				      uint32_t, uint32_t);
++
++/* nv04_fifo.c */
++extern int  nv04_fifo_init(struct drm_device *);
++extern void nv04_fifo_disable(struct drm_device *);
++extern void nv04_fifo_enable(struct drm_device *);
++extern bool nv04_fifo_reassign(struct drm_device *, bool);
++extern bool nv04_fifo_cache_flush(struct drm_device *);
++extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
++extern int  nv04_fifo_channel_id(struct drm_device *);
++extern int  nv04_fifo_create_context(struct nouveau_channel *);
++extern void nv04_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv04_fifo_load_context(struct nouveau_channel *);
++extern int  nv04_fifo_unload_context(struct drm_device *);
++
++/* nv10_fifo.c */
++extern int  nv10_fifo_init(struct drm_device *);
++extern int  nv10_fifo_channel_id(struct drm_device *);
++extern int  nv10_fifo_create_context(struct nouveau_channel *);
++extern void nv10_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv10_fifo_load_context(struct nouveau_channel *);
++extern int  nv10_fifo_unload_context(struct drm_device *);
++
++/* nv40_fifo.c */
++extern int  nv40_fifo_init(struct drm_device *);
++extern int  nv40_fifo_create_context(struct nouveau_channel *);
++extern void nv40_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv40_fifo_load_context(struct nouveau_channel *);
++extern int  nv40_fifo_unload_context(struct drm_device *);
++
++/* nv50_fifo.c */
++extern int  nv50_fifo_init(struct drm_device *);
++extern void nv50_fifo_takedown(struct drm_device *);
++extern int  nv50_fifo_channel_id(struct drm_device *);
++extern int  nv50_fifo_create_context(struct nouveau_channel *);
++extern void nv50_fifo_destroy_context(struct nouveau_channel *);
++extern int  nv50_fifo_load_context(struct nouveau_channel *);
++extern int  nv50_fifo_unload_context(struct drm_device *);
++
++/* nv04_graph.c */
++extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
++extern int  nv04_graph_init(struct drm_device *);
++extern void nv04_graph_takedown(struct drm_device *);
++extern void nv04_graph_fifo_access(struct drm_device *, bool);
++extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
++extern int  nv04_graph_create_context(struct nouveau_channel *);
++extern void nv04_graph_destroy_context(struct nouveau_channel *);
++extern int  nv04_graph_load_context(struct nouveau_channel *);
++extern int  nv04_graph_unload_context(struct drm_device *);
++extern void nv04_graph_context_switch(struct drm_device *);
++
++/* nv10_graph.c */
++extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
++extern int  nv10_graph_init(struct drm_device *);
++extern void nv10_graph_takedown(struct drm_device *);
++extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
++extern int  nv10_graph_create_context(struct nouveau_channel *);
++extern void nv10_graph_destroy_context(struct nouveau_channel *);
++extern int  nv10_graph_load_context(struct nouveau_channel *);
++extern int  nv10_graph_unload_context(struct drm_device *);
++extern void nv10_graph_context_switch(struct drm_device *);
++extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
++					 uint32_t, uint32_t);
++
++/* nv20_graph.c */
++extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
++extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
++extern int  nv20_graph_create_context(struct nouveau_channel *);
++extern void nv20_graph_destroy_context(struct nouveau_channel *);
++extern int  nv20_graph_load_context(struct nouveau_channel *);
++extern int  nv20_graph_unload_context(struct drm_device *);
++extern int  nv20_graph_init(struct drm_device *);
++extern void nv20_graph_takedown(struct drm_device *);
++extern int  nv30_graph_init(struct drm_device *);
++extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
++					 uint32_t, uint32_t);
++
++/* nv40_graph.c */
++extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
++extern int  nv40_graph_init(struct drm_device *);
++extern void nv40_graph_takedown(struct drm_device *);
++extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
++extern int  nv40_graph_create_context(struct nouveau_channel *);
++extern void nv40_graph_destroy_context(struct nouveau_channel *);
++extern int  nv40_graph_load_context(struct nouveau_channel *);
++extern int  nv40_graph_unload_context(struct drm_device *);
++extern void nv40_grctx_init(struct nouveau_grctx *);
++extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
++					 uint32_t, uint32_t);
++
++/* nv50_graph.c */
++extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
++extern int  nv50_graph_init(struct drm_device *);
++extern void nv50_graph_takedown(struct drm_device *);
++extern void nv50_graph_fifo_access(struct drm_device *, bool);
++extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
++extern int  nv50_graph_create_context(struct nouveau_channel *);
++extern void nv50_graph_destroy_context(struct nouveau_channel *);
++extern int  nv50_graph_load_context(struct nouveau_channel *);
++extern int  nv50_graph_unload_context(struct drm_device *);
++extern void nv50_graph_context_switch(struct drm_device *);
++
++/* nouveau_grctx.c */
++extern int  nouveau_grctx_prog_load(struct drm_device *);
++extern void nouveau_grctx_vals_load(struct drm_device *,
++				    struct nouveau_gpuobj *);
++extern void nouveau_grctx_fini(struct drm_device *);
++
++/* nv04_instmem.c */
++extern int  nv04_instmem_init(struct drm_device *);
++extern void nv04_instmem_takedown(struct drm_device *);
++extern int  nv04_instmem_suspend(struct drm_device *);
++extern void nv04_instmem_resume(struct drm_device *);
++extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
++				  uint32_t *size);
++extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
++extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
++extern void nv04_instmem_finish_access(struct drm_device *);
++
++/* nv50_instmem.c */
++extern int  nv50_instmem_init(struct drm_device *);
++extern void nv50_instmem_takedown(struct drm_device *);
++extern int  nv50_instmem_suspend(struct drm_device *);
++extern void nv50_instmem_resume(struct drm_device *);
++extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
++				  uint32_t *size);
++extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
++extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
++extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
++extern void nv50_instmem_finish_access(struct drm_device *);
++
++/* nv04_mc.c */
++extern int  nv04_mc_init(struct drm_device *);
++extern void nv04_mc_takedown(struct drm_device *);
++
++/* nv40_mc.c */
++extern int  nv40_mc_init(struct drm_device *);
++extern void nv40_mc_takedown(struct drm_device *);
++
++/* nv50_mc.c */
++extern int  nv50_mc_init(struct drm_device *);
++extern void nv50_mc_takedown(struct drm_device *);
++
++/* nv04_timer.c */
++extern int  nv04_timer_init(struct drm_device *);
++extern uint64_t nv04_timer_read(struct drm_device *);
++extern void nv04_timer_takedown(struct drm_device *);
++
++extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
++				 unsigned long arg);
++
++/* nv04_dac.c */
++extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
++extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
++extern int nv04_dac_output_offset(struct drm_encoder *encoder);
++extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
++
++/* nv04_dfp.c */
++extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
++extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
++extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
++			       int head, bool dl);
++extern void nv04_dfp_disable(struct drm_device *dev, int head);
++extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
++
++/* nv04_tv.c */
++extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
++extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
++
++/* nv17_tv.c */
++extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
++
++/* nv04_display.c */
++extern int nv04_display_create(struct drm_device *);
++extern void nv04_display_destroy(struct drm_device *);
++extern void nv04_display_restore(struct drm_device *);
++
++/* nv04_crtc.c */
++extern int nv04_crtc_create(struct drm_device *, int index);
++
++/* nouveau_bo.c */
++extern struct ttm_bo_driver nouveau_bo_driver;
++extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
++			  int size, int align, uint32_t flags,
++			  uint32_t tile_mode, uint32_t tile_flags,
++			  bool no_vm, bool mappable, struct nouveau_bo **);
++extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
++extern int nouveau_bo_unpin(struct nouveau_bo *);
++extern int nouveau_bo_map(struct nouveau_bo *);
++extern void nouveau_bo_unmap(struct nouveau_bo *);
++extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype);
++extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
++extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
++extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
++extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
++
++/* nouveau_fence.c */
++struct nouveau_fence;
++extern int nouveau_fence_init(struct nouveau_channel *);
++extern void nouveau_fence_fini(struct nouveau_channel *);
++extern void nouveau_fence_update(struct nouveau_channel *);
++extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
++			     bool emit);
++extern int nouveau_fence_emit(struct nouveau_fence *);
++struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
++extern bool nouveau_fence_signalled(void *obj, void *arg);
++extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
++extern int nouveau_fence_flush(void *obj, void *arg);
++extern void nouveau_fence_unref(void **obj);
++extern void *nouveau_fence_ref(void *obj);
++extern void nouveau_fence_handler(struct drm_device *dev, int channel);
++
++/* nouveau_gem.c */
++extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
++			   int size, int align, uint32_t flags,
++			   uint32_t tile_mode, uint32_t tile_flags,
++			   bool no_vm, bool mappable, struct nouveau_bo **);
++extern int nouveau_gem_object_new(struct drm_gem_object *);
++extern void nouveau_gem_object_del(struct drm_gem_object *);
++extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
++				 struct drm_file *);
++extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
++				     struct drm_file *);
++extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
++					  struct drm_file *);
++extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
++					   struct drm_file *);
++extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
++				 struct drm_file *);
++extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
++				   struct drm_file *);
++extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
++				  struct drm_file *);
++extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
++				      struct drm_file *);
++extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
++				      struct drm_file *);
++extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
++				  struct drm_file *);
++
++/* nv17_gpio.c */
++int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
++int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
++
++#ifndef ioread32_native
++#ifdef __BIG_ENDIAN
++#define ioread16_native ioread16be
++#define iowrite16_native iowrite16be
++#define ioread32_native  ioread32be
++#define iowrite32_native iowrite32be
++#else /* def __BIG_ENDIAN */
++#define ioread16_native ioread16
++#define iowrite16_native iowrite16
++#define ioread32_native  ioread32
++#define iowrite32_native iowrite32
++#endif /* def __BIG_ENDIAN else */
++#endif /* !ioread32_native */
++
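++/*
++ * The *_native accessors above keep MMIO in host byte order on both
++ * little- and big-endian machines (plain ioread32() would byte-swap
++ * on big-endian).  A minimal usage sketch of the wrappers below; the
++ * offset is illustrative, not a documented register:
++ *
++ *	u32 v = nv_rd32(dev, 0x101000);
++ *	nv_wr32(dev, 0x101000, v | 1);
++ */
++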
++/* channel control reg access */
++static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
++{
++	return ioread32_native(chan->user + reg);
++}
++
++static inline void nvchan_wr32(struct nouveau_channel *chan,
++			       unsigned reg, u32 val)
++{
++	iowrite32_native(val, chan->user + reg);
++}
++
++/* register access */
++static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	return ioread32_native(dev_priv->mmio + reg);
++}
++
++static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	iowrite32_native(val, dev_priv->mmio + reg);
++}
++
++static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	return ioread8(dev_priv->mmio + reg);
++}
++
++static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	iowrite8(val, dev_priv->mmio + reg);
++}
++
++#define nv_wait(reg, mask, val) \
++	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
++
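++/*
++ * nv_wait() polls a register until (value & mask) == val, giving up
++ * after two seconds.  Usage sketch; the register and bits are
++ * illustrative, not taken from hardware documentation:
++ *
++ *	if (!nv_wait(0x002504, 0x00000010, 0x00000010))
++ *		NV_ERROR(dev, "timed out waiting for idle\n");
++ */
++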
++/* PRAMIN access */
++static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	return ioread32_native(dev_priv->ramin + offset);
++}
++
++static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	iowrite32_native(val, dev_priv->ramin + offset);
++}
++
++/* object access */
++static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
++				unsigned index)
++{
++	return nv_ri32(dev, obj->im_pramin->start + index * 4);
++}
++
++static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
++				unsigned index, u32 val)
++{
++	nv_wi32(dev, obj->im_pramin->start + index * 4, val);
++}
++
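++/*
++ * nv_ro32()/nv_wo32() treat a gpuobj's contents as an array of 32-bit
++ * words relative to its PRAMIN allocation.  Sketch of a two-word
++ * write (names illustrative):
++ *
++ *	nv_wo32(dev, gpuobj, 0, lower_32_bits(addr));
++ *	nv_wo32(dev, gpuobj, 1, upper_32_bits(addr));
++ */
++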
++/*
++ * Logging
++ * Argument d is (struct drm_device *).
++ */
++#define NV_PRINTK(level, d, fmt, arg...) \
++	printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
++					pci_name(d->pdev), ##arg)
++#ifndef NV_DEBUG_NOTRACE
++#define NV_DEBUG(d, fmt, arg...) do {                                          \
++	if (drm_debug & DRM_UT_DRIVER) {                                       \
++		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__,             \
++			  __LINE__, ##arg);                                    \
++	}                                                                      \
++} while (0)
++#define NV_DEBUG_KMS(d, fmt, arg...) do {                                      \
++	if (drm_debug & DRM_UT_KMS) {                                          \
++		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__,             \
++			  __LINE__, ##arg);                                    \
++	}                                                                      \
++} while (0)
++#else
++#define NV_DEBUG(d, fmt, arg...) do {                                          \
++	if (drm_debug & DRM_UT_DRIVER)                                         \
++		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg);                          \
++} while (0)
++#define NV_DEBUG_KMS(d, fmt, arg...) do {                                      \
++	if (drm_debug & DRM_UT_KMS)                                            \
++		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg);                          \
++} while (0)
++#endif
++#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
++#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
++#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
++#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
++#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
++
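++/*
++ * NV_DEBUG()/NV_DEBUG_KMS() are always compiled in but only print when
++ * the matching bit (DRM_UT_DRIVER/DRM_UT_KMS) is set in the drm.debug
++ * module parameter.  Call-site sketch (arguments illustrative):
++ *
++ *	NV_DEBUG(dev, "ch%d: dma.cur=%d\n", chan->id, chan->dma.cur);
++ */
++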
++/* nouveau_reg_debug bitmask */
++enum {
++	NOUVEAU_REG_DEBUG_MC             = 0x1,
++	NOUVEAU_REG_DEBUG_VIDEO          = 0x2,
++	NOUVEAU_REG_DEBUG_FB             = 0x4,
++	NOUVEAU_REG_DEBUG_EXTDEV         = 0x8,
++	NOUVEAU_REG_DEBUG_CRTC           = 0x10,
++	NOUVEAU_REG_DEBUG_RAMDAC         = 0x20,
++	NOUVEAU_REG_DEBUG_VGACRTC        = 0x40,
++	NOUVEAU_REG_DEBUG_RMVIO          = 0x80,
++	NOUVEAU_REG_DEBUG_VGAATTR        = 0x100,
++	NOUVEAU_REG_DEBUG_EVO            = 0x200,
++};
++
++#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
++	if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
++		NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
++} while (0)
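++
++/*
++ * Register-debug call-site sketch, gated by the nouveau_reg_debug
++ * module parameter bitmask above (arguments illustrative):
++ *
++ *	NV_REG_DEBUG(CRTC, dev, "index 0x%02x -> 0x%02x\n", idx, val);
++ */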
++
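++/* Of the NV10+ chips, only NV10, NV15, NV1A and NV20 (implementations
++ * 0x0100/0x0150/0x01a0/0x0200) have a single CRTC; everything else has
++ * two heads. */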
++static inline bool
++nv_two_heads(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	const int impl = dev->pci_device & 0x0ff0;
++
++	if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
++	    impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
++		return true;
++
++	return false;
++}
++
++static inline bool
++nv_gf4_disp_arch(struct drm_device *dev)
++{
++	return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
++}
++
++static inline bool
++nv_two_reg_pll(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	const int impl = dev->pci_device & 0x0ff0;
++
++	if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
++		return true;
++	return false;
++}
++
++#define NV_SW                                                        0x0000506e
++#define NV_SW_DMA_SEMAPHORE                                          0x00000060
++#define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
++#define NV_SW_SEMAPHORE_ACQUIRE                                      0x00000068
++#define NV_SW_SEMAPHORE_RELEASE                                      0x0000006c
++#define NV_SW_DMA_VBLSEM                                             0x0000018c
++#define NV_SW_VBLSEM_OFFSET                                          0x00000400
++#define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
++#define NV_SW_VBLSEM_RELEASE                                         0x00000408
++
++#endif /* __NOUVEAU_DRV_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
+new file mode 100644
+index 0000000..bc4a240
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
+@@ -0,0 +1,91 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_ENCODER_H__
++#define __NOUVEAU_ENCODER_H__
++
++#include "drm_encoder_slave.h"
++#include "nouveau_drv.h"
++
++#define NV_DPMS_CLEARED 0x80
++
++struct nouveau_encoder {
++	struct drm_encoder_slave base;
++
++	struct dcb_entry *dcb;
++	int or;
++
++	struct drm_display_mode mode;
++	int last_dpms;
++
++	struct nv04_output_reg restore;
++
++	void (*disconnect)(struct nouveau_encoder *encoder);
++
++	union {
++		struct {
++			int dpcd_version;
++			int link_nr;
++			int link_bw;
++		} dp;
++	};
++};
++
++static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
++{
++	struct drm_encoder_slave *slave = to_encoder_slave(enc);
++
++	return container_of(slave, struct nouveau_encoder, base);
++}
++
++static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
++{
++	return &enc->base.base;
++}
++
++struct nouveau_connector *
++nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
++int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
++int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
++
++struct bit_displayport_encoder_table {
++	uint32_t match;
++	uint8_t  record_nr;
++	uint8_t  unknown;
++	uint16_t script0;
++	uint16_t script1;
++	uint16_t unknown_table;
++} __attribute__ ((packed));
++
++struct bit_displayport_encoder_table_entry {
++	uint8_t vs_level;
++	uint8_t pre_level;
++	uint8_t reg0;
++	uint8_t reg1;
++	uint8_t reg2;
++} __attribute__ ((packed));
++
++#endif /* __NOUVEAU_ENCODER_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
+new file mode 100644
+index 0000000..4a3f31a
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
+@@ -0,0 +1,47 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_FB_H__
++#define __NOUVEAU_FB_H__
++
++struct nouveau_framebuffer {
++	struct drm_framebuffer base;
++	struct nouveau_bo *nvbo;
++};
++
++static inline struct nouveau_framebuffer *
++nouveau_framebuffer(struct drm_framebuffer *fb)
++{
++	return container_of(fb, struct nouveau_framebuffer, base);
++}
++
++extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
++
++struct drm_framebuffer *
++nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
++			   struct drm_mode_fb_cmd *);
++
++#endif /* __NOUVEAU_FB_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+new file mode 100644
+index 0000000..ea879a2
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -0,0 +1,423 @@
++/*
++ * Copyright © 2007 David Airlie
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *     David Airlie
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/tty.h>
++#include <linux/slab.h>
++#include <linux/sysrq.h>
++#include <linux/delay.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/screen_info.h>
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_crtc.h"
++#include "drm_crtc_helper.h"
++#include "drm_fb_helper.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++#include "nouveau_crtc.h"
++#include "nouveau_fb.h"
++#include "nouveau_fbcon.h"
++#include "nouveau_dma.h"
++
++static int
++nouveau_fbcon_sync(struct fb_info *info)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++	int ret, i;
++
++	if (!chan || !chan->accel_done ||
++	    info->state != FBINFO_STATE_RUNNING ||
++	    info->flags & FBINFO_HWACCEL_DISABLED)
++		return 0;
++
++	if (RING_SPACE(chan, 4)) {
++		nouveau_fbcon_gpu_lockup(info);
++		return 0;
++	}
++
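++	/* Queue a NOTIFY (method 0x0104) plus a NOP (0x0100), seed the
++	 * notifier status dword with a non-zero value, then busy-wait
++	 * for the GPU to overwrite it. */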
++	BEGIN_RING(chan, 0, 0x0104, 1);
++	OUT_RING(chan, 0);
++	BEGIN_RING(chan, 0, 0x0100, 1);
++	OUT_RING(chan, 0);
++	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
++	FIRE_RING(chan);
++
++	ret = -EBUSY;
++	for (i = 0; i < 100000; i++) {
++		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
++			ret = 0;
++			break;
++		}
++		DRM_UDELAY(1);
++	}
++
++	if (ret) {
++		nouveau_fbcon_gpu_lockup(info);
++		return 0;
++	}
++
++	chan->accel_done = false;
++	return 0;
++}
++
++static struct fb_ops nouveau_fbcon_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = drm_fb_helper_check_var,
++	.fb_set_par = drm_fb_helper_set_par,
++	.fb_setcolreg = drm_fb_helper_setcolreg,
++	.fb_fillrect = cfb_fillrect,
++	.fb_copyarea = cfb_copyarea,
++	.fb_imageblit = cfb_imageblit,
++	.fb_sync = nouveau_fbcon_sync,
++	.fb_pan_display = drm_fb_helper_pan_display,
++	.fb_blank = drm_fb_helper_blank,
++	.fb_setcmap = drm_fb_helper_setcmap,
++};
++
++static struct fb_ops nv04_fbcon_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = drm_fb_helper_check_var,
++	.fb_set_par = drm_fb_helper_set_par,
++	.fb_setcolreg = drm_fb_helper_setcolreg,
++	.fb_fillrect = nv04_fbcon_fillrect,
++	.fb_copyarea = nv04_fbcon_copyarea,
++	.fb_imageblit = nv04_fbcon_imageblit,
++	.fb_sync = nouveau_fbcon_sync,
++	.fb_pan_display = drm_fb_helper_pan_display,
++	.fb_blank = drm_fb_helper_blank,
++	.fb_setcmap = drm_fb_helper_setcmap,
++};
++
++static struct fb_ops nv50_fbcon_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = drm_fb_helper_check_var,
++	.fb_set_par = drm_fb_helper_set_par,
++	.fb_setcolreg = drm_fb_helper_setcolreg,
++	.fb_fillrect = nv50_fbcon_fillrect,
++	.fb_copyarea = nv50_fbcon_copyarea,
++	.fb_imageblit = nv50_fbcon_imageblit,
++	.fb_sync = nouveau_fbcon_sync,
++	.fb_pan_display = drm_fb_helper_pan_display,
++	.fb_blank = drm_fb_helper_blank,
++	.fb_setcmap = drm_fb_helper_setcmap,
++};
++
++static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
++				    u16 blue, int regno)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++
++	nv_crtc->lut.r[regno] = red;
++	nv_crtc->lut.g[regno] = green;
++	nv_crtc->lut.b[regno] = blue;
++}
++
++static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
++				    u16 *blue, int regno)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++
++	*red = nv_crtc->lut.r[regno];
++	*green = nv_crtc->lut.g[regno];
++	*blue = nv_crtc->lut.b[regno];
++}
++
++static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
++	.gamma_set = nouveau_fbcon_gamma_set,
++	.gamma_get = nouveau_fbcon_gamma_get
++};
++
++#if defined(__i386__) || defined(__x86_64__)
++static bool
++nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
++{
++	struct pci_dev *pdev = dev->pdev;
++	int ramin;
++
++	if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
++	    screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
++		return false;
++
++	if (screen_info.lfb_base < pci_resource_start(pdev, 1))
++		goto not_fb;
++
++	if (screen_info.lfb_base + screen_info.lfb_size >=
++	    pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
++		goto not_fb;
++
++	return true;
++not_fb:
++	ramin = 2;
++	if (pci_resource_len(pdev, ramin) == 0) {
++		ramin = 3;
++		if (pci_resource_len(pdev, ramin) == 0)
++			return false;
++	}
++
++	if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
++		return false;
++
++	if (screen_info.lfb_base + screen_info.lfb_size >=
++	    pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
++		return false;
++
++	return true;
++}
++#endif
++
++void
++nouveau_fbcon_zfill(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct fb_info *info = dev_priv->fbdev_info;
++	struct fb_fillrect rect;
++
++	/* Clear the entire fbcon.  The drm will program every connector
++	 * with its preferred mode.  If the sizes differ, one display will
++	 * quite likely have garbage around the console.
++	 */
++	rect.dx = rect.dy = 0;
++	rect.width = info->var.xres_virtual;
++	rect.height = info->var.yres_virtual;
++	rect.color = 0;
++	rect.rop = ROP_COPY;
++	info->fbops->fb_fillrect(info, &rect);
++}
++
++static int
++nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
++		     uint32_t fb_height, uint32_t surface_width,
++		     uint32_t surface_height, uint32_t surface_depth,
++		     uint32_t surface_bpp, struct drm_framebuffer **pfb)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct fb_info *info;
++	struct nouveau_fbcon_par *par;
++	struct drm_framebuffer *fb;
++	struct nouveau_framebuffer *nouveau_fb;
++	struct nouveau_bo *nvbo;
++	struct drm_mode_fb_cmd mode_cmd;
++	struct device *device = &dev->pdev->dev;
++	int size, ret;
++
++	mode_cmd.width = surface_width;
++	mode_cmd.height = surface_height;
++
++	mode_cmd.bpp = surface_bpp;
++	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
++	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
++	mode_cmd.depth = surface_depth;
++
++	size = mode_cmd.pitch * mode_cmd.height;
++	size = roundup(size, PAGE_SIZE);
++
++	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
++			      0, 0x0000, false, true, &nvbo);
++	if (ret) {
++		NV_ERROR(dev, "failed to allocate framebuffer\n");
++		goto out;
++	}
++
++	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
++	if (ret) {
++		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
++		nouveau_bo_ref(NULL, &nvbo);
++		goto out;
++	}
++
++	ret = nouveau_bo_map(nvbo);
++	if (ret) {
++		NV_ERROR(dev, "failed to map fb: %d\n", ret);
++		nouveau_bo_unpin(nvbo);
++		nouveau_bo_ref(NULL, &nvbo);
++		goto out;
++	}
++
++	mutex_lock(&dev->struct_mutex);
++
++	fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
++	if (!fb) {
++		ret = -ENOMEM;
++		NV_ERROR(dev, "failed to allocate fb.\n");
++		goto out_unref;
++	}
++
++	list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
++
++	nouveau_fb = nouveau_framebuffer(fb);
++	*pfb = fb;
++
++	info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
++	if (!info) {
++		ret = -ENOMEM;
++		goto out_unref;
++	}
++
++	par = info->par;
++	par->helper.funcs = &nouveau_fbcon_helper_funcs;
++	par->helper.dev = dev;
++	ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
++	if (ret)
++		goto out_unref;
++	dev_priv->fbdev_info = info;
++
++	strcpy(info->fix.id, "nouveaufb");
++	if (nouveau_nofbaccel)
++		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
++	else
++		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
++			      FBINFO_HWACCEL_FILLRECT |
++			      FBINFO_HWACCEL_IMAGEBLIT;
++	info->fbops = &nouveau_fbcon_ops;
++	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
++			       dev_priv->vm_vram_base;
++	info->fix.smem_len = size;
++
++	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
++	info->screen_size = size;
++
++	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
++	drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
++
++	/* FIXME: we really shouldn't expose mmio space at all */
++	info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
++	info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
++
++	/* Set aperture base/size for vesafb takeover */
++#if defined(__i386__) || defined(__x86_64__)
++	if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
++		/* Some NVIDIA VBIOSes are stupid and decide to put the
++		 * framebuffer in the middle of the PRAMIN BAR for
++		 * whatever reason.  We need to know the exact lfb_base
++		 * to get vesafb kicked off, and the only reliable way
++		 * we have left is to find out lfb_base the same way
++		 * vesafb did.
++		 */
++		info->aperture_base = screen_info.lfb_base;
++		info->aperture_size = screen_info.lfb_size;
++		if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
++			info->aperture_size *= 65536;
++	} else
++#endif
++	{
++		info->aperture_base = info->fix.mmio_start;
++		info->aperture_size = info->fix.mmio_len;
++	}
++
++	info->pixmap.size = 64*1024;
++	info->pixmap.buf_align = 8;
++	info->pixmap.access_align = 32;
++	info->pixmap.flags = FB_PIXMAP_SYSTEM;
++	info->pixmap.scan_align = 1;
++
++	fb->fbdev = info;
++
++	par->nouveau_fb = nouveau_fb;
++	par->dev = dev;
++
++	if (dev_priv->channel && !nouveau_nofbaccel) {
++		switch (dev_priv->card_type) {
++		case NV_50:
++			nv50_fbcon_accel_init(info);
++			info->fbops = &nv50_fbcon_ops;
++			break;
++		default:
++			nv04_fbcon_accel_init(info);
++			info->fbops = &nv04_fbcon_ops;
++			break;
++		}
++	}
++
++	nouveau_fbcon_zfill(dev);
++
++	/* To allow resizing without swapping buffers */
++	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
++						nouveau_fb->base.width,
++						nouveau_fb->base.height,
++						nvbo->bo.offset, nvbo);
++
++	mutex_unlock(&dev->struct_mutex);
++	return 0;
++
++out_unref:
++	mutex_unlock(&dev->struct_mutex);
++out:
++	return ret;
++}
++
++int
++nouveau_fbcon_probe(struct drm_device *dev)
++{
++	NV_DEBUG_KMS(dev, "\n");
++
++	return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
++}
++
++int
++nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
++{
++	struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
++	struct fb_info *info;
++
++	if (!fb)
++		return -EINVAL;
++
++	info = fb->fbdev;
++	if (info) {
++		struct nouveau_fbcon_par *par = info->par;
++
++		unregister_framebuffer(info);
++		nouveau_bo_unmap(nouveau_fb->nvbo);
++		mutex_lock(&dev->struct_mutex);
++		drm_gem_object_unreference(nouveau_fb->nvbo->gem);
++		nouveau_fb->nvbo = NULL;
++		mutex_unlock(&dev->struct_mutex);
++		if (par)
++			drm_fb_helper_free(&par->helper);
++		framebuffer_release(info);
++	}
++
++	return 0;
++}
++
++void nouveau_fbcon_gpu_lockup(struct fb_info *info)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++
++	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
++	info->flags |= FBINFO_HWACCEL_DISABLED;
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+new file mode 100644
+index 0000000..f9c34e1
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+@@ -0,0 +1,54 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NOUVEAU_FBCON_H__
++#define __NOUVEAU_FBCON_H__
++
++#include "drm_fb_helper.h"
++
++struct nouveau_fbcon_par {
++	struct drm_fb_helper helper;
++	struct drm_device *dev;
++	struct nouveau_framebuffer *nouveau_fb;
++};
++
++int nouveau_fbcon_probe(struct drm_device *dev);
++int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
++void nouveau_fbcon_restore(void);
++void nouveau_fbcon_zfill(struct drm_device *dev);
++
++void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
++void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
++void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
++int nv04_fbcon_accel_init(struct fb_info *info);
++void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
++void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
++void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
++int nv50_fbcon_accel_init(struct fb_info *info);
++
++void nouveau_fbcon_gpu_lockup(struct fb_info *info);
++#endif /* __NOUVEAU_FBCON_H__ */
++
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
+new file mode 100644
+index 0000000..faddf53
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
+@@ -0,0 +1,262 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++
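++/* NV10+ exposes the last-retired fence sequence through the channel's
++ * ref counter (user area offset 0x48); older chips rely on the value
++ * saved by the software-method IRQ handler instead. */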
++#define USE_REFCNT (dev_priv->card_type >= NV_10)
++
++struct nouveau_fence {
++	struct nouveau_channel *channel;
++	struct kref refcount;
++	struct list_head entry;
++
++	uint32_t sequence;
++	bool signalled;
++};
++
++static inline struct nouveau_fence *
++nouveau_fence(void *sync_obj)
++{
++	return (struct nouveau_fence *)sync_obj;
++}
++
++static void
++nouveau_fence_del(struct kref *ref)
++{
++	struct nouveau_fence *fence =
++		container_of(ref, struct nouveau_fence, refcount);
++
++	kfree(fence);
++}
++
++void
++nouveau_fence_update(struct nouveau_channel *chan)
++{
++	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
++	struct list_head *entry, *tmp;
++	struct nouveau_fence *fence;
++	uint32_t sequence;
++
++	if (USE_REFCNT)
++		sequence = nvchan_rd32(chan, 0x48);
++	else
++		sequence = chan->fence.last_sequence_irq;
++
++	if (chan->fence.sequence_ack == sequence)
++		return;
++	chan->fence.sequence_ack = sequence;
++
++	list_for_each_safe(entry, tmp, &chan->fence.pending) {
++		fence = list_entry(entry, struct nouveau_fence, entry);
++
++		sequence = fence->sequence;
++		fence->signalled = true;
++		list_del(&fence->entry);
++		kref_put(&fence->refcount, nouveau_fence_del);
++
++		if (sequence == chan->fence.sequence_ack)
++			break;
++	}
++}
++
++int
++nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
++		  bool emit)
++{
++	struct nouveau_fence *fence;
++	int ret = 0;
++
++	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
++	if (!fence)
++		return -ENOMEM;
++	kref_init(&fence->refcount);
++	fence->channel = chan;
++
++	if (emit)
++		ret = nouveau_fence_emit(fence);
++
++	if (ret)
++		nouveau_fence_unref((void *)&fence);
++	*pfence = fence;
++	return ret;
++}
++
++struct nouveau_channel *
++nouveau_fence_channel(struct nouveau_fence *fence)
++{
++	return fence ? fence->channel : NULL;
++}
++
++int
++nouveau_fence_emit(struct nouveau_fence *fence)
++{
++	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
++	struct nouveau_channel *chan = fence->channel;
++	unsigned long flags;
++	int ret;
++
++	ret = RING_SPACE(chan, 2);
++	if (ret)
++		return ret;
++
++	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
++		spin_lock_irqsave(&chan->fence.lock, flags);
++		nouveau_fence_update(chan);
++		spin_unlock_irqrestore(&chan->fence.lock, flags);
++
++		BUG_ON(chan->fence.sequence ==
++		       chan->fence.sequence_ack - 1);
++	}
++
++	fence->sequence = ++chan->fence.sequence;
++
++	kref_get(&fence->refcount);
++	spin_lock_irqsave(&chan->fence.lock, flags);
++	list_add_tail(&fence->entry, &chan->fence.pending);
++	spin_unlock_irqrestore(&chan->fence.lock, flags);
++
++	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
++	OUT_RING(chan, fence->sequence);
++	FIRE_RING(chan);
++
++	return 0;
++}
++
++void
++nouveau_fence_unref(void **sync_obj)
++{
++	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
++
++	if (fence)
++		kref_put(&fence->refcount, nouveau_fence_del);
++	*sync_obj = NULL;
++}
++
++void *
++nouveau_fence_ref(void *sync_obj)
++{
++	struct nouveau_fence *fence = nouveau_fence(sync_obj);
++
++	kref_get(&fence->refcount);
++	return sync_obj;
++}
++
++bool
++nouveau_fence_signalled(void *sync_obj, void *sync_arg)
++{
++	struct nouveau_fence *fence = nouveau_fence(sync_obj);
++	struct nouveau_channel *chan = fence->channel;
++	unsigned long flags;
++
++	if (fence->signalled)
++		return true;
++
++	spin_lock_irqsave(&chan->fence.lock, flags);
++	nouveau_fence_update(chan);
++	spin_unlock_irqrestore(&chan->fence.lock, flags);
++	return fence->signalled;
++}
++
++int
++nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
++{
++	unsigned long timeout = jiffies + (3 * DRM_HZ);
++	int ret = 0;
++
++	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
++
++	while (1) {
++		if (nouveau_fence_signalled(sync_obj, sync_arg))
++			break;
++
++		if (time_after_eq(jiffies, timeout)) {
++			ret = -EBUSY;
++			break;
++		}
++
++		if (lazy)
++			schedule_timeout(1);
++
++		if (intr && signal_pending(current)) {
++			ret = -ERESTARTSYS;
++			break;
++		}
++	}
++
++	__set_current_state(TASK_RUNNING);
++
++	return ret;
++}
++
++int
++nouveau_fence_flush(void *sync_obj, void *sync_arg)
++{
++	return 0;
++}
++
++void
++nouveau_fence_handler(struct drm_device *dev, int channel)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = NULL;
++
++	if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
++		chan = dev_priv->fifos[channel];
++
++	if (chan) {
++		spin_lock_irq(&chan->fence.lock);
++		nouveau_fence_update(chan);
++		spin_unlock_irq(&chan->fence.lock);
++	}
++}
++
++int
++nouveau_fence_init(struct nouveau_channel *chan)
++{
++	INIT_LIST_HEAD(&chan->fence.pending);
++	spin_lock_init(&chan->fence.lock);
++	return 0;
++}
++
++void
++nouveau_fence_fini(struct nouveau_channel *chan)
++{
++	struct list_head *entry, *tmp;
++	struct nouveau_fence *fence;
++
++	list_for_each_safe(entry, tmp, &chan->fence.pending) {
++		fence = list_entry(entry, struct nouveau_fence, entry);
++
++		fence->signalled = true;
++		list_del(&fence->entry);
++		kref_put(&fence->refcount, nouveau_fence_del);
++	}
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+new file mode 100644
+index 0000000..70cc308
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -0,0 +1,994 @@
++/*
++ * Copyright (C) 2008 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++#include "drmP.h"
++#include "drm.h"
++
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++#include "nouveau_dma.h"
++
++#define nouveau_gem_pushbuf_sync(chan) 0
++
++int
++nouveau_gem_object_new(struct drm_gem_object *gem)
++{
++	return 0;
++}
++
++void
++nouveau_gem_object_del(struct drm_gem_object *gem)
++{
++	struct nouveau_bo *nvbo = gem->driver_private;
++	struct ttm_buffer_object *bo;
++
++	if (!nvbo)
++		return;
++
++	bo = &nvbo->bo;
++	nvbo->gem = NULL;
++
++	if (unlikely(nvbo->cpu_filp))
++		ttm_bo_synccpu_write_release(bo);
++
++	if (unlikely(nvbo->pin_refcnt)) {
++		nvbo->pin_refcnt = 1;
++		nouveau_bo_unpin(nvbo);
++	}
++
++	ttm_bo_unref(&bo);
++}
++
++int
++nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
++		int size, int align, uint32_t flags, uint32_t tile_mode,
++		uint32_t tile_flags, bool no_vm, bool mappable,
++		struct nouveau_bo **pnvbo)
++{
++	struct nouveau_bo *nvbo;
++	int ret;
++
++	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
++			     tile_flags, no_vm, mappable, pnvbo);
++	if (ret)
++		return ret;
++	nvbo = *pnvbo;
++
++	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
++	if (!nvbo->gem) {
++		nouveau_bo_ref(NULL, pnvbo);
++		return -ENOMEM;
++	}
++
++	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
++	nvbo->gem->driver_private = nvbo;
++	return 0;
++}
++
++static int
++nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
++{
++	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
++
++	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
++		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
++	else
++		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
++
++	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
++	rep->offset = nvbo->bo.offset;
++	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
++	rep->tile_mode = nvbo->tile_mode;
++	rep->tile_flags = nvbo->tile_flags;
++	return 0;
++}
++
++static bool
++nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
++{
++	switch (tile_flags) {
++	case 0x0000:
++	case 0x1800:
++	case 0x2800:
++	case 0x4800:
++	case 0x7000:
++	case 0x7400:
++	case 0x7a00:
++	case 0xe000:
++		break;
++	default:
++		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
++		return false;
++	}
++
++	return true;
++}
++
++int
++nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
++		      struct drm_file *file_priv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_nouveau_gem_new *req = data;
++	struct nouveau_bo *nvbo = NULL;
++	struct nouveau_channel *chan = NULL;
++	uint32_t flags = 0;
++	int ret = 0;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
++		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
++
++	if (req->channel_hint) {
++		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
++						     file_priv, chan);
++	}
++
++	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
++		flags |= TTM_PL_FLAG_VRAM;
++	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
++		flags |= TTM_PL_FLAG_TT;
++	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
++		flags |= TTM_PL_FLAG_SYSTEM;
++
++	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
++		return -EINVAL;
++
++	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
++			      req->info.tile_mode, req->info.tile_flags, false,
++			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
++			      &nvbo);
++	if (ret)
++		return ret;
++
++	ret = nouveau_gem_info(nvbo->gem, &req->info);
++	if (ret)
++		goto out;
++
++	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
++out:
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_handle_unreference(nvbo->gem);
++	mutex_unlock(&dev->struct_mutex);
++
++	if (ret)
++		drm_gem_object_unreference(nvbo->gem);
++	return ret;
++}
++
++static int
++nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
++		       uint32_t write_domains, uint32_t valid_domains)
++{
++	struct nouveau_bo *nvbo = gem->driver_private;
++	struct ttm_buffer_object *bo = &nvbo->bo;
++	uint64_t flags;
++
++	if (!valid_domains || (!read_domains && !write_domains))
++		return -EINVAL;
++
++	if (write_domains) {
++		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
++		    (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
++			flags = TTM_PL_FLAG_VRAM;
++		else
++		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
++		    (write_domains & NOUVEAU_GEM_DOMAIN_GART))
++			flags = TTM_PL_FLAG_TT;
++		else
++			return -EINVAL;
++	} else {
++		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
++		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
++		    bo->mem.mem_type == TTM_PL_VRAM)
++			flags = TTM_PL_FLAG_VRAM;
++		else
++		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
++		    (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
++		    bo->mem.mem_type == TTM_PL_TT)
++			flags = TTM_PL_FLAG_TT;
++		else
++		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
++		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
++			flags = TTM_PL_FLAG_VRAM;
++		else
++			flags = TTM_PL_FLAG_TT;
++	}
++
++	nouveau_bo_placement_set(nvbo, flags);
++	return 0;
++}
++
++struct validate_op {
++	struct list_head vram_list;
++	struct list_head gart_list;
++	struct list_head both_list;
++};
++
++static void
++validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
++{
++	struct list_head *entry, *tmp;
++	struct nouveau_bo *nvbo;
++
++	list_for_each_safe(entry, tmp, list) {
++		nvbo = list_entry(entry, struct nouveau_bo, entry);
++		if (likely(fence)) {
++			struct nouveau_fence *prev_fence;
++
++			spin_lock(&nvbo->bo.lock);
++			prev_fence = nvbo->bo.sync_obj;
++			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
++			spin_unlock(&nvbo->bo.lock);
++			nouveau_fence_unref((void *)&prev_fence);
++		}
++
++		list_del(&nvbo->entry);
++		nvbo->reserved_by = NULL;
++		ttm_bo_unreserve(&nvbo->bo);
++		drm_gem_object_unreference(nvbo->gem);
++	}
++}
++
++static void
++validate_fini(struct validate_op *op, struct nouveau_fence *fence)
++{
++	validate_fini_list(&op->vram_list, fence);
++	validate_fini_list(&op->gart_list, fence);
++	validate_fini_list(&op->both_list, fence);
++}
++
++static int
++validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
++	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
++	      int nr_buffers, struct validate_op *op)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t sequence;
++	int trycnt = 0;
++	int ret, i;
++
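++	/* Take a global validation ticket so ttm_bo_reserve() can break
++	 * reservation deadlocks between clients deterministically; on
++	 * -EAGAIN we unwind, wait on the contended buffer and retry the
++	 * whole list. */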
++	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
++retry:
++	if (++trycnt > 100000) {
++		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
++		return -EINVAL;
++	}
++
++	for (i = 0; i < nr_buffers; i++) {
++		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
++		struct drm_gem_object *gem;
++		struct nouveau_bo *nvbo;
++
++		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
++		if (!gem) {
++			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
++			validate_fini(op, NULL);
++			return -EINVAL;
++		}
++		nvbo = gem->driver_private;
++
++		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
++			NV_ERROR(dev, "multiple instances of buffer %d on "
++				      "validation list\n", b->handle);
++			validate_fini(op, NULL);
++			return -EINVAL;
++		}
++
++		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
++		if (ret) {
++			validate_fini(op, NULL);
++			if (ret == -EAGAIN)
++				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
++			drm_gem_object_unreference(gem);
++			if (ret)
++				return ret;
++			goto retry;
++		}
++
++		nvbo->reserved_by = file_priv;
++		nvbo->pbbo_index = i;
++		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
++		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
++			list_add_tail(&nvbo->entry, &op->both_list);
++		else
++		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
++			list_add_tail(&nvbo->entry, &op->vram_list);
++		else
++		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
++			list_add_tail(&nvbo->entry, &op->gart_list);
++		else {
++			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
++				 b->valid_domains);
++			list_add_tail(&nvbo->entry, &op->both_list);
++			validate_fini(op, NULL);
++			return -EINVAL;
++		}
++
++		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
++			validate_fini(op, NULL);
++
++			if (nvbo->cpu_filp == file_priv) {
++				NV_ERROR(dev, "bo %p mapped by process trying "
++					      "to validate it!\n", nvbo);
++				return -EINVAL;
++			}
++
++			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
++			if (ret)
++				return ret;
++			goto retry;
++		}
++	}
++
++	return 0;
++}
++
++static int
++validate_list(struct nouveau_channel *chan, struct list_head *list,
++	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
++{
++	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
++				(void __force __user *)(uintptr_t)user_pbbo_ptr;
++	struct nouveau_bo *nvbo;
++	int ret, relocs = 0;
++
++	list_for_each_entry(nvbo, list, entry) {
++		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
++		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
++
++		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
++			spin_lock(&nvbo->bo.lock);
++			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
++			spin_unlock(&nvbo->bo.lock);
++			if (unlikely(ret))
++				return ret;
++		}
++
++		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
++					     b->write_domains,
++					     b->valid_domains);
++		if (unlikely(ret))
++			return ret;
++
++		nvbo->channel = chan;
++		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
++				      false, false);
++		nvbo->channel = NULL;
++		if (unlikely(ret))
++			return ret;
++
++		if (nvbo->bo.offset == b->presumed_offset &&
++		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
++		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
++		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
++		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
++			continue;
++
++		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
++			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
++		else
++			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
++		b->presumed_offset = nvbo->bo.offset;
++		b->presumed_ok = 0;
++		relocs++;
++
++		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
++			return -EFAULT;
++	}
++
++	return relocs;
++}
++
++static int
++nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
++			     struct drm_file *file_priv,
++			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
++			     uint64_t user_buffers, int nr_buffers,
++			     struct validate_op *op, int *apply_relocs)
++{
++	int ret, relocs = 0;
++
++	INIT_LIST_HEAD(&op->vram_list);
++	INIT_LIST_HEAD(&op->gart_list);
++	INIT_LIST_HEAD(&op->both_list);
++
++	if (nr_buffers == 0)
++		return 0;
++
++	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
++	if (unlikely(ret))
++		return ret;
++
++	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
++	if (unlikely(ret < 0)) {
++		validate_fini(op, NULL);
++		return ret;
++	}
++	relocs += ret;
++
++	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
++	if (unlikely(ret < 0)) {
++		validate_fini(op, NULL);
++		return ret;
++	}
++	relocs += ret;
++
++	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
++	if (unlikely(ret < 0)) {
++		validate_fini(op, NULL);
++		return ret;
++	}
++	relocs += ret;
++
++	*apply_relocs = relocs;
++	return 0;
++}
++
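++/* Copy a user array into a freshly allocated kernel buffer.  Callers
++ * check nmemb against the pushbuf limits beforehand, which keeps
++ * nmemb * size from overflowing. */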
++static inline void *
++u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
++{
++	void *mem;
++	void __user *userptr = (void __force __user *)(uintptr_t)user;
++
++	mem = kmalloc(nmemb * size, GFP_KERNEL);
++	if (!mem)
++		return ERR_PTR(-ENOMEM);
++
++	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
++		kfree(mem);
++		return ERR_PTR(-EFAULT);
++	}
++
++	return mem;
++}
++
++static int
++nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
++				struct drm_nouveau_gem_pushbuf_bo *bo,
++				unsigned nr_relocs, uint64_t ptr_relocs,
++				unsigned nr_dwords, unsigned first_dword,
++				uint32_t *pushbuf, bool is_iomem)
++{
++	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
++	struct drm_device *dev = chan->dev;
++	int ret = 0;
++	unsigned i;
++
++	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
++	if (IS_ERR(reloc))
++		return PTR_ERR(reloc);
++
++	for (i = 0; i < nr_relocs; i++) {
++		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
++		struct drm_nouveau_gem_pushbuf_bo *b;
++		uint32_t data;
++
++		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
++		    r->reloc_index >= first_dword + nr_dwords) {
++			NV_ERROR(dev, "Bad relocation %d\n", i);
++			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
++			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
++			ret = -EINVAL;
++			break;
++		}
++
++		b = &bo[r->bo_index];
++		if (b->presumed_ok)
++			continue;
++
++		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
++			data = b->presumed_offset + r->data;
++		else
++		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
++			data = (b->presumed_offset + r->data) >> 32;
++		else
++			data = r->data;
++
++		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
++			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
++				data |= r->tor;
++			else
++				data |= r->vor;
++		}
++
++		if (is_iomem)
++			iowrite32_native(data, (void __force __iomem *)
++						&pushbuf[r->reloc_index]);
++		else
++			pushbuf[r->reloc_index] = data;
++	}
++
++	kfree(reloc);
++	return ret;
++}
++
++int
++nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
++			  struct drm_file *file_priv)
++{
++	struct drm_nouveau_gem_pushbuf *req = data;
++	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
++	struct nouveau_channel *chan;
++	struct validate_op op;
++	struct nouveau_fence *fence = NULL;
++	uint32_t *pushbuf = NULL;
++	int ret = 0, do_reloc = 0, i;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
++
++	if (req->nr_dwords >= chan->dma.max ||
++	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
++	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
++		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
++		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
++			 chan->dma.max - 1);
++		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
++			 NOUVEAU_GEM_MAX_BUFFERS);
++		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
++			 NOUVEAU_GEM_MAX_RELOCS);
++		return -EINVAL;
++	}
++
++	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
++	if (IS_ERR(pushbuf))
++		return PTR_ERR(pushbuf);
++
++	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
++	if (IS_ERR(bo)) {
++		kfree(pushbuf);
++		return PTR_ERR(bo);
++	}
++
++	mutex_lock(&dev->struct_mutex);
++
++	/* Validate buffer list */
++	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
++					   req->nr_buffers, &op, &do_reloc);
++	if (ret)
++		goto out;
++
++	/* Apply any relocations that are required */
++	if (do_reloc) {
++		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
++						      bo, req->nr_relocs,
++						      req->relocs,
++						      req->nr_dwords, 0,
++						      pushbuf, false);
++		if (ret)
++			goto out;
++	}
++
++	/* Emit push buffer to the hw. */
++	ret = RING_SPACE(chan, req->nr_dwords);
++	if (ret)
++		goto out;
++
++	OUT_RINGp(chan, pushbuf, req->nr_dwords);
++
++	ret = nouveau_fence_new(chan, &fence, true);
++	if (ret) {
++		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
++		WIND_RING(chan);
++		goto out;
++	}
++
++	if (nouveau_gem_pushbuf_sync(chan)) {
++		ret = nouveau_fence_wait(fence, NULL, false, false);
++		if (ret) {
++			for (i = 0; i < req->nr_dwords; i++)
++				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
++			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
++		}
++	}
++
++out:
++	validate_fini(&op, fence);
++	nouveau_fence_unref((void**)&fence);
++	mutex_unlock(&dev->struct_mutex);
++	kfree(pushbuf);
++	kfree(bo);
++	return ret;
++}
++
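++/* On NV20 and later the DMA pusher can CALL into a separate push
++ * buffer and RETURN; earlier chips get a JUMP there plus a patched
++ * JUMP back (the suffix0 handling below). */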
++#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
++
++int
++nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
++			       struct drm_file *file_priv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_nouveau_gem_pushbuf_call *req = data;
++	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
++	struct nouveau_channel *chan;
++	struct drm_gem_object *gem;
++	struct nouveau_bo *pbbo;
++	struct validate_op op;
++	struct nouveau_fence *fence = NULL;
++	int i, ret = 0, do_reloc = 0;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
++
++	if (unlikely(req->handle == 0))
++		goto out_next;
++
++	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
++	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
++		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
++		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
++			 NOUVEAU_GEM_MAX_BUFFERS);
++		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
++			 NOUVEAU_GEM_MAX_RELOCS);
++		return -EINVAL;
++	}
++
++	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
++	if (IS_ERR(bo))
++		return PTR_ERR(bo);
++
++	mutex_lock(&dev->struct_mutex);
++
++	/* Validate buffer list */
++	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
++					   req->nr_buffers, &op, &do_reloc);
++	if (ret) {
++		NV_ERROR(dev, "validate: %d\n", ret);
++		goto out;
++	}
++
++	/* Validate DMA push buffer */
++	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
++	if (!gem) {
++		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
++		ret = -EINVAL;
++		goto out;
++	}
++	pbbo = nouveau_gem_object(gem);
++
++	if ((req->offset & 3) || req->nr_dwords < 2 ||
++	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
++	    (unsigned long)req->nr_dwords >
++	     ((unsigned long)(pbbo->bo.mem.size - req->offset) >> 2)) {
++		NV_ERROR(dev, "pb call misaligned or out of bounds: "
++			      "%d + %d * 4 > %ld\n",
++			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
++		ret = -EINVAL;
++		drm_gem_object_unreference(gem);
++		goto out;
++	}
++
++	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
++			     chan->fence.sequence);
++	if (ret) {
++		NV_ERROR(dev, "resv pb: %d\n", ret);
++		drm_gem_object_unreference(gem);
++		goto out;
++	}
++
++	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
++	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
++	if (ret) {
++		NV_ERROR(dev, "validate pb: %d\n", ret);
++		ttm_bo_unreserve(&pbbo->bo);
++		drm_gem_object_unreference(gem);
++		goto out;
++	}
++
++	list_add_tail(&pbbo->entry, &op.both_list);
++
++	/* If presumed return address doesn't match, we need to map the
++	 * push buffer and fix it.
++	 */
++	if (!PUSHBUF_CAL) {
++		uint32_t retaddy;
++
++		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
++			ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
++			if (ret) {
++				NV_ERROR(dev, "jmp_space: %d\n", ret);
++				goto out;
++			}
++		}
++
++		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
++		retaddy |= 0x20000000;
++		if (retaddy != req->suffix0) {
++			req->suffix0 = retaddy;
++			do_reloc = 1;
++		}
++	}
++
++	/* Apply any relocations that are required */
++	if (do_reloc) {
++		void *pbvirt;
++		bool is_iomem;
++		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
++				  &pbbo->kmap);
++		if (ret) {
++			NV_ERROR(dev, "kmap pb: %d\n", ret);
++			goto out;
++		}
++
++		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
++		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
++						      req->nr_relocs,
++						      req->relocs,
++						      req->nr_dwords,
++						      req->offset / 4,
++						      pbvirt, is_iomem);
++
++		if (!PUSHBUF_CAL) {
++			nouveau_bo_wr32(pbbo,
++					req->offset / 4 + req->nr_dwords - 2,
++					req->suffix0);
++		}
++
++		ttm_bo_kunmap(&pbbo->kmap);
++		if (ret) {
++			NV_ERROR(dev, "reloc apply: %d\n", ret);
++			goto out;
++		}
++	}
++
++	if (PUSHBUF_CAL) {
++		ret = RING_SPACE(chan, 2);
++		if (ret) {
++			NV_ERROR(dev, "cal_space: %d\n", ret);
++			goto out;
++		}
++		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
++				  req->offset) | 2);
++		OUT_RING(chan, 0);
++	} else {
++		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
++		if (ret) {
++			NV_ERROR(dev, "jmp_space: %d\n", ret);
++			goto out;
++		}
++		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
++				  req->offset) | 0x20000000);
++		OUT_RING(chan, 0);
++
++		/* Space the jumps apart with NOPs. */
++		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
++			OUT_RING(chan, 0);
++	}
++
++	ret = nouveau_fence_new(chan, &fence, true);
++	if (ret) {
++		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
++		WIND_RING(chan);
++		goto out;
++	}
++
++out:
++	validate_fini(&op, fence);
++	nouveau_fence_unref((void**)&fence);
++	mutex_unlock(&dev->struct_mutex);
++	kfree(bo);
++
++out_next:
++	if (PUSHBUF_CAL) {
++		req->suffix0 = 0x00020000;
++		req->suffix1 = 0x00000000;
++	} else {
++		req->suffix0 = 0x20000000 |
++			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
++		req->suffix1 = 0x00000000;
++	}
++
++	return ret;
++}
++
++int
++nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
++				struct drm_file *file_priv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_nouveau_gem_pushbuf_call *req = data;
++
++	req->vram_available = dev_priv->fb_aper_free;
++	req->gart_available = dev_priv->gart_info.aper_free;
++
++	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
++}
++
++static inline uint32_t
++domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
++{
++	uint32_t flags = 0;
++
++	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
++		flags |= TTM_PL_FLAG_VRAM;
++	if (domain & NOUVEAU_GEM_DOMAIN_GART)
++		flags |= TTM_PL_FLAG_TT;
++
++	return flags;
++}
++
++int
++nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
++		      struct drm_file *file_priv)
++{
++	struct drm_nouveau_gem_pin *req = data;
++	struct drm_gem_object *gem;
++	struct nouveau_bo *nvbo;
++	int ret = 0;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
++		return -EINVAL;
++	}
++
++	if (!DRM_SUSER(DRM_CURPROC))
++		return -EPERM;
++
++	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
++	if (!gem)
++		return -EINVAL;
++	nvbo = nouveau_gem_object(gem);
++
++	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
++	if (ret)
++		goto out;
++
++	req->offset = nvbo->bo.offset;
++	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
++		req->domain = NOUVEAU_GEM_DOMAIN_GART;
++	else
++		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
++
++out:
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_unreference(gem);
++	mutex_unlock(&dev->struct_mutex);
++
++	return ret;
++}
++
++int
++nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	struct drm_nouveau_gem_pin *req = data;
++	struct drm_gem_object *gem;
++	int ret;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
++	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
++	if (!gem)
++		return -EINVAL;
++
++	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
++
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_unreference(gem);
++	mutex_unlock(&dev->struct_mutex);
++
++	return ret;
++}
++
++int
++nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv)
++{
++	struct drm_nouveau_gem_cpu_prep *req = data;
++	struct drm_gem_object *gem;
++	struct nouveau_bo *nvbo;
++	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
++	int ret = -EINVAL;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
++	if (!gem)
++		return ret;
++	nvbo = nouveau_gem_object(gem);
++
++	if (nvbo->cpu_filp) {
++		if (nvbo->cpu_filp == file_priv)
++			goto out;
++
++		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
++		if (ret)
++			goto out;
++	}
++
++	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
++		spin_lock(&nvbo->bo.lock);
++		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
++		spin_unlock(&nvbo->bo.lock);
++	} else {
++		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
++		if (ret == 0)
++			nvbo->cpu_filp = file_priv;
++	}
++
++out:
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_unreference(gem);
++	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
++
++int
++nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv)
++{
++	struct drm_nouveau_gem_cpu_prep *req = data;
++	struct drm_gem_object *gem;
++	struct nouveau_bo *nvbo;
++	int ret = -EINVAL;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
++	if (!gem)
++		return ret;
++	nvbo = nouveau_gem_object(gem);
++
++	if (nvbo->cpu_filp != file_priv)
++		goto out;
++	nvbo->cpu_filp = NULL;
++
++	ttm_bo_synccpu_write_release(&nvbo->bo);
++	ret = 0;
++
++out:
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_unreference(gem);
++	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
++
++int
++nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
++		       struct drm_file *file_priv)
++{
++	struct drm_nouveau_gem_info *req = data;
++	struct drm_gem_object *gem;
++	int ret;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
++	if (!gem)
++		return -EINVAL;
++
++	ret = nouveau_gem_info(gem, req);
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_unreference(gem);
++	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
+new file mode 100644
+index 0000000..c7ebec6
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
+@@ -0,0 +1,161 @@
++/*
++ * Copyright 2009 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include <linux/firmware.h>
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++
++struct nouveau_ctxprog {
++	uint32_t signature;
++	uint8_t  version;
++	uint16_t length;
++	uint32_t data[];
++} __attribute__ ((packed));
++
++struct nouveau_ctxvals {
++	uint32_t signature;
++	uint8_t  version;
++	uint32_t length;
++	struct {
++		uint32_t offset;
++		uint32_t value;
++	} data[];
++} __attribute__ ((packed));
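++
++/*
++ * Layout note, derived from the validation below: both firmware blobs start
++ * with a little-endian magic -- 0x5043564e ("NVCP") for ctxprogs and
++ * 0x5643564e ("NVCV") for ctxvals -- followed by a version byte.  The packed
++ * ctxprog header is 7 bytes (4 + 1 + 2), leaving (fw->size - 7) / 4 data
++ * dwords; the ctxvals header is 9 bytes (4 + 1 + 4), leaving
++ * (fw->size - 9) / 8 offset/value pairs.
++ */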
++
++int
++nouveau_grctx_prog_load(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	const int chipset = dev_priv->chipset;
++	const struct firmware *fw;
++	const struct nouveau_ctxprog *cp;
++	const struct nouveau_ctxvals *cv;
++	char name[32];
++	int ret, i;
++
++	if (pgraph->accel_blocked)
++		return -ENODEV;
++
++	if (!pgraph->ctxprog) {
++		sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
++		ret = request_firmware(&fw, name, &dev->pdev->dev);
++		if (ret) {
++			NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
++			return ret;
++		}
++
++		pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
++		if (!pgraph->ctxprog) {
++			NV_ERROR(dev, "OOM copying ctxprog\n");
++			release_firmware(fw);
++			return -ENOMEM;
++		}
++		memcpy(pgraph->ctxprog, fw->data, fw->size);
++
++		cp = pgraph->ctxprog;
++		if (le32_to_cpu(cp->signature) != 0x5043564e ||
++		    cp->version != 0 ||
++		    le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
++			NV_ERROR(dev, "ctxprog invalid\n");
++			release_firmware(fw);
++			nouveau_grctx_fini(dev);
++			return -EINVAL;
++		}
++		release_firmware(fw);
++	}
++
++	if (!pgraph->ctxvals) {
++		sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
++		ret = request_firmware(&fw, name, &dev->pdev->dev);
++		if (ret) {
++			NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
++			nouveau_grctx_fini(dev);
++			return ret;
++		}
++
++		pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
++		if (!pgraph->ctxvals) {
++			NV_ERROR(dev, "OOM copying ctxvals\n");
++			release_firmware(fw);
++			nouveau_grctx_fini(dev);
++			return -ENOMEM;
++		}
++		memcpy(pgraph->ctxvals, fw->data, fw->size);
++
++		cv = (void *)pgraph->ctxvals;
++		if (le32_to_cpu(cv->signature) != 0x5643564e ||
++		    cv->version != 0 ||
++		    le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
++			NV_ERROR(dev, "ctxvals invalid\n");
++			release_firmware(fw);
++			nouveau_grctx_fini(dev);
++			return -EINVAL;
++		}
++		release_firmware(fw);
++	}
++
++	cp = pgraph->ctxprog;
++
++	nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
++	for (i = 0; i < le16_to_cpu(cp->length); i++)
++		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
++			le32_to_cpu(cp->data[i]));
++
++	return 0;
++}
++
++void
++nouveau_grctx_fini(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++
++	if (pgraph->ctxprog) {
++		kfree(pgraph->ctxprog);
++		pgraph->ctxprog = NULL;
++	}
++
++	if (pgraph->ctxvals) {
++		kfree(pgraph->ctxvals);
++		pgraph->ctxvals = NULL;
++	}
++}
++
++void
++nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_ctxvals *cv = pgraph->ctxvals;
++	int i;
++
++	if (!cv)
++		return;
++
++	for (i = 0; i < le32_to_cpu(cv->length); i++)
++		nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
++			le32_to_cpu(cv->data[i].value));
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h
+new file mode 100644
+index 0000000..5d39c4c
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h
+@@ -0,0 +1,133 @@
++#ifndef __NOUVEAU_GRCTX_H__
++#define __NOUVEAU_GRCTX_H__
++
++struct nouveau_grctx {
++	struct drm_device *dev;
++
++	enum {
++		NOUVEAU_GRCTX_PROG,
++		NOUVEAU_GRCTX_VALS
++	} mode;
++	void *data;
++
++	uint32_t ctxprog_max;
++	uint32_t ctxprog_len;
++	uint32_t ctxprog_reg;
++	int      ctxprog_label[32];
++	uint32_t ctxvals_pos;
++	uint32_t ctxvals_base;
++};
++
++#ifdef CP_CTX
++static inline void
++cp_out(struct nouveau_grctx *ctx, uint32_t inst)
++{
++	uint32_t *ctxprog = ctx->data;
++
++	if (ctx->mode != NOUVEAU_GRCTX_PROG)
++		return;
++
++	BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
++	ctxprog[ctx->ctxprog_len++] = inst;
++}
++
++static inline void
++cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
++{
++	cp_out(ctx, CP_LOAD_SR | val);
++}
++
++static inline void
++cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
++{
++	ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
++
++	ctx->ctxvals_base = ctx->ctxvals_pos;
++	ctx->ctxvals_pos = ctx->ctxvals_base + length;
++
++	if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
++		cp_lsr(ctx, length);
++		length = 0;
++	}
++
++	cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
++}
++
++static inline void
++cp_name(struct nouveau_grctx *ctx, int name)
++{
++	uint32_t *ctxprog = ctx->data;
++	int i;
++
++	if (ctx->mode != NOUVEAU_GRCTX_PROG)
++		return;
++
++	ctx->ctxprog_label[name] = ctx->ctxprog_len;
++	for (i = 0; i < ctx->ctxprog_len; i++) {
++		if ((ctxprog[i] & 0xfff00000) != 0xff400000)
++			continue;
++		if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
++			continue;
++		ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
++			     (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
++	}
++}
++
++static inline void
++_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
++{
++	int ip = 0;
++
++	if (mod != 2) {
++		ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
++		if (ip == 0)
++			ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
++	}
++
++	cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
++		    (state ? 0 : CP_BRA_IF_CLEAR));
++}
++#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
++#ifdef CP_BRA_MOD
++#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
++#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
++#endif
++
++static inline void
++_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
++{
++	cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
++}
++#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
++
++static inline void
++_cp_set(struct nouveau_grctx *ctx, int flag, int state)
++{
++	cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
++}
++#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
++
++static inline void
++cp_pos(struct nouveau_grctx *ctx, int offset)
++{
++	ctx->ctxvals_pos = offset;
++	ctx->ctxvals_base = ctx->ctxvals_pos;
++
++	cp_lsr(ctx, ctx->ctxvals_pos);
++	cp_out(ctx, CP_SET_CONTEXT_POINTER);
++}
++
++static inline void
++gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
++{
++	if (ctx->mode != NOUVEAU_GRCTX_VALS)
++		return;
++
++	reg = (reg - 0x00400000) / 4;
++	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
++
++	nv_wo32(ctx->dev, ctx->data, reg, val);
++}
++#endif
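++
++/*
++ * Forward-branch fixup sketch (illustrative only; the flag names stand in
++ * for whatever CP_FLAG_* definitions the including file provides): a
++ * branch to a label cp_name() has not yet seen is emitted with the
++ * 0xff000000 placeholder carrying the label number, and cp_name() later
++ * rewrites every such instruction to point at the real target:
++ *
++ *	cp_bra (ctx, SOME_FLAG, SET, label);	emits the placeholder
++ *	...
++ *	cp_name(ctx, label);			backpatches the branch above
++ */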
++
++#endif
+diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
+new file mode 100644
+index 0000000..dc46792
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
+@@ -0,0 +1,1080 @@
++/*
++ * Copyright 2006 Dave Airlie
++ * Copyright 2007 Maarten Maathuis
++ * Copyright 2007-2009 Stuart Bennett
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
++ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_hw.h"
++
++#define CHIPSET_NFORCE 0x01a0
++#define CHIPSET_NFORCE2 0x01f0
++
++/*
++ * misc hw access wrappers/control functions
++ */
++
++void
++NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
++{
++	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
++	NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
++}
++
++uint8_t
++NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
++{
++	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
++	return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
++}
++
++void
++NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
++{
++	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
++	NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
++}
++
++uint8_t
++NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
++{
++	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
++	return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
++}
++
++/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
++ * it affects only the 8 bit vga io regs, which we access using mmio at
++ * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
++ * in general, the set value of cr44 does not matter: reg access works as
++ * expected and values can be set for the appropriate head by using a 0x2000
++ * offset as required
++ * however:
++ * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
++ *    cr44 must be set to 0 or 3 for accessing values on the correct head
++ *    through the common 0xc03c* addresses
++ * b) in tied mode (4) head B is programmed to the values set on head A, and
++ *    access using the head B addresses can have strange results, ergo we leave
++ *    tied mode in init once we know to what cr44 should be restored on exit
++ *
++ * the owner parameter is slightly abused:
++ * 0 and 1 are treated as head values and so the set value is (owner * 3)
++ * other values are treated as literal values to set
++ */
++void
++NVSetOwner(struct drm_device *dev, int owner)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (owner == 1)
++		owner *= 3;
++
++	if (dev_priv->chipset == 0x11) {
++		/* This might seem stupid, but the blob does it and
++		 * omitting it often locks the system up.
++		 */
++		NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
++		NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
++	}
++
++	/* CR44 is always changed on CRTC0 */
++	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
++
++	if (dev_priv->chipset == 0x11) {	/* set me harder */
++		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
++		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
++	}
++}
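++
++/*
++ * Usage sketch for the owner convention above: NVSetOwner(dev, 0) selects
++ * head A (cr44 = 0), NVSetOwner(dev, 1) selects head B (cr44 = 3, via the
++ * owner *= 3 above), and NVSetOwner(dev, 4) restores tied mode, the literal
++ * value being passed through unchanged.
++ */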
++
++void
++NVBlankScreen(struct drm_device *dev, int head, bool blank)
++{
++	unsigned char seq1;
++
++	if (nv_two_heads(dev))
++		NVSetOwner(dev, head);
++
++	seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
++
++	NVVgaSeqReset(dev, head, true);
++	if (blank)
++		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
++	else
++		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
++	NVVgaSeqReset(dev, head, false);
++}
++
++/*
++ * PLL setting
++ */
++
++static int
++powerctrl_1_shift(int chip_version, int reg)
++{
++	int shift = -4;
++
++	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
++		return shift;
++
++	switch (reg) {
++	case NV_RAMDAC_VPLL2:
++		shift += 4;
++		/* fall through */
++	case NV_PRAMDAC_VPLL_COEFF:
++		shift += 4;
++		/* fall through */
++	case NV_PRAMDAC_MPLL_COEFF:
++		shift += 4;
++		/* fall through */
++	case NV_PRAMDAC_NVPLL_COEFF:
++		shift += 4;
++	}
++
++	/*
++	 * the shift for vpll regs is only used for nv3x chips with a single
++	 * stage pll
++	 */
++	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
++			  chip_version == 0x36 || chip_version >= 0x40))
++		shift = -4;
++
++	return shift;
++}
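++
++/*
++ * Worked example of the fall-through accumulation above: starting from -4,
++ * NV_PRAMDAC_NVPLL_COEFF yields 0, NV_PRAMDAC_MPLL_COEFF yields 4,
++ * NV_PRAMDAC_VPLL_COEFF yields 8 and NV_RAMDAC_VPLL2 yields 12; the final
++ * test then resets the two vpll shifts (> 4) back to -4 on anything that
++ * is not a single stage pll nv3x chip.
++ */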
++
++static void
++setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int chip_version = dev_priv->vbios->chip_version;
++	uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
++	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
++	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
++	uint32_t saved_powerctrl_1 = 0;
++	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
++
++	if (oldpll == pll)
++		return;	/* already set */
++
++	if (shift_powerctrl_1 >= 0) {
++		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
++		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
++			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
++			1 << shift_powerctrl_1);
++	}
++
++	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
++		/* upclock -- write new post divider first */
++		NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
++	else
++		/* downclock -- write new NM first */
++		NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
++
++	if (chip_version < 0x17 && chip_version != 0x11)
++		/* wait a bit on older chips */
++		msleep(64);
++	NVReadRAMDAC(dev, 0, reg);
++
++	/* then write the other half as well */
++	NVWriteRAMDAC(dev, 0, reg, pll);
++
++	if (shift_powerctrl_1 >= 0)
++		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
++}
++
++static uint32_t
++new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
++{
++	bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
++
++	if (ss)	/* single stage pll mode */
++		ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
++				      NV_RAMDAC_580_VPLL2_ACTIVE;
++	else
++		ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
++				      ~NV_RAMDAC_580_VPLL2_ACTIVE;
++
++	return ramdac580;
++}
++
++static void
++setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
++		       struct nouveau_pll_vals *pv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int chip_version = dev_priv->vbios->chip_version;
++	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
++	uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
++	uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
++	uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
++	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
++	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
++	uint32_t oldramdac580 = 0, ramdac580 = 0;
++	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
++	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
++	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
++
++	/* model specific additions to generic pll1 and pll2 set up above */
++	if (nv3035) {
++		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
++		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
++		pll2 = 0;
++	}
++	if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
++		oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
++		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
++		if (oldramdac580 != ramdac580)
++			oldpll1 = ~0;	/* force mismatch */
++		if (single_stage)
++			/* magic value used by nvidia in single stage mode */
++			pll2 |= 0x011f;
++	}
++	if (chip_version > 0x70)
++		/* magic bits set by the blob (but not the bios) on g71-73 */
++		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
++
++	if (oldpll1 == pll1 && oldpll2 == pll2)
++		return;	/* already set */
++
++	if (shift_powerctrl_1 >= 0) {
++		saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
++		nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
++			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
++			1 << shift_powerctrl_1);
++	}
++
++	if (chip_version >= 0x40) {
++		int shift_c040 = 14;
++
++		switch (reg1) {
++		case NV_PRAMDAC_MPLL_COEFF:
++			shift_c040 += 2;
++			/* fall through */
++		case NV_PRAMDAC_NVPLL_COEFF:
++			shift_c040 += 2;
++			/* fall through */
++		case NV_RAMDAC_VPLL2:
++			shift_c040 += 2;
++			/* fall through */
++		case NV_PRAMDAC_VPLL_COEFF:
++			shift_c040 += 2;
++		}
++
++		savedc040 = nvReadMC(dev, 0xc040);
++		if (shift_c040 != 14)
++			nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
++	}
++
++	if (oldramdac580 != ramdac580)
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
++
++	if (!nv3035)
++		NVWriteRAMDAC(dev, 0, reg2, pll2);
++	NVWriteRAMDAC(dev, 0, reg1, pll1);
++
++	if (shift_powerctrl_1 >= 0)
++		nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
++	if (chip_version >= 0x40)
++		nvWriteMC(dev, 0xc040, savedc040);
++}
++
++static void
++setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
++		      struct nouveau_pll_vals *pv)
++{
++	/* When setting PLLs, there is a merry game of disabling and enabling
++	 * various bits of hardware during the process. This function is a
++	 * synthesis of six nv4x traces, with nearly every card doing a subtly
++	 * different thing. With luck all the necessary bits for each card are
++	 * combined herein. Without luck it deviates from each card's formula
++	 * so as to not work on any :)
++	 */
++
++	uint32_t Preg = NMNMreg - 4;
++	bool mpll = Preg == 0x4020;
++	uint32_t oldPval = nvReadMC(dev, Preg);
++	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
++	uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
++			0xc << 28 | pv->log2P << 16;
++	uint32_t saved4600 = 0;
++	/* some cards have different maskc040s */
++	uint32_t maskc040 = ~(3 << 14), savedc040;
++	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
++
++	if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
++		return;
++
++	if (Preg == 0x4000)
++		maskc040 = ~0x333;
++	if (Preg == 0x4058)
++		maskc040 = ~(0xc << 24);
++
++	if (mpll) {
++		struct pll_lims pll_lim;
++		uint8_t Pval2;
++
++		if (get_pll_limits(dev, Preg, &pll_lim))
++			return;
++
++		Pval2 = pv->log2P + pll_lim.log2p_bias;
++		if (Pval2 > pll_lim.max_log2p)
++			Pval2 = pll_lim.max_log2p;
++		Pval |= 1 << 28 | Pval2 << 20;
++
++		saved4600 = nvReadMC(dev, 0x4600);
++		nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
++	}
++	if (single_stage)
++		Pval |= mpll ? 1 << 12 : 1 << 8;
++
++	nvWriteMC(dev, Preg, oldPval | 1 << 28);
++	nvWriteMC(dev, Preg, Pval & ~(4 << 28));
++	if (mpll) {
++		Pval |= 8 << 20;
++		nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
++		nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
++	}
++
++	savedc040 = nvReadMC(dev, 0xc040);
++	nvWriteMC(dev, 0xc040, savedc040 & maskc040);
++
++	nvWriteMC(dev, NMNMreg, NMNM);
++	if (NMNMreg == 0x4024)
++		nvWriteMC(dev, 0x403c, NMNM);
++
++	nvWriteMC(dev, Preg, Pval);
++	if (mpll) {
++		Pval &= ~(8 << 20);
++		nvWriteMC(dev, 0x4020, Pval);
++		nvWriteMC(dev, 0x4038, Pval);
++		nvWriteMC(dev, 0x4600, saved4600);
++	}
++
++	nvWriteMC(dev, 0xc040, savedc040);
++
++	if (mpll) {
++		nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
++		nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
++	}
++}
++
++void
++nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
++		  struct nouveau_pll_vals *pv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int cv = dev_priv->vbios->chip_version;
++
++	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
++	    cv >= 0x40) {
++		if (reg1 > 0x405c)
++			setPLL_double_highregs(dev, reg1, pv);
++		else
++			setPLL_double_lowregs(dev, reg1, pv);
++	} else
++		setPLL_single(dev, reg1, pv);
++}
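++
++/*
++ * Routing sketch, derived from the register tables in
++ * nouveau_hw_get_pllvals() below: the nv40 NVPLL/MPLL NMNM registers
++ * (0x4000/0x4020) fall below 0x405c and take the lowregs path, while the
++ * vpll coefficient registers in PRAMDAC space lie above it and take the
++ * highregs path.
++ */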
++
++/*
++ * PLL getting
++ */
++
++static void
++nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
++		      uint32_t pll2, struct nouveau_pll_vals *pllvals)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
++
++	/* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
++	pllvals->log2P = (pll1 >> 16) & 0x7;
++	pllvals->N2 = pllvals->M2 = 1;
++
++	if (reg1 <= 0x405c) {
++		pllvals->NM1 = pll2 & 0xffff;
++		/* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
++		if (!(pll1 & 0x1100))
++			pllvals->NM2 = pll2 >> 16;
++	} else {
++		pllvals->NM1 = pll1 & 0xffff;
++		if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
++			pllvals->NM2 = pll2 & 0xffff;
++		else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
++			pllvals->M1 &= 0xf; /* only 4 bits */
++			if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
++				pllvals->M2 = (pll1 >> 4) & 0x7;
++				pllvals->N2 = ((pll1 >> 21) & 0x18) |
++					      ((pll1 >> 19) & 0x7);
++			}
++		}
++	}
++}
++
++int
++nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
++		       struct nouveau_pll_vals *pllvals)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
++						    NV_PRAMDAC_MPLL_COEFF,
++						    NV_PRAMDAC_VPLL_COEFF,
++						    NV_RAMDAC_VPLL2 };
++	const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
++						    0x4020,
++						    NV_PRAMDAC_VPLL_COEFF,
++						    NV_RAMDAC_VPLL2 };
++	uint32_t reg1, pll1, pll2 = 0;
++	struct pll_lims pll_lim;
++	int ret;
++
++	if (dev_priv->card_type < NV_40)
++		reg1 = nv04_regs[plltype];
++	else
++		reg1 = nv40_regs[plltype];
++
++	pll1 = nvReadMC(dev, reg1);
++
++	if (reg1 <= 0x405c)
++		pll2 = nvReadMC(dev, reg1 + 4);
++	else if (nv_two_reg_pll(dev)) {
++		uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
++
++		pll2 = nvReadMC(dev, reg2);
++	}
++
++	if (dev_priv->card_type == NV_40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
++		uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
++
++		/* check whether vpll has been forced into single stage mode */
++		if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
++			if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
++				pll2 = 0;
++		} else if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
++			pll2 = 0;
++	}
++
++	nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
++
++	ret = get_pll_limits(dev, plltype, &pll_lim);
++	if (ret)
++		return ret;
++
++	pllvals->refclk = pll_lim.refclk;
++
++	return 0;
++}
++
++int
++nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
++{
++	/* Avoid divide by zero if called at an inappropriate time */
++	if (!pv->M1 || !pv->M2)
++		return 0;
++
++	return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
++}
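++
++/*
++ * Worked example (27000 kHz is an assumed, typical reference crystal, not
++ * a value taken from this driver): a single stage setting of N1 = 100,
++ * M1 = 9, N2 = M2 = 1, log2P = 1 gives 100 * 27000 / 9 = 300000, shifted
++ * right once for a 150000 kHz (150 MHz) output.
++ */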
++
++int
++nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
++{
++	struct nouveau_pll_vals pllvals;
++
++	if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
++		uint32_t mpllP;
++
++		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
++		if (!mpllP)
++			mpllP = 4;
++
++		return 400000 / mpllP;
++	} else if (plltype == MPLL &&
++		   (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
++		uint32_t clock;
++
++		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
++		return clock;
++	}
++
++	nouveau_hw_get_pllvals(dev, plltype, &pllvals);
++
++	return nouveau_hw_pllvals_to_clk(&pllvals);
++}
++
++static void
++nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
++{
++	/* the vpll on an unused head can come up with a random value, way
++	 * beyond the pll limits.  for some reason this causes the chip to
++	 * lock up when reading the dac palette regs, so set a valid pll here
++	 * when such a condition is detected.  only seen on nv11 to date
++	 */
++
++	struct pll_lims pll_lim;
++	struct nouveau_pll_vals pv;
++	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
++
++	if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
++		return;
++	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
++
++	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
++	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
++	    pv.log2P <= pll_lim.max_log2p)
++		return;
++
++	NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);
++
++	/* set lowest clock within static limits */
++	pv.M1 = pll_lim.vco1.max_m;
++	pv.N1 = pll_lim.vco1.min_n;
++	pv.log2P = pll_lim.max_usable_log2p;
++	nouveau_hw_setpll(dev, pllreg, &pv);
++}
++
++/*
++ * vga font save/restore
++ */
++
++static void nouveau_vga_font_io(struct drm_device *dev,
++				void __iomem *iovram,
++				bool save, unsigned plane)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	unsigned i;
++
++	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
++	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
++	for (i = 0; i < 16384; i++) {
++		if (save) {
++			dev_priv->saved_vga_font[plane][i] =
++					ioread32_native(iovram + i * 4);
++		} else {
++			iowrite32_native(dev_priv->saved_vga_font[plane][i],
++							iovram + i * 4);
++		}
++	}
++}
++
++void
++nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
++{
++	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
++	bool graphicsmode;
++	unsigned plane;
++	void __iomem *iovram;
++
++	if (nv_two_heads(dev))
++		NVSetOwner(dev, 0);
++
++	NVSetEnablePalette(dev, 0, true);
++	graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
++	NVSetEnablePalette(dev, 0, false);
++
++	if (graphicsmode) /* graphics mode => framebuffer => no need to save */
++		return;
++
++	NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");
++
++	/* map first 64KiB of VRAM, holds VGA fonts etc */
++	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
++	if (!iovram) {
++		NV_ERROR(dev, "Failed to map VRAM, "
++					"cannot save/restore VGA fonts.\n");
++		return;
++	}
++
++	if (nv_two_heads(dev))
++		NVBlankScreen(dev, 1, true);
++	NVBlankScreen(dev, 0, true);
++
++	/* save control regs */
++	misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
++	seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
++	seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
++	gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
++	gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
++	gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
++
++	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
++	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
++	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
++	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
++
++	/* save or restore the font in planes 0..3 */
++	for (plane = 0; plane < 4; plane++)
++		nouveau_vga_font_io(dev, iovram, save, plane);
++
++	/* restore control regs */
++	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
++	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
++	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
++	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
++	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
++	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
++
++	if (nv_two_heads(dev))
++		NVBlankScreen(dev, 1, false);
++	NVBlankScreen(dev, 0, false);
++
++	iounmap(iovram);
++}
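++
++/*
++ * The values programmed above are the standard VGA planar access setup
++ * (an assumption based on generic VGA programming, not on nvidia
++ * documentation): misc 0x67 selects the colour register set, seq index 4
++ * = 0x06 disables chain-4 and odd/even addressing, gr index 5 = 0x0
++ * selects read/write mode 0 and gr index 6 = 0x5 keeps graphics mode with
++ * a 64KiB memory map, so each font plane can be copied through the
++ * aperture one plane at a time.
++ */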
++
++/*
++ * mode state save/load
++ */
++
++static void
++rd_cio_state(struct drm_device *dev, int head,
++	     struct nv04_crtc_reg *crtcstate, int index)
++{
++	crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
++}
++
++static void
++wr_cio_state(struct drm_device *dev, int head,
++	     struct nv04_crtc_reg *crtcstate, int index)
++{
++	NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
++}
++
++static void
++nv_save_state_ramdac(struct drm_device *dev, int head,
++		     struct nv04_mode_state *state)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
++	int i;
++
++	if (dev_priv->card_type >= NV_10)
++		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
++
++	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
++	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
++	if (nv_two_heads(dev))
++		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
++	if (dev_priv->chipset == 0x11)
++		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
++
++	regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
++
++	if (nv_gf4_disp_arch(dev))
++		regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
++	if (dev_priv->chipset >= 0x30)
++		regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
++
++	regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
++	regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
++	regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
++	regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
++	regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
++	regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
++	regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
++	regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
++
++	for (i = 0; i < 7; i++) {
++		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
++		regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
++		regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
++	}
++
++	if (nv_gf4_disp_arch(dev)) {
++		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
++		for (i = 0; i < 3; i++) {
++			regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
++			regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
++		}
++	}
++
++	regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
++	regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
++	if (!nv_gf4_disp_arch(dev) && head == 0) {
++		/* early chips don't allow access to PRAMDAC_TMDS_* without
++		 * the head A FPCLK on (nv11 even locks up) */
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
++			      ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
++	}
++	regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
++	regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
++
++	regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
++
++	if (nv_gf4_disp_arch(dev))
++		regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
++
++	if (dev_priv->card_type == NV_40) {
++		regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
++		regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
++		regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
++
++		for (i = 0; i < 38; i++)
++			regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
++							 NV_PRAMDAC_CTV + 4*i);
++	}
++}
++
++static void
++nv_load_state_ramdac(struct drm_device *dev, int head,
++		     struct nv04_mode_state *state)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
++	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
++	int i;
++
++	if (dev_priv->card_type >= NV_10)
++		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
++
++	nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
++	if (nv_two_heads(dev))
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
++	if (dev_priv->chipset == 0x11)
++		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
++
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
++
++	if (nv_gf4_disp_arch(dev))
++		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
++	if (dev_priv->chipset >= 0x30)
++		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
++
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
++
++	for (i = 0; i < 7; i++) {
++		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
++
++		NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
++		NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
++	}
++
++	if (nv_gf4_disp_arch(dev)) {
++		NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
++		for (i = 0; i < 3; i++) {
++			NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
++			NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
++		}
++	}
++
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
++
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
++
++	if (nv_gf4_disp_arch(dev))
++		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
++
++	if (dev_priv->card_type == NV_40) {
++		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
++		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
++		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
++
++		for (i = 0; i < 38; i++)
++			NVWriteRAMDAC(dev, head,
++				      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
++	}
++}
++
++static void
++nv_save_state_vga(struct drm_device *dev, int head,
++		  struct nv04_mode_state *state)
++{
++	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
++	int i;
++
++	regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
++
++	for (i = 0; i < 25; i++)
++		rd_cio_state(dev, head, regp, i);
++
++	NVSetEnablePalette(dev, head, true);
++	for (i = 0; i < 21; i++)
++		regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
++	NVSetEnablePalette(dev, head, false);
++
++	for (i = 0; i < 9; i++)
++		regp->Graphics[i] = NVReadVgaGr(dev, head, i);
++
++	for (i = 0; i < 5; i++)
++		regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
++}
++
++static void
++nv_load_state_vga(struct drm_device *dev, int head,
++		  struct nv04_mode_state *state)
++{
++	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
++	int i;
++
++	NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
++
++	for (i = 0; i < 5; i++)
++		NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
++
++	nv_lock_vga_crtc_base(dev, head, false);
++	for (i = 0; i < 25; i++)
++		wr_cio_state(dev, head, regp, i);
++	nv_lock_vga_crtc_base(dev, head, true);
++
++	for (i = 0; i < 9; i++)
++		NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
++
++	NVSetEnablePalette(dev, head, true);
++	for (i = 0; i < 21; i++)
++		NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
++	NVSetEnablePalette(dev, head, false);
++}
++
++static void
++nv_save_state_ext(struct drm_device *dev, int head,
++		  struct nv04_mode_state *state)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
++	int i;
++
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
++
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
++	if (dev_priv->card_type >= NV_30)
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
++
++	if (dev_priv->card_type >= NV_10) {
++		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
++		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
++
++		if (dev_priv->card_type >= NV_30)
++			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
++
++		if (dev_priv->card_type == NV_40)
++			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
++
++		if (nv_two_heads(dev))
++			regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
++		regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
++	}
++
++	regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
++
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
++	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
++	if (dev_priv->card_type >= NV_10) {
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
++	}
++	/* NV11 and NV20 don't have this, they stop at 0x52. */
++	if (nv_gf4_disp_arch(dev)) {
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
++
++		for (i = 0; i < 0x10; i++)
++			regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
++
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
++		rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
++	}
++
++	regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
++}
++
++static void
++nv_load_state_ext(struct drm_device *dev, int head,
++		  struct nv04_mode_state *state)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
++	uint32_t reg900;
++	int i;
++
++	if (dev_priv->card_type >= NV_10) {
++		if (nv_two_heads(dev))
++			/* setting ENGINE_CTRL (EC) *must* come before
++			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
++			 * EC that should not be overwritten by writing stale EC
++			 */
++			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
++
++		nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
++		nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
++		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
++		nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
++		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
++		nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
++		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
++		nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
++		nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
++
++		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
++		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
++		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
++
++		if (dev_priv->card_type >= NV_30)
++			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
++
++		if (dev_priv->card_type == NV_40) {
++			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
++
++			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
++			if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
++				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
++			else
++				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
++		}
++	}
++
++	NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
++
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
++	if (dev_priv->card_type >= NV_30)
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
++
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
++	if (dev_priv->card_type == NV_40)
++		nv_fix_nv40_hw_cursor(dev, head);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
++
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
++	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
++	if (dev_priv->card_type >= NV_10) {
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
++	}
++	/* NV11 and NV20 stop at 0x52. */
++	if (nv_gf4_disp_arch(dev)) {
++		if (dev_priv->card_type == NV_10) {
++			/* Not waiting for vertical retrace before modifying
++			   CRE_53/CRE_54 causes lockups. */
++			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
++			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
++		}
++
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
++
++		for (i = 0; i < 0x10; i++)
++			NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
++
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
++		wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
++	}
++
++	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
++
++	/* Setting this to 1 gives an interrupt for every vblank period. */
++	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
++	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
++}
++
++static void
++nv_save_state_palette(struct drm_device *dev, int head,
++		      struct nv04_mode_state *state)
++{
++	int head_offset = head * NV_PRMDIO_SIZE, i;
++
++	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
++				NV_PRMDIO_PIXEL_MASK_MASK);
++	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
++
++	for (i = 0; i < 768; i++) {
++		state->crtc_reg[head].DAC[i] = nv_rd08(dev,
++				NV_PRMDIO_PALETTE_DATA + head_offset);
++	}
++
++	NVSetEnablePalette(dev, head, false);
++}
++
++void
++nouveau_hw_load_state_palette(struct drm_device *dev, int head,
++			      struct nv04_mode_state *state)
++{
++	int head_offset = head * NV_PRMDIO_SIZE, i;
++
++	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
++				NV_PRMDIO_PIXEL_MASK_MASK);
++	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
++
++	for (i = 0; i < 768; i++) {
++		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
++				state->crtc_reg[head].DAC[i]);
++	}
++
++	NVSetEnablePalette(dev, head, false);
++}
++
++void nouveau_hw_save_state(struct drm_device *dev, int head,
++			   struct nv04_mode_state *state)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->chipset == 0x11)
++		/* NB: no attempt is made to restore the bad pll later on */
++		nouveau_hw_fix_bad_vpll(dev, head);
++	nv_save_state_ramdac(dev, head, state);
++	nv_save_state_vga(dev, head, state);
++	nv_save_state_palette(dev, head, state);
++	nv_save_state_ext(dev, head, state);
++}
++
++void nouveau_hw_load_state(struct drm_device *dev, int head,
++			   struct nv04_mode_state *state)
++{
++	NVVgaProtect(dev, head, true);
++	nv_load_state_ramdac(dev, head, state);
++	nv_load_state_ext(dev, head, state);
++	nouveau_hw_load_state_palette(dev, head, state);
++	nv_load_state_vga(dev, head, state);
++	NVVgaProtect(dev, head, false);
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
+new file mode 100644
+index 0000000..869130f
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
+@@ -0,0 +1,455 @@
++/*
++ * Copyright 2008 Stuart Bennett
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
++ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_HW_H__
++#define __NOUVEAU_HW_H__
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++
++#define MASK(field) ( \
++	(0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
++
++#define XLATE(src, srclowbit, outfield) ( \
++	(((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
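++
++/*
++ * These rely on bitfields being written as high:low pairs, so that
++ * (1 ? 31:16) parses as a conditional expression yielding 31 and
++ * (0 ? 31:16) yields 16.  MASK(31:16) therefore evaluates to
++ * (0xffffffff >> (31 - 15)) << 16 == 0xffff0000, and XLATE(v, 0, 31:16)
++ * shifts the low bits of v into that field.
++ */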
++
++void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
++uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
++void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
++uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
++void NVSetOwner(struct drm_device *, int owner);
++void NVBlankScreen(struct drm_device *, int head, bool blank);
++void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
++		       struct nouveau_pll_vals *pv);
++int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
++			   struct nouveau_pll_vals *pllvals);
++int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
++int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
++void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
++void nouveau_hw_save_state(struct drm_device *, int head,
++			   struct nv04_mode_state *state);
++void nouveau_hw_load_state(struct drm_device *, int head,
++			   struct nv04_mode_state *state);
++void nouveau_hw_load_state_palette(struct drm_device *, int head,
++				   struct nv04_mode_state *state);
++
++/* nouveau_calc.c */
++extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
++			     int *burst, int *lwm);
++extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
++				int clk, struct nouveau_pll_vals *pv);
++
++static inline uint32_t
++nvReadMC(struct drm_device *dev, uint32_t reg)
++{
++	uint32_t val = nv_rd32(dev, reg);
++	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
++	return val;
++}
++
++static inline void
++nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
++{
++	NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
++	nv_wr32(dev, reg, val);
++}
++
++static inline uint32_t
++nvReadVIDEO(struct drm_device *dev, uint32_t reg)
++{
++	uint32_t val = nv_rd32(dev, reg);
++	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
++	return val;
++}
++
++static inline void
++nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
++{
++	NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
++	nv_wr32(dev, reg, val);
++}
++
++static inline uint32_t
++nvReadFB(struct drm_device *dev, uint32_t reg)
++{
++	uint32_t val = nv_rd32(dev, reg);
++	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
++	return val;
++}
++
++static inline void
++nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
++{
++	NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
++	nv_wr32(dev, reg, val);
++}
++
++static inline uint32_t
++nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
++{
++	uint32_t val = nv_rd32(dev, reg);
++	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
++	return val;
++}
++
++static inline void
++nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
++{
++	NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
++	nv_wr32(dev, reg, val);
++}
++
++static inline uint32_t NVReadCRTC(struct drm_device *dev,
++					int head, uint32_t reg)
++{
++	uint32_t val;
++	if (head)
++		reg += NV_PCRTC0_SIZE;
++	val = nv_rd32(dev, reg);
++	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
++	return val;
++}
++
++static inline void NVWriteCRTC(struct drm_device *dev,
++					int head, uint32_t reg, uint32_t val)
++{
++	if (head)
++		reg += NV_PCRTC0_SIZE;
++	NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
++	nv_wr32(dev, reg, val);
++}
++
++static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
++					int head, uint32_t reg)
++{
++	uint32_t val;
++	if (head)
++		reg += NV_PRAMDAC0_SIZE;
++	val = nv_rd32(dev, reg);
++	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
++							head, reg, val);
++	return val;
++}
++
++static inline void NVWriteRAMDAC(struct drm_device *dev,
++					int head, uint32_t reg, uint32_t val)
++{
++	if (head)
++		reg += NV_PRAMDAC0_SIZE;
++	NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
++							head, reg, val);
++	nv_wr32(dev, reg, val);
++}
++
++static inline uint8_t nv_read_tmds(struct drm_device *dev,
++					int or, int dl, uint8_t address)
++{
++	int ramdac = (or & OUTPUT_C) >> 2;
++
++	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
++		      NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
++	return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
++}
++
++static inline void nv_write_tmds(struct drm_device *dev,
++					int or, int dl, uint8_t address,
++					uint8_t data)
++{
++	int ramdac = (or & OUTPUT_C) >> 2;
++
++	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
++	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
++}
++
++static inline void NVWriteVgaCrtc(struct drm_device *dev,
++					int head, uint8_t index, uint8_t value)
++{
++	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
++							head, index, value);
++	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
++	nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
++}
++
++static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
++					int head, uint8_t index)
++{
++	uint8_t val;
++	nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
++	val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
++	NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
++							head, index, val);
++	return val;
++}
++
++/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf) for
++ * CR58; I suspect they in fact do nothing, but are merely a way to carry
++ * useful per-head variables around.
++ *
++ * Known uses:
++ * CR57		CR58
++ * 0x00		index to the appropriate dcb entry (or 7f for inactive)
++ * 0x02		dcb entry's "or" value (or 00 for inactive)
++ * 0x03		bit0 set for dual link (LVDS, possibly elsewhere too)
++ * 0x08 or 0x09	pxclk in MHz
++ * 0x0f		laptop panel info -	low nibble for PEXTDEV_BOOT_0 strap
++ * 					high nibble for xlat strap value
++ */
++
++static inline void
++NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
++{
++	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
++	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
++}
++
++static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
++{
++	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
++	return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
++}
++
++static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
++					int head, uint32_t reg)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint8_t val;
++
++	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
++	 * NVSetOwner for the relevant head to be programmed */
++	if (head && dev_priv->card_type == NV_40)
++		reg += NV_PRMVIO_SIZE;
++
++	val = nv_rd08(dev, reg);
++	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
++	return val;
++}
++
++static inline void NVWritePRMVIO(struct drm_device *dev,
++					int head, uint32_t reg, uint8_t value)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
++	 * NVSetOwner for the relevant head to be programmed */
++	if (head && dev_priv->card_type == NV_40)
++		reg += NV_PRMVIO_SIZE;
++
++	NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
++						head, reg, value);
++	nv_wr08(dev, reg, value);
++}
++
++static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
++{
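++	/* a read of INP0 (input status 1) resets the attribute controller
++	 * flip-flop, so the ARX write below is taken as an index; writing
++	 * the index with bit 5 clear opens the palette to the host */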
++	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
++	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
++}
++
++static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
++{
++	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
++	return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
++}
++
++static inline void NVWriteVgaAttr(struct drm_device *dev,
++					int head, uint8_t index, uint8_t value)
++{
++	if (NVGetEnablePalette(dev, head))
++		index &= ~0x20;
++	else
++		index |= 0x20;
++
++	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
++	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
++							head, index, value);
++	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
++	nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
++}
++
++static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
++					int head, uint8_t index)
++{
++	uint8_t val;
++	if (NVGetEnablePalette(dev, head))
++		index &= ~0x20;
++	else
++		index |= 0x20;
++
++	nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
++	nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
++	val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
++	NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
++							head, index, val);
++	return val;
++}
++
++static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
++{
++	NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
++}
++
++static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
++{
++	uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
++
++	if (protect) {
++		NVVgaSeqReset(dev, head, true);
++		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
++	} else {
++		/* Reenable sequencer, then turn on screen */
++		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);   /* reenable display */
++		NVVgaSeqReset(dev, head, false);
++	}
++	NVSetEnablePalette(dev, head, protect);
++}
++
++static inline bool
++nv_heads_tied(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->chipset == 0x11)
++		return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));
++
++	return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
++}
++
++/* makes cr0-7 on the specified head read-only */
++static inline bool
++nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
++{
++	uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
++	bool waslocked = cr11 & 0x80;
++
++	if (lock)
++		cr11 |= 0x80;
++	else
++		cr11 &= ~0x80;
++	NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);
++
++	return waslocked;
++}
++
++static inline void
++nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
++{
++	/* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
++	 * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
++	 * bit6: seems to have some effect on CR09 (double scan, VBS_9)
++	 * bit5: unlocks HDE
++	 * bit4: unlocks VDE
++	 * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
++	 * bit2: same as bit 1 of 0x60?804
++	 * bit0: same as bit 0 of 0x60?804
++	 */
++
++	uint8_t cr21 = lock;
++
++	if (lock < 0)
++		/* 0xfa is generic "unlock all" mask */
++		cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;
++
++	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
++}
++
++/* renders the extended crtc regs (cr19+) on all crtcs impervious:
++ * immutable and unreadable
++ */
++static inline bool
++NVLockVgaCrtcs(struct drm_device *dev, bool lock)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
++
++	NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
++		       lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
++	/* NV11 has independently lockable extended crtcs, except when tied */
++	if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
++		NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
++			       lock ? NV_CIO_SR_LOCK_VALUE :
++				      NV_CIO_SR_UNLOCK_RW_VALUE);
++
++	return waslocked;
++}
++
++/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
++#define NV04_CURSOR_SIZE 32
++/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
++#define NV10_CURSOR_SIZE 64
++
++static inline int nv_cursor_width(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
++}
++
++static inline void
++nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
++{
++	/* on some nv40 (such as the "true" (in the NV_PFB_BOOT_0 sense) nv40,
++	 * the gf6800gt) a hardware bug requires a write to PRAMDAC_CURSOR_POS
++	 * for changes to the CRTC CURCTL regs to take effect, whether changing
++	 * the pixmap location, or just showing/hiding the cursor
++	 */
++	uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
++}
++
++static inline void
++nv_show_cursor(struct drm_device *dev, int head, bool show)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint8_t *curctl1 =
++		&dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
++
++	if (show)
++		*curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
++	else
++		*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
++	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
++
++	if (dev_priv->card_type == NV_40)
++		nv_fix_nv40_hw_cursor(dev, head);
++}
++
++static inline uint32_t
++nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int mask;
++
++	if (bpp == 15)
++		bpp = 16;
++	if (bpp == 24)
++		bpp = 8;
++
++	/* Alignment requirements taken from the Haiku driver */
++	if (dev_priv->card_type == NV_04)
++		mask = 128 / bpp - 1;
++	else
++		mask = 512 / bpp - 1;
++
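++	/* e.g. 32 bpp on NV04: mask = 128/32 - 1 = 3, so the width rounds
++	 * up to a multiple of 4 pixels */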
++	return (width + mask) & ~mask;
++}
++
++#endif	/* __NOUVEAU_HW_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
+new file mode 100644
+index 0000000..70e994d
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
+@@ -0,0 +1,269 @@
++/*
++ * Copyright 2009 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_i2c.h"
++#include "nouveau_hw.h"
++
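++/* nv04-class ports are bit-banged through the pair of CRTC registers the
++ * DCB names: in the write register bit 5 drives SCL, bit 4 drives SDA and
++ * bit 0 (always set here) appears to enable the output drivers; in the
++ * read register bit 2 reflects SCL and bit 3 SDA. */
++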
++static void
++nv04_i2c_setscl(void *data, int state)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++	uint8_t val;
++
++	val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
++	NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
++}
++
++static void
++nv04_i2c_setsda(void *data, int state)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++	uint8_t val;
++
++	val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
++	NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
++}
++
++static int
++nv04_i2c_getscl(void *data)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
++}
++
++static int
++nv04_i2c_getsda(void *data)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
++}
++
++static void
++nv4e_i2c_setscl(void *data, int state)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++	uint8_t val;
++
++	val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
++	nv_wr32(dev, i2c->wr, val | 0x01);
++}
++
++static void
++nv4e_i2c_setsda(void *data, int state)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++	uint8_t val;
++
++	val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
++	nv_wr32(dev, i2c->wr, val | 0x01);
++}
++
++static int
++nv4e_i2c_getscl(void *data)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
++}
++
++static int
++nv4e_i2c_getsda(void *data)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
++}
++
++static int
++nv50_i2c_getscl(void *data)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	return !!(nv_rd32(dev, i2c->rd) & 1);
++}
++
++static int
++nv50_i2c_getsda(void *data)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	return !!(nv_rd32(dev, i2c->rd) & 2);
++}
++
++static void
++nv50_i2c_setscl(void *data, int state)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
++}
++
++static void
++nv50_i2c_setsda(void *data, int state)
++{
++	struct nouveau_i2c_chan *i2c = data;
++	struct drm_device *dev = i2c->dev;
++
++	nv_wr32(dev, i2c->wr,
++			(nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
++	i2c->data = state;
++}
++
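++/* mmio offsets of the NV50 per-port i2c registers, indexed by the DCB
++ * "read" field; each register exposes SCL in bit 0 and SDA in bit 1 */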
++static const uint32_t nv50_i2c_port[] = {
++	0x00e138, 0x00e150, 0x00e168, 0x00e180,
++	0x00e254, 0x00e274, 0x00e764, 0x00e780,
++	0x00e79c, 0x00e7b8
++};
++#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
++
++int
++nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_i2c_chan *i2c;
++	int ret;
++
++	if (entry->chan)
++		return -EEXIST;
++
++	if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
++		NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
++		return -EINVAL;
++	}
++
++	i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
++	if (i2c == NULL)
++		return -ENOMEM;
++
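++	/* DCB i2c port types: 0 = bit-banged through VGA CRTC regs,
++	 * 4 = bit-banged through mmio at 0x600800 + offset, 5 = NV50-style
++	 * port from the table above, 6 = DisplayPort AUX channel */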
++	switch (entry->port_type) {
++	case 0:
++		i2c->algo.bit.setsda = nv04_i2c_setsda;
++		i2c->algo.bit.setscl = nv04_i2c_setscl;
++		i2c->algo.bit.getsda = nv04_i2c_getsda;
++		i2c->algo.bit.getscl = nv04_i2c_getscl;
++		i2c->rd = entry->read;
++		i2c->wr = entry->write;
++		break;
++	case 4:
++		i2c->algo.bit.setsda = nv4e_i2c_setsda;
++		i2c->algo.bit.setscl = nv4e_i2c_setscl;
++		i2c->algo.bit.getsda = nv4e_i2c_getsda;
++		i2c->algo.bit.getscl = nv4e_i2c_getscl;
++		i2c->rd = 0x600800 + entry->read;
++		i2c->wr = 0x600800 + entry->write;
++		break;
++	case 5:
++		i2c->algo.bit.setsda = nv50_i2c_setsda;
++		i2c->algo.bit.setscl = nv50_i2c_setscl;
++		i2c->algo.bit.getsda = nv50_i2c_getsda;
++		i2c->algo.bit.getscl = nv50_i2c_getscl;
++		i2c->rd = nv50_i2c_port[entry->read];
++		i2c->wr = i2c->rd;
++		break;
++	case 6:
++		i2c->rd = entry->read;
++		i2c->wr = entry->write;
++		break;
++	default:
++		NV_ERROR(dev, "DCB I2C port type %d unknown\n",
++			 entry->port_type);
++		kfree(i2c);
++		return -EINVAL;
++	}
++
++	snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
++		 "nouveau-%s-%d", pci_name(dev->pdev), index);
++	i2c->adapter.owner = THIS_MODULE;
++	i2c->adapter.dev.parent = &dev->pdev->dev;
++	i2c->dev = dev;
++	i2c_set_adapdata(&i2c->adapter, i2c);
++
++	if (entry->port_type < 6) {
++		i2c->adapter.algo_data = &i2c->algo.bit;
++		i2c->algo.bit.udelay = 40;
++		i2c->algo.bit.timeout = usecs_to_jiffies(5000);
++		i2c->algo.bit.data = i2c;
++		ret = i2c_bit_add_bus(&i2c->adapter);
++	} else {
++		i2c->adapter.algo_data = &i2c->algo.dp;
++		i2c->algo.dp.running = false;
++		i2c->algo.dp.address = 0;
++		i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
++		ret = i2c_dp_aux_add_bus(&i2c->adapter);
++	}
++
++	if (ret) {
++		NV_ERROR(dev, "Failed to register i2c %d\n", index);
++		kfree(i2c);
++		return ret;
++	}
++
++	entry->chan = i2c;
++	return 0;
++}
++
++void
++nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
++{
++	if (!entry->chan)
++		return;
++
++	i2c_del_adapter(&entry->chan->adapter);
++	kfree(entry->chan);
++	entry->chan = NULL;
++}
++
++struct nouveau_i2c_chan *
++nouveau_i2c_find(struct drm_device *dev, int index)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nvbios *bios = &dev_priv->VBIOS;
++
++	if (index >= DCB_MAX_NUM_I2C_ENTRIES)
++		return NULL;
++
++	if (!bios->bdcb.dcb.i2c[index].chan) {
++		if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
++			return NULL;
++	}
++
++	return bios->bdcb.dcb.i2c[index].chan;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
+new file mode 100644
+index 0000000..c8eaf7a
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
+@@ -0,0 +1,52 @@
++/*
++ * Copyright 2009 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_I2C_H__
++#define __NOUVEAU_I2C_H__
++
++#include <linux/i2c.h>
++#include <linux/i2c-id.h>
++#include <linux/i2c-algo-bit.h>
++#include "drm_dp_helper.h"
++
++struct dcb_i2c_entry;
++
++struct nouveau_i2c_chan {
++	struct i2c_adapter adapter;
++	struct drm_device *dev;
++	union {
++		struct i2c_algo_bit_data bit;
++		struct i2c_algo_dp_aux_data dp;
++	} algo;
++	unsigned rd;
++	unsigned wr;
++	unsigned data;
++};
++
++int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
++void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
++struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
++
++int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
++			  uint8_t *read_byte);
++
++#endif /* __NOUVEAU_I2C_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+new file mode 100644
+index 0000000..475ba81
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+@@ -0,0 +1,70 @@
++/**
++ * \file nouveau_ioc32.c
++ *
++ * 32-bit ioctl compatibility routines for the nouveau DRM.
++ *
++ * \author Dave Airlie <airlied at linux.ie> with code from patches by Egbert Eich
++ *
++ * Copyright (C) Paul Mackerras 2005
++ * Copyright (C) Egbert Eich 2003,2004
++ * Copyright (C) Dave Airlie 2005
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/compat.h>
++
++#include "drmP.h"
++#include "drm.h"
++
++#include "nouveau_drv.h"
++
++/**
++ * Called whenever a 32-bit process running under a 64-bit kernel
++ * performs an ioctl on /dev/dri/card<n>.
++ *
++ * \param filp file pointer.
++ * \param cmd command.
++ * \param arg user argument.
++ * \return zero on success or negative number on failure.
++ */
++long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
++			 unsigned long arg)
++{
++	unsigned int nr = DRM_IOCTL_NR(cmd);
++	drm_ioctl_compat_t *fn = NULL;
++	int ret;
++
++	if (nr < DRM_COMMAND_BASE)
++		return drm_compat_ioctl(filp, cmd, arg);
++
++#if 0
++	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
++		fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
++#endif
++	if (fn != NULL)
++		ret = (*fn)(filp, cmd, arg);
++	else
++		ret = drm_ioctl(filp, cmd, arg);
++
++	return ret;
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
+new file mode 100644
+index 0000000..447f9f6
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
+@@ -0,0 +1,737 @@
++/*
++ * Copyright (C) 2006 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Ben Skeggs <darktama at iinet.net.au>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_reg.h"
++#include <linux/ratelimit.h>
++
++/* needed for hotplug irq */
++#include "nouveau_connector.h"
++#include "nv50_display.h"
++
++void
++nouveau_irq_preinstall(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/* Master disable */
++	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
++
++	if (dev_priv->card_type == NV_50) {
++		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
++		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
++	}
++}
++
++int
++nouveau_irq_postinstall(struct drm_device *dev)
++{
++	/* Master enable */
++	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
++	return 0;
++}
++
++void
++nouveau_irq_uninstall(struct drm_device *dev)
++{
++	/* Master disable */
++	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
++}
++
++static int
++nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
++{
++	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
++	struct nouveau_pgraph_object_method *grm;
++	struct nouveau_pgraph_object_class *grc;
++
++	grc = dev_priv->engine.graph.grclass;
++	while (grc->id) {
++		if (grc->id == class)
++			break;
++		grc++;
++	}
++
++	if (grc->id != class || !grc->methods)
++		return -ENOENT;
++
++	grm = grc->methods;
++	while (grm->id) {
++		if (grm->id == mthd)
++			return grm->exec(chan, class, mthd, data);
++		grm++;
++	}
++
++	return -ENOENT;
++}
++
++static bool
++nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
++{
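++	/* CACHE1 addresses encode the subchannel in bits 15:13 and the
++	 * method offset in bits 12:2; method 0x0000 binds an object to
++	 * the subchannel */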
++	struct drm_device *dev = chan->dev;
++	const int subc = (addr >> 13) & 0x7;
++	const int mthd = addr & 0x1ffc;
++
++	if (mthd == 0x0000) {
++		struct nouveau_gpuobj_ref *ref = NULL;
++
++		if (nouveau_gpuobj_ref_find(chan, data, &ref))
++			return false;
++
++		if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
++			return false;
++
++		chan->sw_subchannel[subc] = ref->gpuobj->class;
++		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
++			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
++		return true;
++	}
++
++	/* hw object */
++	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
++		return false;
++
++	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
++		return false;
++
++	return true;
++}
++
++static void
++nouveau_fifo_irq_handler(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++	uint32_t status, reassign;
++	int cnt = 0;
++
++	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
++	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
++		struct nouveau_channel *chan = NULL;
++		uint32_t get;
++		int chid;
++
++		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
++
++		chid = engine->fifo.channel_id(dev);
++		if (chid >= 0 && chid < engine->fifo.channels)
++			chan = dev_priv->fifos[chid];
++		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
++
++		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
++			uint32_t mthd, data;
++			int ptr;
++
++			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
++			 * wrapping on my G80 chips, but CACHE1 isn't big
++			 * enough for this much data.. Tests show that it
++			 * wraps around to the start at GET=0x800.. No clue
++			 * as to why..
++			 */
++			ptr = (get & 0x7ff) >> 2;
++
++			if (dev_priv->card_type < NV_40) {
++				mthd = nv_rd32(dev,
++					NV04_PFIFO_CACHE1_METHOD(ptr));
++				data = nv_rd32(dev,
++					NV04_PFIFO_CACHE1_DATA(ptr));
++			} else {
++				mthd = nv_rd32(dev,
++					NV40_PFIFO_CACHE1_METHOD(ptr));
++				data = nv_rd32(dev,
++					NV40_PFIFO_CACHE1_DATA(ptr));
++			}
++
++			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
++				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
++					     "Mthd 0x%04x Data 0x%08x\n",
++					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
++					data);
++			}
++
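++			/* drop the offending entry: freeze the pusher,
++			 * step GET past it, rehash, then re-enable */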
++			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
++			nv_wr32(dev, NV03_PFIFO_INTR_0,
++						NV_PFIFO_INTR_CACHE_ERROR);
++
++			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
++				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
++			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
++			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
++				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
++			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
++
++			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
++				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
++			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
++
++			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
++		}
++
++		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
++			NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
++
++			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
++			nv_wr32(dev, NV03_PFIFO_INTR_0,
++						NV_PFIFO_INTR_DMA_PUSHER);
++
++			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
++			if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
++				nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
++								get + 4);
++		}
++
++		if (status & NV_PFIFO_INTR_SEMAPHORE) {
++			uint32_t sem;
++
++			status &= ~NV_PFIFO_INTR_SEMAPHORE;
++			nv_wr32(dev, NV03_PFIFO_INTR_0,
++				NV_PFIFO_INTR_SEMAPHORE);
++
++			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
++			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
++
++			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
++			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
++		}
++
++		if (status) {
++			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
++				status, chid);
++			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
++			status = 0;
++		}
++
++		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
++	}
++
++	if (status) {
++		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
++		nv_wr32(dev, 0x2140, 0);
++		nv_wr32(dev, 0x140, 0);
++	}
++
++	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
++}
++
++struct nouveau_bitfield_names {
++	uint32_t mask;
++	const char *name;
++};
++
++static struct nouveau_bitfield_names nstatus_names[] =
++{
++	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
++	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
++	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
++	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
++};
++
++static struct nouveau_bitfield_names nstatus_names_nv10[] =
++{
++	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
++	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
++	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
++	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
++};
++
++static struct nouveau_bitfield_names nsource_names[] =
++{
++	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
++	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
++	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
++	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
++	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
++	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
++	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
++	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
++	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
++	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
++	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
++	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
++	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
++	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
++	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
++	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
++	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
++	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
++	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
++};
++
++static void
++nouveau_print_bitfield_names_(uint32_t value,
++				const struct nouveau_bitfield_names *namelist,
++				const int namelist_len)
++{
++	/*
++	 * Caller must have already printed the KERN_* log level for us.
++	 * Also the caller is responsible for adding the newline.
++	 */
++	int i;
++	for (i = 0; i < namelist_len; ++i) {
++		uint32_t mask = namelist[i].mask;
++		if (value & mask) {
++			printk(" %s", namelist[i].name);
++			value &= ~mask;
++		}
++	}
++	if (value)
++		printk(" (unknown bits 0x%08x)", value);
++}
++#define nouveau_print_bitfield_names(val, namelist) \
++	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
++
++static int
++nouveau_graph_chid_from_grctx(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t inst;
++	int i;
++
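++	/* 0x40032c latches the instance address of the active graphics
++	 * context (shifted right 4 on nv40, 12 on nv50); compare it with
++	 * each channel's context to recover the channel id */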
++	if (dev_priv->card_type < NV_40)
++		return dev_priv->engine.fifo.channels;
++	else
++	if (dev_priv->card_type < NV_50) {
++		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
++
++		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++			struct nouveau_channel *chan = dev_priv->fifos[i];
++
++			if (!chan || !chan->ramin_grctx)
++				continue;
++
++			if (inst == chan->ramin_grctx->instance)
++				break;
++		}
++	} else {
++		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
++
++		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++			struct nouveau_channel *chan = dev_priv->fifos[i];
++
++			if (!chan || !chan->ramin)
++				continue;
++
++			if (inst == chan->ramin->instance)
++				break;
++		}
++	}
++
++	return i;
++}
++
++static int
++nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++	int channel;
++
++	if (dev_priv->card_type < NV_10)
++		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
++	else
++	if (dev_priv->card_type < NV_40)
++		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
++	else
++		channel = nouveau_graph_chid_from_grctx(dev);
++
++	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
++		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
++		return -EINVAL;
++	}
++
++	*channel_ret = channel;
++	return 0;
++}
++
++struct nouveau_pgraph_trap {
++	int channel;
++	int class;
++	int subc, mthd, size;
++	uint32_t data, data2;
++	uint32_t nsource, nstatus;
++};
++
++static void
++nouveau_graph_trap_info(struct drm_device *dev,
++			struct nouveau_pgraph_trap *trap)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t address;
++
++	trap->nsource = trap->nstatus = 0;
++	if (dev_priv->card_type < NV_50) {
++		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
++		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
++	}
++
++	if (nouveau_graph_trapped_channel(dev, &trap->channel))
++		trap->channel = -1;
++	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
++
++	trap->mthd = address & 0x1FFC;
++	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
++	if (dev_priv->card_type < NV_10) {
++		trap->subc  = (address >> 13) & 0x7;
++	} else {
++		trap->subc  = (address >> 16) & 0x7;
++		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
++	}
++
++	if (dev_priv->card_type < NV_10)
++		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
++	else if (dev_priv->card_type < NV_40)
++		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
++	else if (dev_priv->card_type < NV_50)
++		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
++	else
++		trap->class = nv_rd32(dev, 0x400814);
++}
++
++static void
++nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
++			     struct nouveau_pgraph_trap *trap)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
++
++	NV_INFO(dev, "%s - nSource:", id);
++	nouveau_print_bitfield_names(nsource, nsource_names);
++	printk(", nStatus:");
++	if (dev_priv->card_type < NV_10)
++		nouveau_print_bitfield_names(nstatus, nstatus_names);
++	else
++		nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
++	printk("\n");
++
++	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
++					"Data 0x%08x:0x%08x\n",
++					id, trap->channel, trap->subc,
++					trap->class, trap->mthd,
++					trap->data2, trap->data);
++}
++
++static int
++nouveau_pgraph_intr_swmthd(struct drm_device *dev,
++			   struct nouveau_pgraph_trap *trap)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (trap->channel < 0 ||
++	    trap->channel >= dev_priv->engine.fifo.channels ||
++	    !dev_priv->fifos[trap->channel])
++		return -ENODEV;
++
++	return nouveau_call_method(dev_priv->fifos[trap->channel],
++				   trap->class, trap->mthd, trap->data);
++}
++
++static inline void
++nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
++{
++	struct nouveau_pgraph_trap trap;
++	int unhandled = 0;
++
++	nouveau_graph_trap_info(dev, &trap);
++
++	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
++		if (nouveau_pgraph_intr_swmthd(dev, &trap))
++			unhandled = 1;
++	} else {
++		unhandled = 1;
++	}
++
++	if (unhandled)
++		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
++}
++
++static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
++
++static int nouveau_ratelimit(void)
++{
++	return __ratelimit(&nouveau_ratelimit_state);
++}
++
++static inline void
++nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
++{
++	struct nouveau_pgraph_trap trap;
++	int unhandled = 0;
++
++	nouveau_graph_trap_info(dev, &trap);
++	trap.nsource = nsource;
++
++	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
++		if (nouveau_pgraph_intr_swmthd(dev, &trap))
++			unhandled = 1;
++	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
++		uint32_t v = nv_rd32(dev, 0x402000);
++		nv_wr32(dev, 0x402000, v);
++
++		/* dump the error anyway for now: it's useful for
++		   Gallium development */
++		unhandled = 1;
++	} else {
++		unhandled = 1;
++	}
++
++	if (unhandled && nouveau_ratelimit())
++		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
++}
++
++static inline void
++nouveau_pgraph_intr_context_switch(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++	uint32_t chid;
++
++	chid = engine->fifo.channel_id(dev);
++	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
++
++	switch (dev_priv->card_type) {
++	case NV_04:
++		nv04_graph_context_switch(dev);
++		break;
++	case NV_10:
++		nv10_graph_context_switch(dev);
++		break;
++	default:
++		NV_ERROR(dev, "Context switch not implemented\n");
++		break;
++	}
++}
++
++static void
++nouveau_pgraph_irq_handler(struct drm_device *dev)
++{
++	uint32_t status;
++
++	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
++		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
++
++		if (status & NV_PGRAPH_INTR_NOTIFY) {
++			nouveau_pgraph_intr_notify(dev, nsource);
++
++			status &= ~NV_PGRAPH_INTR_NOTIFY;
++			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
++		}
++
++		if (status & NV_PGRAPH_INTR_ERROR) {
++			nouveau_pgraph_intr_error(dev, nsource);
++
++			status &= ~NV_PGRAPH_INTR_ERROR;
++			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
++		}
++
++		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
++			nouveau_pgraph_intr_context_switch(dev);
++
++			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
++			nv_wr32(dev, NV03_PGRAPH_INTR,
++				 NV_PGRAPH_INTR_CONTEXT_SWITCH);
++		}
++
++		if (status) {
++			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
++			nv_wr32(dev, NV03_PGRAPH_INTR, status);
++		}
++
++		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
++			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
++	}
++
++	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
++}
++
++static void
++nv50_pgraph_irq_handler(struct drm_device *dev)
++{
++	uint32_t status;
++
++	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
++		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
++
++		if (status & 0x00000001) {
++			nouveau_pgraph_intr_notify(dev, nsource);
++			status &= ~0x00000001;
++			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
++		}
++
++		if (status & 0x00000010) {
++			nouveau_pgraph_intr_error(dev, nsource |
++					NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
++
++			status &= ~0x00000010;
++			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
++		}
++
++		if (status & 0x00001000) {
++			nv_wr32(dev, 0x400500, 0x00000000);
++			nv_wr32(dev, NV03_PGRAPH_INTR,
++				NV_PGRAPH_INTR_CONTEXT_SWITCH);
++			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
++				NV40_PGRAPH_INTR_EN) &
++				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
++			nv_wr32(dev, 0x400500, 0x00010001);
++
++			nv50_graph_context_switch(dev);
++
++			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
++		}
++
++		if (status & 0x00100000) {
++			nouveau_pgraph_intr_error(dev, nsource |
++					NV03_PGRAPH_NSOURCE_DATA_ERROR);
++
++			status &= ~0x00100000;
++			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
++		}
++
++		if (status & 0x00200000) {
++			int r;
++
++			nouveau_pgraph_intr_error(dev, nsource |
++					NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
++
++			NV_ERROR(dev, "magic set 1:\n");
++			for (r = 0x408900; r <= 0x408910; r += 4)
++				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
++					nv_rd32(dev, r));
++			nv_wr32(dev, 0x408900,
++				nv_rd32(dev, 0x408904) | 0xc0000000);
++			for (r = 0x408e08; r <= 0x408e24; r += 4)
++				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
++							nv_rd32(dev, r));
++			nv_wr32(dev, 0x408e08,
++				nv_rd32(dev, 0x408e08) | 0xc0000000);
++
++			NV_ERROR(dev, "magic set 2:\n");
++			for (r = 0x409900; r <= 0x409910; r += 4)
++				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
++					nv_rd32(dev, r));
++			nv_wr32(dev, 0x409900,
++				nv_rd32(dev, 0x409904) | 0xc0000000);
++			for (r = 0x409e08; r <= 0x409e24; r += 4)
++				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
++					nv_rd32(dev, r));
++			nv_wr32(dev, 0x409e08,
++				nv_rd32(dev, 0x409e08) | 0xc0000000);
++
++			status &= ~0x00200000;
++			nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
++			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
++		}
++
++		if (status) {
++			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
++				status);
++			nv_wr32(dev, NV03_PGRAPH_INTR, status);
++		}
++
++		{
++			const int isb = (1 << 16) | (1 << 0);
++
++			if ((nv_rd32(dev, 0x400500) & isb) != isb)
++				nv_wr32(dev, 0x400500,
++					nv_rd32(dev, 0x400500) | isb);
++		}
++	}
++
++	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
++	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
++}
++
++static void
++nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
++{
++	if (crtc & 1)
++		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
++
++	if (crtc & 2)
++		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
++}
++
++irqreturn_t
++nouveau_irq_handler(DRM_IRQ_ARGS)
++{
++	struct drm_device *dev = (struct drm_device *)arg;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t status, fbdev_flags = 0;
++
++	status = nv_rd32(dev, NV03_PMC_INTR_0);
++	if (!status)
++		return IRQ_NONE;
++
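++	/* mask fbcon acceleration while the interrupt is serviced, so any
++	 * error printk from the handlers can't recurse into the hardware */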
++	if (dev_priv->fbdev_info) {
++		fbdev_flags = dev_priv->fbdev_info->flags;
++		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
++	}
++
++	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
++		nouveau_fifo_irq_handler(dev);
++		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
++	}
++
++	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
++		if (dev_priv->card_type >= NV_50)
++			nv50_pgraph_irq_handler(dev);
++		else
++			nouveau_pgraph_irq_handler(dev);
++
++		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
++	}
++
++	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
++		nouveau_crtc_irq_handler(dev, (status >> 24) & 3);
++		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
++	}
++
++	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
++		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
++		nv50_display_irq_handler(dev);
++		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
++			    NV_PMC_INTR_0_NV50_I2C_PENDING);
++	}
++
++	if (status)
++		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
++
++	if (dev_priv->fbdev_info)
++		dev_priv->fbdev_info->flags = fbdev_flags;
++
++	return IRQ_HANDLED;
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
+new file mode 100644
+index 0000000..2dc09db
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
+@@ -0,0 +1,699 @@
++/*
++ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
++ * Copyright 2005 Stephane Marchesin
++ *
++ * The Weather Channel (TM) funded Tungsten Graphics to develop the
++ * initial release of the Radeon 8500 driver under the XFree86 license.
++ * This notice must be preserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ *    Keith Whitwell <keith at tungstengraphics.com>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "nouveau_drv.h"
++
++static struct mem_block *
++split_block(struct mem_block *p, uint64_t start, uint64_t size,
++	    struct drm_file *file_priv)
++{
++	/* Maybe cut off the start of an existing block */
++	if (start > p->start) {
++		struct mem_block *newblock =
++			kmalloc(sizeof(*newblock), GFP_KERNEL);
++		if (!newblock)
++			goto out;
++		newblock->start = start;
++		newblock->size = p->size - (start - p->start);
++		newblock->file_priv = NULL;
++		newblock->next = p->next;
++		newblock->prev = p;
++		p->next->prev = newblock;
++		p->next = newblock;
++		p->size -= newblock->size;
++		p = newblock;
++	}
++
++	/* Maybe cut off the end of an existing block */
++	if (size < p->size) {
++		struct mem_block *newblock =
++			kmalloc(sizeof(*newblock), GFP_KERNEL);
++		if (!newblock)
++			goto out;
++		newblock->start = start + size;
++		newblock->size = p->size - size;
++		newblock->file_priv = NULL;
++		newblock->next = p->next;
++		newblock->prev = p;
++		p->next->prev = newblock;
++		p->next = newblock;
++		p->size = size;
++	}
++
++out:
++	/* Our block is in the middle */
++	p->file_priv = file_priv;
++	return p;
++}
++
++struct mem_block *
++nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
++			int align2, struct drm_file *file_priv, int tail)
++{
++	struct mem_block *p;
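++	/* align2 is log2 of the required alignment; 'tail' scans from the
++	 * end of the list, otherwise first-fit from the head */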
++	uint64_t mask = (1ULL << align2) - 1;
++
++	if (!heap)
++		return NULL;
++
++	if (tail) {
++		list_for_each_prev(p, heap) {
++			uint64_t start = ((p->start + p->size) - size) & ~mask;
++
++			if (p->file_priv == NULL && start >= p->start &&
++			    start + size <= p->start + p->size)
++				return split_block(p, start, size, file_priv);
++		}
++	} else {
++		list_for_each(p, heap) {
++			uint64_t start = (p->start + mask) & ~mask;
++
++			if (p->file_priv == NULL &&
++			    start + size <= p->start + p->size)
++				return split_block(p, start, size, file_priv);
++		}
++	}
++
++	return NULL;
++}
++
++void nouveau_mem_free_block(struct mem_block *p)
++{
++	p->file_priv = NULL;
++
++	/* Assumes a single contiguous range.  Needs a special file_priv in
++	 * 'heap' to stop it being subsumed.
++	 */
++	if (p->next->file_priv == NULL) {
++		struct mem_block *q = p->next;
++		p->size += q->size;
++		p->next = q->next;
++		p->next->prev = p;
++		kfree(q);
++	}
++
++	if (p->prev->file_priv == NULL) {
++		struct mem_block *q = p->prev;
++		q->size += p->size;
++		q->next = p->next;
++		q->next->prev = q;
++		kfree(p);
++	}
++}
++
++/* Initialize.  How to check for an uninitialized heap?
++ */
++int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
++			  uint64_t size)
++{
++	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
++
++	if (!blocks)
++		return -ENOMEM;
++
++	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
++	if (!*heap) {
++		kfree(blocks);
++		return -ENOMEM;
++	}
++
++	blocks->start = start;
++	blocks->size = size;
++	blocks->file_priv = NULL;
++	blocks->next = blocks->prev = *heap;
++
++	memset(*heap, 0, sizeof(**heap));
++	(*heap)->file_priv = (struct drm_file *) -1;
++	(*heap)->next = (*heap)->prev = blocks;
++	return 0;
++}
++
++/*
++ * Free all blocks associated with the releasing file_priv
++ */
++void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
++{
++	struct mem_block *p;
++
++	if (!heap || !heap->next)
++		return;
++
++	list_for_each(p, heap) {
++		if (p->file_priv == file_priv)
++			p->file_priv = NULL;
++	}
++
++	/* Assumes a single contiguous range.  Needs a special file_priv in
++	 * 'heap' to stop it being subsumed.
++	 */
++	list_for_each(p, heap) {
++		while ((p->file_priv == NULL) &&
++					(p->next->file_priv == NULL) &&
++					(p->next != heap)) {
++			struct mem_block *q = p->next;
++			p->size += q->size;
++			p->next = q->next;
++			p->next->prev = p;
++			kfree(q);
++		}
++	}
++}
++
++/*
++ * NV10-NV40 tiling helpers
++ */
++
++static void
++nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
++			   uint32_t size, uint32_t pitch)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
++
++	tile->addr = addr;
++	tile->size = size;
++	tile->used = !!pitch;
++	nouveau_fence_unref((void **)&tile->fence);
++
++	if (!pfifo->cache_flush(dev))
++		return;
++
++	pfifo->reassign(dev, false);
++	pfifo->cache_flush(dev);
++	pfifo->cache_pull(dev, false);
++
++	nouveau_wait_for_idle(dev);
++
++	pgraph->set_region_tiling(dev, i, addr, size, pitch);
++	pfb->set_region_tiling(dev, i, addr, size, pitch);
++
++	pfifo->cache_pull(dev, true);
++	pfifo->reassign(dev, true);
++}
++
++struct nouveau_tile_reg *
++nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
++		    uint32_t pitch)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
++	int i;
++
++	spin_lock(&dev_priv->tile.lock);
++
++	for (i = 0; i < pfb->num_tiles; i++) {
++		if (tile[i].used)
++			/* Tile region in use. */
++			continue;
++
++		if (tile[i].fence &&
++		    !nouveau_fence_signalled(tile[i].fence, NULL))
++			/* Pending tile region. */
++			continue;
++
++		if (max(tile[i].addr, addr) <
++		    min(tile[i].addr + tile[i].size, addr + size))
++			/* Kill an intersecting tile region. */
++			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
++
++		if (pitch && !found) {
++			/* Free tile region. */
++			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
++			found = &tile[i];
++		}
++	}
++
++	spin_unlock(&dev_priv->tile.lock);
++
++	return found;
++}
++
++void
++nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
++		       struct nouveau_fence *fence)
++{
++	if (fence) {
++		/* Mark it as pending. */
++		tile->fence = fence;
++		nouveau_fence_ref(fence);
++	}
++
++	tile->used = false;
++}
++
++/*
++ * NV50 VM helpers
++ */
++int
++nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
++			uint32_t flags, uint64_t phys)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *pgt;
++	unsigned block;
++	int i;
++
++	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
++	size = (size >> 16) << 1;
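++	/* the VM uses 64 KiB pages with two 32-bit words per PTE, so the
++	 * shifts above convert virt and size into units of PTE words */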
++
++	phys |= ((uint64_t)flags << 32);
++	phys |= 1;
++	if (dev_priv->vram_sys_base) {
++		phys += dev_priv->vram_sys_base;
++		phys |= 0x30;
++	}
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	while (size) {
++		unsigned offset_h = upper_32_bits(phys);
++		unsigned offset_l = lower_32_bits(phys);
++		unsigned pte, end;
++
++		for (i = 7; i >= 0; i--) {
++			block = 1 << (i + 1);
++			if (size >= block && !(virt & (block - 1)))
++				break;
++		}
++		offset_l |= (i << 7);
++
++		phys += block << 15;
++		size -= block;
++
++		while (block) {
++			pgt = dev_priv->vm_vram_pt[virt >> 14];
++			pte = virt & 0x3ffe;
++
++			end = pte + block;
++			if (end > 16384)
++				end = 16384;
++			block -= (end - pte);
++			virt  += (end - pte);
++
++			while (pte < end) {
++				nv_wo32(dev, pgt, pte++, offset_l);
++				nv_wo32(dev, pgt, pte++, offset_h);
++			}
++		}
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, 0x100c80, 0x00050001);
++	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++		return -EBUSY;
++	}
++
++	nv_wr32(dev, 0x100c80, 0x00000001);
++	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
++void
++nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *pgt;
++	unsigned pages, pte, end;
++
++	virt -= dev_priv->vm_vram_base;
++	pages = (size >> 16) << 1;
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	while (pages) {
++		pgt = dev_priv->vm_vram_pt[virt >> 29];
++		pte = (virt & 0x1ffe0000ULL) >> 15;
++
++		end = pte + pages;
++		if (end > 16384)
++			end = 16384;
++		pages -= (end - pte);
++		virt  += (end - pte) << 15;
++
++		while (pte < end)
++			nv_wo32(dev, pgt, pte++, 0);
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, 0x100c80, 0x00050001);
++	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++		return;
++	}
++
++	nv_wr32(dev, 0x100c80, 0x00000001);
++	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++	}
++}
++
++/*
++ * Cleanup everything
++ */
++void nouveau_mem_takedown(struct mem_block **heap)
++{
++	struct mem_block *p;
++
++	if (!*heap)
++		return;
++
++	for (p = (*heap)->next; p != *heap;) {
++		struct mem_block *q = p;
++		p = p->next;
++		kfree(q);
++	}
++
++	kfree(*heap);
++	*heap = NULL;
++}
++
++void nouveau_mem_close(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	nouveau_bo_unpin(dev_priv->vga_ram);
++	nouveau_bo_ref(NULL, &dev_priv->vga_ram);
++
++	ttm_bo_device_release(&dev_priv->ttm.bdev);
++
++	nouveau_ttm_global_release(dev_priv);
++
++	if (drm_core_has_AGP(dev) && dev->agp &&
++	    drm_core_check_feature(dev, DRIVER_MODESET)) {
++		struct drm_agp_mem *entry, *tempe;
++
++		/* Remove AGP resources, but leave dev->agp
++		   intact until drv_cleanup is called. */
++		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
++			if (entry->bound)
++				drm_unbind_agp(entry->memory);
++			drm_free_agp(entry->memory, entry->pages);
++			kfree(entry);
++		}
++		INIT_LIST_HEAD(&dev->agp->memory);
++
++		if (dev->agp->acquired)
++			drm_agp_release(dev);
++
++		dev->agp->acquired = 0;
++		dev->agp->enabled = 0;
++	}
++
++	if (dev_priv->fb_mtrr) {
++		drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
++			     drm_get_resource_len(dev, 1), DRM_MTRR_WC);
++		dev_priv->fb_mtrr = 0;
++	}
++}
++
++/*XXX won't work on BSD because of pci_read_config_dword */
++static uint32_t
++nouveau_mem_fb_amount_igp(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct pci_dev *bridge;
++	uint32_t mem;
++
++	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
++	if (!bridge) {
++		NV_ERROR(dev, "no bridge device\n");
++		return 0;
++	}
++
++	if (dev_priv->flags&NV_NFORCE) {
++		pci_read_config_dword(bridge, 0x7C, &mem);
++		return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
++	} else
++	if (dev_priv->flags&NV_NFORCE2) {
++		pci_read_config_dword(bridge, 0x84, &mem);
++		return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
++	}
++
++	NV_ERROR(dev, "impossible!\n");
++	return 0;
++}
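++
++/* e.g. on nForce, bits 10:6 of config dword 0x7C hold (MiB - 1), so a
++ * field value of 31 reports 32 MiB of stolen system RAM. */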
++
++/* returns the amount of FB ram in bytes */
++uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t boot0;
++
++	switch (dev_priv->card_type) {
++	case NV_04:
++		boot0 = nv_rd32(dev, NV03_BOOT_0);
++		if (boot0 & 0x00000100)
++			return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
++
++		switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
++		case NV04_BOOT_0_RAM_AMOUNT_32MB:
++			return 32 * 1024 * 1024;
++		case NV04_BOOT_0_RAM_AMOUNT_16MB:
++			return 16 * 1024 * 1024;
++		case NV04_BOOT_0_RAM_AMOUNT_8MB:
++			return 8 * 1024 * 1024;
++		case NV04_BOOT_0_RAM_AMOUNT_4MB:
++			return 4 * 1024 * 1024;
++		}
++		break;
++	case NV_10:
++	case NV_20:
++	case NV_30:
++	case NV_40:
++	case NV_50:
++	default:
++		if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
++			return nouveau_mem_fb_amount_igp(dev);
++		} else {
++			uint64_t mem;
++			mem = (nv_rd32(dev, NV04_FIFO_DATA) &
++					NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
++					NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
++			return mem * 1024 * 1024;
++		}
++		break;
++	}
++
++	NV_ERROR(dev,
++		"Unable to detect video ram size. Please report your setup to "
++							DRIVER_EMAIL "\n");
++	return 0;
++}
++
++#if __OS_HAS_AGP
++static void nouveau_mem_reset_agp(struct drm_device *dev)
++{
++	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
++
++	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
++	saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
++
++	/* clear busmaster bit */
++	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
++	/* clear SBA and AGP bits */
++	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
++
++	/* power cycle pgraph, if enabled */
++	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
++	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
++		nv_wr32(dev, NV03_PMC_ENABLE,
++				pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
++		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
++				NV_PMC_ENABLE_PGRAPH);
++	}
++
++	/* and restore (gives effect of resetting AGP) */
++	nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
++	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
++}
++#endif
++
++int
++nouveau_mem_init_agp(struct drm_device *dev)
++{
++#if __OS_HAS_AGP
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_agp_info info;
++	struct drm_agp_mode mode;
++	int ret;
++
++	if (nouveau_noagp)
++		return 0;
++
++	nouveau_mem_reset_agp(dev);
++
++	if (!dev->agp->acquired) {
++		ret = drm_agp_acquire(dev);
++		if (ret) {
++			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
++			return ret;
++		}
++	}
++
++	ret = drm_agp_info(dev, &info);
++	if (ret) {
++		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
++		return ret;
++	}
++
++	/* see agp.h for the AGPSTAT_* modes available */
++	mode.mode = info.mode;
++	ret = drm_agp_enable(dev, mode);
++	if (ret) {
++		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
++		return ret;
++	}
++
++	dev_priv->gart_info.type	= NOUVEAU_GART_AGP;
++	dev_priv->gart_info.aper_base	= info.aperture_base;
++	dev_priv->gart_info.aper_size	= info.aperture_size;
++#endif
++	return 0;
++}
++
++int
++nouveau_mem_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
++	int ret, dma_bits = 32;
++
++	dev_priv->fb_phys = drm_get_resource_start(dev, 1);
++	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
++
++	if (dev_priv->card_type >= NV_50 &&
++	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
++		dma_bits = 40;
++
++	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
++	if (ret) {
++		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
++		return ret;
++	}
++
++	ret = nouveau_ttm_global_init(dev_priv);
++	if (ret)
++		return ret;
++
++	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
++				 dev_priv->ttm.bo_global_ref.ref.object,
++				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
++				 dma_bits <= 32 ? true : false);
++	if (ret) {
++		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
++		return ret;
++	}
++
++	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
++	spin_lock_init(&dev_priv->ttm.bo_list_lock);
++	spin_lock_init(&dev_priv->tile.lock);
++
++	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
++
++	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
++	if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
++		dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
++	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
++
++	NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
++
++	/* remove reserved space at end of vram from available amount */
++	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
++	dev_priv->fb_aper_free = dev_priv->fb_available_size;
++
++	/* mappable vram */
++	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
++			     dev_priv->fb_available_size >> PAGE_SHIFT);
++	if (ret) {
++		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
++		return ret;
++	}
++
++	ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
++			     0, 0, true, true, &dev_priv->vga_ram);
++	if (ret == 0)
++		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
++	if (ret) {
++		NV_WARN(dev, "failed to reserve VGA memory\n");
++		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
++	}
++
++	/* GART */
++#if !defined(__powerpc__) && !defined(__ia64__)
++	if (drm_device_is_agp(dev) && dev->agp) {
++		ret = nouveau_mem_init_agp(dev);
++		if (ret)
++			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
++	}
++#endif
++
++	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
++		ret = nouveau_sgdma_init(dev);
++		if (ret) {
++			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
++			return ret;
++		}
++	}
++
++	NV_INFO(dev, "%d MiB GART (aperture)\n",
++		(int)(dev_priv->gart_info.aper_size >> 20));
++	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
++
++	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
++			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
++	if (ret) {
++		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
++		return ret;
++	}
++
++	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
++					 drm_get_resource_len(dev, 1),
++					 DRM_MTRR_WC);
++
++	return 0;
++}
++
++
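The VM helpers above pack each 64 KiB page as two 32-bit PTE words, with the
contiguous-block order folded into bits 9:7 of the low word. A minimal
illustrative sketch of that encoding in plain C (parameter names merely mirror
the dev_priv fields; this is a reading of nv50_mem_vm_bind_linear, not a
verified hardware description):

	#include <stdint.h>

	/* Encode one PTE pair the way nv50_mem_vm_bind_linear does above. */
	static void nv50_pte_pair(uint64_t phys, uint32_t flags,
				  uint64_t vram_sys_base, unsigned order,
				  uint32_t *lo, uint32_t *hi)
	{
		phys |= (uint64_t)flags << 32;	/* storage flags, bits 63:32 */
		phys |= 1;			/* present bit */
		if (vram_sys_base) {		/* VRAM carved from system RAM */
			phys += vram_sys_base;
			phys |= 0x30;
		}
		*lo = (uint32_t)phys | (order << 7);	/* order in bits 9:7 */
		*hi = (uint32_t)(phys >> 32);
	}
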
+diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
+new file mode 100644
+index 0000000..d99dc08
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
+@@ -0,0 +1,203 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++int
++nouveau_notifier_init_channel(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct nouveau_bo *ntfy = NULL;
++	uint32_t flags;
++	int ret;
++
++	if (nouveau_vram_notify)
++		flags = TTM_PL_FLAG_VRAM;
++	else
++		flags = TTM_PL_FLAG_TT;
++
++	ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
++			      0, 0x0000, false, true, &ntfy);
++	if (ret)
++		return ret;
++
++	ret = nouveau_bo_pin(ntfy, flags);
++	if (ret)
++		goto out_err;
++
++	ret = nouveau_bo_map(ntfy);
++	if (ret)
++		goto out_err;
++
++	ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
++	if (ret)
++		goto out_err;
++
++	chan->notifier_bo = ntfy;
++out_err:
++	if (ret) {
++		mutex_lock(&dev->struct_mutex);
++		drm_gem_object_unreference(ntfy->gem);
++		mutex_unlock(&dev->struct_mutex);
++	}
++
++	return ret;
++}
++
++void
++nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++
++	if (!chan->notifier_bo)
++		return;
++
++	nouveau_bo_unmap(chan->notifier_bo);
++	mutex_lock(&dev->struct_mutex);
++	nouveau_bo_unpin(chan->notifier_bo);
++	drm_gem_object_unreference(chan->notifier_bo->gem);
++	mutex_unlock(&dev->struct_mutex);
++	nouveau_mem_takedown(&chan->notifier_heap);
++}
++
++static void
++nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
++			     struct nouveau_gpuobj *gpuobj)
++{
++	NV_DEBUG(dev, "\n");
++
++	if (gpuobj->priv)
++		nouveau_mem_free_block(gpuobj->priv);
++}
++
++int
++nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
++		       int size, uint32_t *b_offset)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *nobj = NULL;
++	struct mem_block *mem;
++	uint32_t offset;
++	int target, ret;
++
++	if (!chan->notifier_heap) {
++		NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
++			 chan->id);
++		return -EINVAL;
++	}
++
++	mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
++				      (struct drm_file *)-2, 0);
++	if (!mem) {
++		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
++		return -ENOMEM;
++	}
++
++	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
++	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
++		target = NV_DMA_TARGET_VIDMEM;
++	} else
++	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
++		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
++		    dev_priv->card_type < NV_50) {
++			ret = nouveau_sgdma_get_page(dev, offset, &offset);
++			if (ret)
++				return ret;
++			target = NV_DMA_TARGET_PCI;
++		} else {
++			target = NV_DMA_TARGET_AGP;
++			if (dev_priv->card_type >= NV_50)
++				offset += dev_priv->vm_gart_base;
++		}
++	} else {
++		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
++			 chan->notifier_bo->bo.mem.mem_type);
++		return -EINVAL;
++	}
++	offset += mem->start;
++
++	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
++				     mem->size, NV_DMA_ACCESS_RW, target,
++				     &nobj);
++	if (ret) {
++		nouveau_mem_free_block(mem);
++		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
++		return ret;
++	}
++	nobj->dtor   = nouveau_notifier_gpuobj_dtor;
++	nobj->priv   = mem;
++
++	ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
++	if (ret) {
++		nouveau_gpuobj_del(dev, &nobj);
++		nouveau_mem_free_block(mem);
++		NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
++		return ret;
++	}
++
++	*b_offset = mem->start;
++	return 0;
++}
++
++int
++nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
++{
++	if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
++		return -EINVAL;
++
++	if (poffset) {
++		struct mem_block *mem = nobj->priv;
++
++		if (*poffset >= mem->size)
++			return false;
++
++		*poffset += mem->start;
++	}
++
++	return 0;
++}
++
++int
++nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
++			     struct drm_file *file_priv)
++{
++	struct drm_nouveau_notifierobj_alloc *na = data;
++	struct nouveau_channel *chan;
++	int ret;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
++
++	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
++	if (ret)
++		return ret;
++
++	return 0;
++}
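nouveau_notifier_alloc() above hands back offsets relative to the channel's
notifier buffer object. A minimal caller sketch (0xbeef0301 is an arbitrary
illustrative handle, not one the driver defines):

	/* Carve a 32-byte notifier out of the channel's notifier BO. */
	static int example_alloc_notifier(struct nouveau_channel *chan)
	{
		uint32_t b_offset;
		int ret;

		ret = nouveau_notifier_alloc(chan, 0xbeef0301, 32, &b_offset);
		if (ret)
			return ret;

		/* b_offset is the byte offset within the notifier BO; a
		 * ctxdma covering it now sits in the channel's RAMHT under
		 * the handle above. */
		return 0;
	}
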
+diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
+new file mode 100644
+index 0000000..e7c100b
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_object.c
+@@ -0,0 +1,1295 @@
++/*
++ * Copyright (C) 2006 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++/*
++ * Authors:
++ *   Ben Skeggs <darktama at iinet.net.au>
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++/* NVidia uses context objects to drive drawing operations.
++
++   Context objects can be selected into 8 subchannels in the FIFO,
++   and then used via DMA command buffers.
++
++   A context object is referenced by a user defined handle (CARD32). The HW
++   looks up graphics objects in a hash table in the instance RAM.
++
++   An entry in the hash table consists of 2 CARD32s. The first CARD32
++   contains the handle, the second one a bitfield that contains the address
++   of the object in instance RAM.
++
++   The format of the second CARD32 seems to be:
++
++   NV4 to NV30:
++
++   15: 0  instance_addr >> 4
++   17:16  engine (here uses 1 = graphics)
++   28:24  channel id (here uses 0)
++   31	  valid (use 1)
++
++   NV40:
++
++   15: 0  instance_addr >> 4   (maybe 19-0)
++   21:20  engine (here uses 1 = graphics)
++   I'm unsure about the other bits, but using 0 seems to work.
++
++   The key into the hash table depends on the object handle and channel id and
++   is given as:
++*/
++static uint32_t
++nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t hash = 0;
++	int i;
++
++	NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
++
++	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
++		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
++		handle >>= dev_priv->ramht_bits;
++	}
++
++	if (dev_priv->card_type < NV_50)
++		hash ^= channel << (dev_priv->ramht_bits - 4);
++	hash <<= 3;
++
++	NV_DEBUG(dev, "hash=0x%08x\n", hash);
++	return hash;
++}
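++
++/*
++ * Worked reading of the fold above: with a 4 KiB RAMHT (512 eight-byte
++ * entries) ramht_bits is 9, so the 32-bit handle is consumed in four
++ * 9-bit slices XORed together; pre-NV50 the channel id lands at bit 5
++ * (ramht_bits - 4), and the final << 3 turns the entry index into a
++ * byte offset.
++ */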
++
++static int
++nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
++			  uint32_t offset)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
++
++	if (dev_priv->card_type < NV_40)
++		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
++	return (ctx != 0);
++}
++
++static int
++nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
++	struct nouveau_channel *chan = ref->channel;
++	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
++	uint32_t ctx, co, ho;
++
++	if (!ramht) {
++		NV_ERROR(dev, "No hash table!\n");
++		return -EINVAL;
++	}
++
++	if (dev_priv->card_type < NV_40) {
++		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
++		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
++		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
++	} else
++	if (dev_priv->card_type < NV_50) {
++		ctx = (ref->instance >> 4) |
++		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
++		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
++	} else {
++		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
++			ctx = (ref->instance << 10) | 2;
++		} else {
++			ctx = (ref->instance >> 4) |
++			      ((ref->gpuobj->engine <<
++				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
++		}
++	}
++
++	instmem->prepare_access(dev, true);
++	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
++	do {
++		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
++			NV_DEBUG(dev,
++				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
++				 chan->id, co, ref->handle, ctx);
++			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
++			nv_wo32(dev, ramht, (co + 4)/4, ctx);
++
++			list_add_tail(&ref->list, &chan->ramht_refs);
++			instmem->finish_access(dev);
++			return 0;
++		}
++		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
++			 chan->id, co, nv_ro32(dev, ramht, co/4));
++
++		co += 8;
++		if (co >= dev_priv->ramht_size)
++			co = 0;
++	} while (co != ho);
++	instmem->finish_access(dev);
++
++	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
++	return -ENOMEM;
++}
++
++static void
++nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
++	struct nouveau_channel *chan = ref->channel;
++	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
++	uint32_t co, ho;
++
++	if (!ramht) {
++		NV_ERROR(dev, "No hash table!\n");
++		return;
++	}
++
++	instmem->prepare_access(dev, true);
++	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
++	do {
++		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
++		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
++			NV_DEBUG(dev,
++				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
++				 chan->id, co, ref->handle,
++				 nv_ro32(dev, ramht, (co + 4)));
++			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
++			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
++
++			list_del(&ref->list);
++			instmem->finish_access(dev);
++			return;
++		}
++
++		co += 8;
++		if (co >= dev_priv->ramht_size)
++			co = 0;
++	} while (co != ho);
++	list_del(&ref->list);
++	instmem->finish_access(dev);
++
++	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
++		 chan->id, ref->handle);
++}
++
++int
++nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
++		   uint32_t size, int align, uint32_t flags,
++		   struct nouveau_gpuobj **gpuobj_ret)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++	struct nouveau_gpuobj *gpuobj;
++	struct mem_block *pramin = NULL;
++	int ret;
++
++	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
++		 chan ? chan->id : -1, size, align, flags);
++
++	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
++		return -EINVAL;
++
++	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
++	if (!gpuobj)
++		return -ENOMEM;
++	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
++	gpuobj->flags = flags;
++	gpuobj->im_channel = chan;
++
++	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++	/* Choose between the global instmem heap and the per-channel
++	 * private instmem heap.  On <NV50, allow requests for private
++	 * instmem to be satisfied from the global heap if no per-channel
++	 * area is available.
++	 */
++	if (chan) {
++		if (chan->ramin_heap) {
++			NV_DEBUG(dev, "private heap\n");
++			pramin = chan->ramin_heap;
++		} else
++		if (dev_priv->card_type < NV_50) {
++			NV_DEBUG(dev, "global heap fallback\n");
++			pramin = dev_priv->ramin_heap;
++		}
++	} else {
++		NV_DEBUG(dev, "global heap\n");
++		pramin = dev_priv->ramin_heap;
++	}
++
++	if (!pramin) {
++		NV_ERROR(dev, "No PRAMIN heap!\n");
++		return -EINVAL;
++	}
++
++	if (!chan) {
++		ret = engine->instmem.populate(dev, gpuobj, &size);
++		if (ret) {
++			nouveau_gpuobj_del(dev, &gpuobj);
++			return ret;
++		}
++	}
++
++	/* Allocate a chunk of the PRAMIN aperture */
++	gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
++						    drm_order(align),
++						    (struct drm_file *)-2, 0);
++	if (!gpuobj->im_pramin) {
++		nouveau_gpuobj_del(dev, &gpuobj);
++		return -ENOMEM;
++	}
++
++	if (!chan) {
++		ret = engine->instmem.bind(dev, gpuobj);
++		if (ret) {
++			nouveau_gpuobj_del(dev, &gpuobj);
++			return ret;
++		}
++	}
++
++	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
++		int i;
++
++		engine->instmem.prepare_access(dev, true);
++		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++			nv_wo32(dev, gpuobj, i/4, 0);
++		engine->instmem.finish_access(dev);
++	}
++
++	*gpuobj_ret = gpuobj;
++	return 0;
++}
++
++int
++nouveau_gpuobj_early_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	NV_DEBUG(dev, "\n");
++
++	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
++
++	return 0;
++}
++
++int
++nouveau_gpuobj_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int ret;
++
++	NV_DEBUG(dev, "\n");
++
++	if (dev_priv->card_type < NV_50) {
++		ret = nouveau_gpuobj_new_fake(dev,
++			dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
++			NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
++						&dev_priv->ramht, NULL);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++void
++nouveau_gpuobj_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	NV_DEBUG(dev, "\n");
++
++	nouveau_gpuobj_del(dev, &dev_priv->ramht);
++}
++
++void
++nouveau_gpuobj_late_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj = NULL;
++	struct list_head *entry, *tmp;
++
++	NV_DEBUG(dev, "\n");
++
++	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
++		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
++
++		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
++			 gpuobj, gpuobj->refcount);
++		gpuobj->refcount = 0;
++		nouveau_gpuobj_del(dev, &gpuobj);
++	}
++}
++
++int
++nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++	struct nouveau_gpuobj *gpuobj;
++	int i;
++
++	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
++
++	if (!dev_priv || !pgpuobj || !(*pgpuobj))
++		return -EINVAL;
++	gpuobj = *pgpuobj;
++
++	if (gpuobj->refcount != 0) {
++		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
++		return -EINVAL;
++	}
++
++	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
++		engine->instmem.prepare_access(dev, true);
++		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++			nv_wo32(dev, gpuobj, i/4, 0);
++		engine->instmem.finish_access(dev);
++	}
++
++	if (gpuobj->dtor)
++		gpuobj->dtor(dev, gpuobj);
++
++	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
++		engine->instmem.clear(dev, gpuobj);
++
++	if (gpuobj->im_pramin) {
++		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
++			kfree(gpuobj->im_pramin);
++		else
++			nouveau_mem_free_block(gpuobj->im_pramin);
++	}
++
++	list_del(&gpuobj->list);
++
++	*pgpuobj = NULL;
++	kfree(gpuobj);
++	return 0;
++}
++
++static int
++nouveau_gpuobj_instance_get(struct drm_device *dev,
++			    struct nouveau_channel *chan,
++			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *cpramin;
++
++	/* <NV50 use PRAMIN address everywhere */
++	if (dev_priv->card_type < NV_50) {
++		*inst = gpuobj->im_pramin->start;
++		return 0;
++	}
++
++	if (chan && gpuobj->im_channel != chan) {
++		NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
++			 gpuobj->im_channel->id, chan->id);
++		return -EINVAL;
++	}
++
++	/* NV50 channel-local instance */
++	if (chan) {
++		cpramin = chan->ramin->gpuobj;
++		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
++		return 0;
++	}
++
++	/* NV50 global (VRAM) instance */
++	if (!gpuobj->im_channel) {
++		/* ...from global heap */
++		if (!gpuobj->im_backing) {
++			NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
++			return -EINVAL;
++		}
++		*inst = gpuobj->im_backing_start;
++		return 0;
++	} else {
++		/* ...from local heap */
++		cpramin = gpuobj->im_channel->ramin->gpuobj;
++		*inst = cpramin->im_backing_start +
++			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
++		return 0;
++	}
++
++	return -EINVAL;
++}
++
++int
++nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
++		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
++		       struct nouveau_gpuobj_ref **ref_ret)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj_ref *ref;
++	uint32_t instance;
++	int ret;
++
++	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
++		 chan ? chan->id : -1, handle, gpuobj);
++
++	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
++		return -EINVAL;
++
++	if (!chan && !ref_ret)
++		return -EINVAL;
++
++	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
++		/* sw object */
++		instance = 0x40;
++	} else {
++		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
++		if (ret)
++			return ret;
++	}
++
++	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
++	if (!ref)
++		return -ENOMEM;
++	INIT_LIST_HEAD(&ref->list);
++	ref->gpuobj   = gpuobj;
++	ref->channel  = chan;
++	ref->instance = instance;
++
++	if (!ref_ret) {
++		ref->handle = handle;
++
++		ret = nouveau_ramht_insert(dev, ref);
++		if (ret) {
++			kfree(ref);
++			return ret;
++		}
++	} else {
++		ref->handle = ~0;
++		*ref_ret = ref;
++	}
++
++	ref->gpuobj->refcount++;
++	return 0;
++}
++
++int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
++{
++	struct nouveau_gpuobj_ref *ref;
++
++	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
++
++	if (!dev || !pref || *pref == NULL)
++		return -EINVAL;
++	ref = *pref;
++
++	if (ref->handle != ~0)
++		nouveau_ramht_remove(dev, ref);
++
++	if (ref->gpuobj) {
++		ref->gpuobj->refcount--;
++
++		if (ref->gpuobj->refcount == 0) {
++			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
++				nouveau_gpuobj_del(dev, &ref->gpuobj);
++		}
++	}
++
++	*pref = NULL;
++	kfree(ref);
++	return 0;
++}
++
++int
++nouveau_gpuobj_new_ref(struct drm_device *dev,
++		       struct nouveau_channel *oc, struct nouveau_channel *rc,
++		       uint32_t handle, uint32_t size, int align,
++		       uint32_t flags, struct nouveau_gpuobj_ref **ref)
++{
++	struct nouveau_gpuobj *gpuobj = NULL;
++	int ret;
++
++	ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
++	if (ret)
++		return ret;
++
++	ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
++	if (ret) {
++		nouveau_gpuobj_del(dev, &gpuobj);
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
++			struct nouveau_gpuobj_ref **ref_ret)
++{
++	struct nouveau_gpuobj_ref *ref;
++	struct list_head *entry, *tmp;
++
++	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
++		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
++
++		if (ref->handle == handle) {
++			if (ref_ret)
++				*ref_ret = ref;
++			return 0;
++		}
++	}
++
++	return -EINVAL;
++}
++
++int
++nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
++			uint32_t b_offset, uint32_t size,
++			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
++			struct nouveau_gpuobj_ref **pref)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj = NULL;
++	int i;
++
++	NV_DEBUG(dev,
++		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
++		 p_offset, b_offset, size, flags);
++
++	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
++	if (!gpuobj)
++		return -ENOMEM;
++	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
++	gpuobj->im_channel = NULL;
++	gpuobj->flags      = flags | NVOBJ_FLAG_FAKE;
++
++	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++
++	if (p_offset != ~0) {
++		gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
++					    GFP_KERNEL);
++		if (!gpuobj->im_pramin) {
++			nouveau_gpuobj_del(dev, &gpuobj);
++			return -ENOMEM;
++		}
++		gpuobj->im_pramin->start = p_offset;
++		gpuobj->im_pramin->size  = size;
++	}
++
++	if (b_offset != ~0) {
++		gpuobj->im_backing = (struct nouveau_bo *)-1;
++		gpuobj->im_backing_start = b_offset;
++	}
++
++	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
++		dev_priv->engine.instmem.prepare_access(dev, true);
++		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
++			nv_wo32(dev, gpuobj, i/4, 0);
++		dev_priv->engine.instmem.finish_access(dev);
++	}
++
++	if (pref) {
++		i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
++		if (i) {
++			nouveau_gpuobj_del(dev, &gpuobj);
++			return i;
++		}
++	}
++
++	if (pgpuobj)
++		*pgpuobj = gpuobj;
++	return 0;
++}
++
++
++static uint32_t
++nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/*XXX: dodgy hack for now */
++	if (dev_priv->card_type >= NV_50)
++		return 24;
++	if (dev_priv->card_type >= NV_40)
++		return 32;
++	return 16;
++}
++
++/*
++   DMA objects are used to reference a piece of memory in the
++   framebuffer, PCI or AGP address space. Each object is 16 bytes big
++   and looks as follows:
++
++   entry[0]
++   11:0  class (seems like I can always use 0 here)
++   12    page table present?
++   13    page entry linear?
++   15:14 access: 0 rw, 1 ro, 2 wo
++   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
++   31:20 dma adjust (bits 0-11 of the address)
++   entry[1]
++   dma limit (size of transfer)
++   entry[X]
++   1     0 readonly, 1 readwrite
++   31:12 dma frame address of the page (bits 12-31 of the address)
++   entry[N]
++   page table terminator: nvidia uses the same value as the first pte,
++   rivatv uses 0xffffffff
++
++   Non linear page tables need a list of frame addresses afterwards,
++   the rivatv project has some info on this.
++
++   The method below creates a DMA object in instance RAM and returns a handle
++   to it that can be used to set up context objects.
++*/
++int
++nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
++		       uint64_t offset, uint64_t size, int access,
++		       int target, struct nouveau_gpuobj **gpuobj)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
++	int ret;
++
++	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
++		 chan->id, class, offset, size);
++	NV_DEBUG(dev, "access=%d target=%d\n", access, target);
++
++	switch (target) {
++	case NV_DMA_TARGET_AGP:
++		offset += dev_priv->gart_info.aper_base;
++		break;
++	default:
++		break;
++	}
++
++	ret = nouveau_gpuobj_new(dev, chan,
++				 nouveau_gpuobj_class_instmem_size(dev, class),
++				 16, NVOBJ_FLAG_ZERO_ALLOC |
++				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
++	if (ret) {
++		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
++		return ret;
++	}
++
++	instmem->prepare_access(dev, true);
++
++	if (dev_priv->card_type < NV_50) {
++		uint32_t frame, adjust, pte_flags = 0;
++
++		if (access != NV_DMA_ACCESS_RO)
++			pte_flags |= (1<<1);
++		adjust = offset &  0x00000fff;
++		frame  = offset & ~0x00000fff;
++
++		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
++				(adjust << 20) |
++				 (access << 14) |
++				 (target << 16) |
++				  class));
++		nv_wo32(dev, *gpuobj, 1, size - 1);
++		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
++		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
++	} else {
++		uint64_t limit = offset + size - 1;
++		uint32_t flags0, flags5;
++
++		if (target == NV_DMA_TARGET_VIDMEM) {
++			flags0 = 0x00190000;
++			flags5 = 0x00010000;
++		} else {
++			flags0 = 0x7fc00000;
++			flags5 = 0x00080000;
++		}
++
++		nv_wo32(dev, *gpuobj, 0, flags0 | class);
++		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
++		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
++		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
++					(upper_32_bits(offset) & 0xff));
++		nv_wo32(dev, *gpuobj, 5, flags5);
++	}
++
++	instmem->finish_access(dev);
++
++	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
++	(*gpuobj)->class  = class;
++	return 0;
++}
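++
++/*
++ * Worked example of the pre-NV50 branch above: an RW DMA_IN_MEMORY
++ * object (class 0x3d) targeting VRAM at offset 0x1000, size 0x2000,
++ * gives adjust=0, frame=0x1000, pte_flags=2, so the object reads
++ * 0x0000303d, 0x00001fff, 0x00001002, 0x00001002.
++ */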
++
++int
++nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
++			    uint64_t offset, uint64_t size, int access,
++			    struct nouveau_gpuobj **gpuobj,
++			    uint32_t *o_ret)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int ret;
++
++	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
++	    (dev_priv->card_type >= NV_50 &&
++	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
++		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++					     offset + dev_priv->vm_gart_base,
++					     size, access, NV_DMA_TARGET_AGP,
++					     gpuobj);
++		if (o_ret)
++			*o_ret = 0;
++	} else
++	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
++		*gpuobj = dev_priv->gart_info.sg_ctxdma;
++		if (offset & ~0xffffffffULL) {
++			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
++			return -EINVAL;
++		}
++		if (o_ret)
++			*o_ret = (uint32_t)offset;
++		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
++	} else {
++		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
++		return -EINVAL;
++	}
++
++	return ret;
++}
++
++/* Context objects in the instance RAM have the following structure.
++ * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.
++
++   NV4 - NV30:
++
++   entry[0]
++   11:0 class
++   12   chroma key enable
++   13   user clip enable
++   14   swizzle enable
++   17:15 patch config:
++       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
++   18   synchronize enable
++   19   endian: 1 big, 0 little
++   21:20 dither mode
++   23    single step enable
++   24    patch status: 0 invalid, 1 valid
++   25    context_surface 0: 1 valid
++   26    context surface 1: 1 valid
++   27    context pattern: 1 valid
++   28    context rop: 1 valid
++   29,30 context beta, beta4
++   entry[1]
++   7:0   mono format
++   15:8  color format
++   31:16 notify instance address
++   entry[2]
++   15:0  dma 0 instance address
++   31:16 dma 1 instance address
++   entry[3]
++   dma method traps
++
++   NV40:
++   No idea what the exact format is. Here's what can be deduced:
++
++   entry[0]:
++   11:0  class  (maybe uses more bits here?)
++   17    user clip enable
++   21:19 patch config
++   25    patch status valid ?
++   entry[1]:
++   15:0  DMA notifier  (maybe 20:0)
++   entry[2]:
++   15:0  DMA 0 instance (maybe 20:0)
++   24    big endian
++   entry[3]:
++   15:0  DMA 1 instance (maybe 20:0)
++   entry[4]:
++   entry[5]:
++   set to 0?
++*/
++int
++nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
++		      struct nouveau_gpuobj **gpuobj)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int ret;
++
++	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
++
++	ret = nouveau_gpuobj_new(dev, chan,
++				 nouveau_gpuobj_class_instmem_size(dev, class),
++				 16,
++				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
++				 gpuobj);
++	if (ret) {
++		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
++		return ret;
++	}
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	if (dev_priv->card_type >= NV_50) {
++		nv_wo32(dev, *gpuobj, 0, class);
++		nv_wo32(dev, *gpuobj, 5, 0x00010000);
++	} else {
++		switch (class) {
++		case NV_CLASS_NULL:
++			nv_wo32(dev, *gpuobj, 0, 0x00001030);
++			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
++			break;
++		default:
++			if (dev_priv->card_type >= NV_40) {
++				nv_wo32(dev, *gpuobj, 0, class);
++#ifdef __BIG_ENDIAN
++				nv_wo32(dev, *gpuobj, 2, 0x01000000);
++#endif
++			} else {
++#ifdef __BIG_ENDIAN
++				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
++#else
++				nv_wo32(dev, *gpuobj, 0, class);
++#endif
++			}
++		}
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
++	(*gpuobj)->class  = class;
++	return 0;
++}
++
++int
++nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
++		      struct nouveau_gpuobj **gpuobj_ret)
++{
++	struct drm_nouveau_private *dev_priv;
++	struct nouveau_gpuobj *gpuobj;
++
++	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
++		return -EINVAL;
++	dev_priv = chan->dev->dev_private;
++
++	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
++	if (!gpuobj)
++		return -ENOMEM;
++	gpuobj->engine = NVOBJ_ENGINE_SW;
++	gpuobj->class = class;
++
++	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
++	*gpuobj_ret = gpuobj;
++	return 0;
++}
++
++static int
++nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *pramin = NULL;
++	uint32_t size;
++	uint32_t base;
++	int ret;
++
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++
++	/* Base amount for object storage (4KiB enough?) */
++	size = 0x1000;
++	base = 0;
++
++	/* PGRAPH context */
++
++	if (dev_priv->card_type == NV_50) {
++		/* Various fixed table thingos */
++		size += 0x1400; /* mostly unknown stuff */
++		size += 0x4000; /* vm pd */
++		base  = 0x6000;
++		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
++		size += 0x8000;
++		/* RAMFC */
++		size += 0x1000;
++		/* PGRAPH context */
++		size += 0x70000;
++	}
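++	/* For NV50 the additions above total 0x7f400 bytes; base = 0x6000
++	 * keeps the fixed tables and the 16 KiB page directory clear of
++	 * the suballocation heap set up below. */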
++
++	NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
++		 chan->id, size, base);
++	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
++				     &chan->ramin);
++	if (ret) {
++		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
++		return ret;
++	}
++	pramin = chan->ramin->gpuobj;
++
++	ret = nouveau_mem_init_heap(&chan->ramin_heap,
++				    pramin->im_pramin->start + base, size);
++	if (ret) {
++		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
++		nouveau_gpuobj_ref_del(dev, &chan->ramin);
++		return ret;
++	}
++
++	return 0;
++}
++
++int
++nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
++			    uint32_t vram_h, uint32_t tt_h)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
++	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
++	int ret, i;
++
++	INIT_LIST_HEAD(&chan->ramht_refs);
++
++	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
++
++	/* Reserve a block of PRAMIN for the channel
++	 *XXX: maybe on <NV50 too at some point
++	 */
++	if (0 || dev_priv->card_type == NV_50) {
++		ret = nouveau_gpuobj_channel_init_pramin(chan);
++		if (ret) {
++			NV_ERROR(dev, "init pramin\n");
++			return ret;
++		}
++	}
++
++	/* NV50 VM
++	 *  - Allocate per-channel page-directory
++	 *  - Map GART and VRAM into the channel's address space at the
++	 *    locations determined during init.
++	 */
++	if (dev_priv->card_type >= NV_50) {
++		uint32_t vm_offset, pde;
++
++		instmem->prepare_access(dev, true);
++
++		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
++		vm_offset += chan->ramin->gpuobj->im_pramin->start;
++
++		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
++							0, &chan->vm_pd, NULL);
++		if (ret) {
++			instmem->finish_access(dev);
++			return ret;
++		}
++		for (i = 0; i < 0x4000; i += 8) {
++			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
++			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
++		}
++
++		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
++		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
++					     dev_priv->gart_info.sg_ctxdma,
++					     &chan->vm_gart_pt);
++		if (ret) {
++			instmem->finish_access(dev);
++			return ret;
++		}
++		nv_wo32(dev, chan->vm_pd, pde++,
++			    chan->vm_gart_pt->instance | 0x03);
++		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
++
++		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
++		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
++			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
++						     dev_priv->vm_vram_pt[i],
++						     &chan->vm_vram_pt[i]);
++			if (ret) {
++				instmem->finish_access(dev);
++				return ret;
++			}
++
++			nv_wo32(dev, chan->vm_pd, pde++,
++				    chan->vm_vram_pt[i]->instance | 0x61);
++			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
++		}
++
++		instmem->finish_access(dev);
++	}
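++	/* PDE arithmetic above: each page-directory entry spans 512 MiB of
++	 * virtual address space and occupies two 32-bit words, hence
++	 * pde = (base / (512*1024*1024)) * 2. */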
++
++	/* RAMHT */
++	if (dev_priv->card_type < NV_50) {
++		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
++					     &chan->ramht);
++		if (ret)
++			return ret;
++	} else {
++		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
++					     0x8000, 16,
++					     NVOBJ_FLAG_ZERO_ALLOC,
++					     &chan->ramht);
++		if (ret)
++			return ret;
++	}
++
++	/* VRAM ctxdma */
++	if (dev_priv->card_type >= NV_50) {
++		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++					     0, dev_priv->vm_end,
++					     NV_DMA_ACCESS_RW,
++					     NV_DMA_TARGET_AGP, &vram);
++		if (ret) {
++			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
++			return ret;
++		}
++	} else {
++		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
++						0, dev_priv->fb_available_size,
++						NV_DMA_ACCESS_RW,
++						NV_DMA_TARGET_VIDMEM, &vram);
++		if (ret) {
++			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
++			return ret;
++		}
++	}
++
++	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
++	if (ret) {
++		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
++		return ret;
++	}
++
++	/* TT memory ctxdma */
++	if (dev_priv->card_type >= NV_50) {
++		tt = vram;
++	} else
++	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
++		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
++						  dev_priv->gart_info.aper_size,
++						  NV_DMA_ACCESS_RW, &tt, NULL);
++	} else {
++		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
++		ret = -EINVAL;
++	}
++
++	if (ret) {
++		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
++		return ret;
++	}
++
++	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
++	if (ret) {
++		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
++		return ret;
++	}
++
++	return 0;
++}
++
++void
++nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
++{
++	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
++	struct drm_device *dev = chan->dev;
++	struct list_head *entry, *tmp;
++	struct nouveau_gpuobj_ref *ref;
++	int i;
++
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++
++	if (!chan->ramht_refs.next)
++		return;
++
++	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
++		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
++
++		nouveau_gpuobj_ref_del(dev, &ref);
++	}
++
++	nouveau_gpuobj_ref_del(dev, &chan->ramht);
++
++	nouveau_gpuobj_del(dev, &chan->vm_pd);
++	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
++	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
++		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
++
++	if (chan->ramin_heap)
++		nouveau_mem_takedown(&chan->ramin_heap);
++	if (chan->ramin)
++		nouveau_gpuobj_ref_del(dev, &chan->ramin);
++
++}
++
++int
++nouveau_gpuobj_suspend(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj;
++	int i;
++
++	if (dev_priv->card_type < NV_50) {
++		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
++		if (!dev_priv->susres.ramin_copy)
++			return -ENOMEM;
++
++		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
++			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
++		return 0;
++	}
++
++	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
++		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
++			continue;
++
++		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
++		if (!gpuobj->im_backing_suspend) {
++			nouveau_gpuobj_resume(dev);
++			return -ENOMEM;
++		}
++
++		dev_priv->engine.instmem.prepare_access(dev, false);
++		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
++			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
++		dev_priv->engine.instmem.finish_access(dev);
++	}
++
++	return 0;
++}
++
++void
++nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj;
++
++	if (dev_priv->card_type < NV_50) {
++		vfree(dev_priv->susres.ramin_copy);
++		dev_priv->susres.ramin_copy = NULL;
++		return;
++	}
++
++	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
++		if (!gpuobj->im_backing_suspend)
++			continue;
++
++		vfree(gpuobj->im_backing_suspend);
++		gpuobj->im_backing_suspend = NULL;
++	}
++}
++
++void
++nouveau_gpuobj_resume(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj;
++	int i;
++
++	if (dev_priv->card_type < NV_50) {
++		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
++			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
++		nouveau_gpuobj_suspend_cleanup(dev);
++		return;
++	}
++
++	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
++		if (!gpuobj->im_backing_suspend)
++			continue;
++
++		dev_priv->engine.instmem.prepare_access(dev, true);
++		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
++			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
++		dev_priv->engine.instmem.finish_access(dev);
++	}
++
++	nouveau_gpuobj_suspend_cleanup(dev);
++}
++
++int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_nouveau_grobj_alloc *init = data;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_pgraph_object_class *grc;
++	struct nouveau_gpuobj *gr = NULL;
++	struct nouveau_channel *chan;
++	int ret;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
++
++	if (init->handle == ~0)
++		return -EINVAL;
++
++	grc = pgraph->grclass;
++	while (grc->id) {
++		if (grc->id == init->class)
++			break;
++		grc++;
++	}
++
++	if (!grc->id) {
++		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
++		return -EPERM;
++	}
++
++	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
++		return -EEXIST;
++
++	if (!grc->software)
++		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
++	else
++		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
++
++	if (ret) {
++		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
++			 ret, init->channel, init->handle);
++		return ret;
++	}
++
++	ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
++	if (ret) {
++		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
++			 ret, init->channel, init->handle);
++		nouveau_gpuobj_del(dev, &gr);
++		return ret;
++	}
++
++	return 0;
++}
++
++int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv)
++{
++	struct drm_nouveau_gpuobj_free *objfree = data;
++	struct nouveau_gpuobj_ref *ref;
++	struct nouveau_channel *chan;
++	int ret;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
++
++	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
++	if (ret)
++		return ret;
++	nouveau_gpuobj_ref_del(dev, &ref);
++
++	return 0;
++}
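The helpers above form a create/reference/release cycle. A minimal usage
sketch, assuming a live dev and chan as in the functions above:

	static int example_gpuobj_cycle(struct drm_device *dev,
					struct nouveau_channel *chan)
	{
		struct nouveau_gpuobj_ref *ref = NULL;
		int ret;

		/* 4 KiB object in the channel's PRAMIN, zeroed on alloc;
		 * no RAMHT handle, just a private reference. */
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x1000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC, &ref);
		if (ret)
			return ret;

		/* Dropping the last reference also frees the object. */
		return nouveau_gpuobj_ref_del(dev, &ref);
	}
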
+diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
+new file mode 100644
+index 0000000..aa9b310
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
+@@ -0,0 +1,837 @@
++
++
++#define NV03_BOOT_0                                        0x00100000
++#    define NV03_BOOT_0_RAM_AMOUNT                         0x00000003
++#    define NV03_BOOT_0_RAM_AMOUNT_8MB                     0x00000000
++#    define NV03_BOOT_0_RAM_AMOUNT_2MB                     0x00000001
++#    define NV03_BOOT_0_RAM_AMOUNT_4MB                     0x00000002
++#    define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM               0x00000003
++#    define NV04_BOOT_0_RAM_AMOUNT_32MB                    0x00000000
++#    define NV04_BOOT_0_RAM_AMOUNT_4MB                     0x00000001
++#    define NV04_BOOT_0_RAM_AMOUNT_8MB                     0x00000002
++#    define NV04_BOOT_0_RAM_AMOUNT_16MB                    0x00000003
++
++#define NV04_FIFO_DATA                                     0x0010020c
++#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK              0xfff00000
++#    define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT             20
++
++#define NV_RAMIN                                           0x00700000
++
++#define NV_RAMHT_HANDLE_OFFSET                             0
++#define NV_RAMHT_CONTEXT_OFFSET                            4
++#    define NV_RAMHT_CONTEXT_VALID                         (1<<31)
++#    define NV_RAMHT_CONTEXT_CHANNEL_SHIFT                 24
++#    define NV_RAMHT_CONTEXT_ENGINE_SHIFT                  16
++#        define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE           0
++#        define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS           1
++#    define NV_RAMHT_CONTEXT_INSTANCE_SHIFT                0
++#    define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT               23
++#    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
++#    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
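++/* e.g. a pre-NV40 context entry is assembled as NV_RAMHT_CONTEXT_VALID |
++ * (instance >> 4) | (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
++ * (engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT); see nouveau_ramht_insert()
++ * in nouveau_object.c above. */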
++
++/* DMA object defines */
++#define NV_DMA_ACCESS_RW 0
++#define NV_DMA_ACCESS_RO 1
++#define NV_DMA_ACCESS_WO 2
++#define NV_DMA_TARGET_VIDMEM 0
++#define NV_DMA_TARGET_PCI    2
++#define NV_DMA_TARGET_AGP    3
++/* The following is not a real value used by the card; it's changed by
++ * nouveau_object_dma_create */
++#define NV_DMA_TARGET_PCI_NONLINEAR 8
++
++/* Some object classes we care about in the drm */
++#define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
++#define NV_CLASS_DMA_TO_MEMORY                             0x00000003
++#define NV_CLASS_NULL                                      0x00000030
++#define NV_CLASS_DMA_IN_MEMORY                             0x0000003D
++
++#define NV03_USER(i)                             (0x00800000+(i*NV03_USER_SIZE))
++#define NV03_USER__SIZE                                                       16
++#define NV10_USER__SIZE                                                       32
++#define NV03_USER_SIZE                                                0x00010000
++#define NV03_USER_DMA_PUT(i)                     (0x00800040+(i*NV03_USER_SIZE))
++#define NV03_USER_DMA_PUT__SIZE                                               16
++#define NV10_USER_DMA_PUT__SIZE                                               32
++#define NV03_USER_DMA_GET(i)                     (0x00800044+(i*NV03_USER_SIZE))
++#define NV03_USER_DMA_GET__SIZE                                               16
++#define NV10_USER_DMA_GET__SIZE                                               32
++#define NV03_USER_REF_CNT(i)                     (0x00800048+(i*NV03_USER_SIZE))
++#define NV03_USER_REF_CNT__SIZE                                               16
++#define NV10_USER_REF_CNT__SIZE                                               32
++
++#define NV40_USER(i)                             (0x00c00000+(i*NV40_USER_SIZE))
++#define NV40_USER_SIZE                                                0x00001000
++#define NV40_USER_DMA_PUT(i)                     (0x00c00040+(i*NV40_USER_SIZE))
++#define NV40_USER_DMA_PUT__SIZE                                               32
++#define NV40_USER_DMA_GET(i)                     (0x00c00044+(i*NV40_USER_SIZE))
++#define NV40_USER_DMA_GET__SIZE                                               32
++#define NV40_USER_REF_CNT(i)                     (0x00c00048+(i*NV40_USER_SIZE))
++#define NV40_USER_REF_CNT__SIZE                                               32
++
++#define NV50_USER(i)                             (0x00c00000+(i*NV50_USER_SIZE))
++#define NV50_USER_SIZE                                                0x00002000
++#define NV50_USER_DMA_PUT(i)                     (0x00c00040+(i*NV50_USER_SIZE))
++#define NV50_USER_DMA_PUT__SIZE                                              128
++#define NV50_USER_DMA_GET(i)                     (0x00c00044+(i*NV50_USER_SIZE))
++#define NV50_USER_DMA_GET__SIZE                                              128
++#define NV50_USER_REF_CNT(i)                     (0x00c00048+(i*NV50_USER_SIZE))
++#define NV50_USER_REF_CNT__SIZE                                              128
++
++#define NV03_FIFO_SIZE                                     0x8000UL
++
++#define NV03_PMC_BOOT_0                                    0x00000000
++#define NV03_PMC_BOOT_1                                    0x00000004
++#define NV03_PMC_INTR_0                                    0x00000100
++#    define NV_PMC_INTR_0_PFIFO_PENDING                        (1<<8)
++#    define NV_PMC_INTR_0_PGRAPH_PENDING                      (1<<12)
++#    define NV_PMC_INTR_0_NV50_I2C_PENDING                    (1<<21)
++#    define NV_PMC_INTR_0_CRTC0_PENDING                       (1<<24)
++#    define NV_PMC_INTR_0_CRTC1_PENDING                       (1<<25)
++#    define NV_PMC_INTR_0_NV50_DISPLAY_PENDING                (1<<26)
++#    define NV_PMC_INTR_0_CRTCn_PENDING                       (3<<24)
++#define NV03_PMC_INTR_EN_0                                 0x00000140
++#    define NV_PMC_INTR_EN_0_MASTER_ENABLE                     (1<<0)
++#define NV03_PMC_ENABLE                                    0x00000200
++#    define NV_PMC_ENABLE_PFIFO                                (1<<8)
++#    define NV_PMC_ENABLE_PGRAPH                              (1<<12)
++/* Disabling the bit below breaks newer (G7X only?) mobile chipsets;
++ * the card will hang early in the X init process.
++ */
++#    define NV_PMC_ENABLE_UNK13                               (1<<13)
++#define NV40_PMC_GRAPH_UNITS				   0x00001540
++#define NV40_PMC_BACKLIGHT				   0x000015f0
++#	define NV40_PMC_BACKLIGHT_MASK			   0x001f0000
++#define NV40_PMC_1700                                      0x00001700
++#define NV40_PMC_1704                                      0x00001704
++#define NV40_PMC_1708                                      0x00001708
++#define NV40_PMC_170C                                      0x0000170C
++
++/* probably PMC? */
++#define NV50_PUNK_BAR0_PRAMIN                              0x00001700
++#define NV50_PUNK_BAR_CFG_BASE                             0x00001704
++#define NV50_PUNK_BAR_CFG_BASE_VALID                          (1<<30)
++#define NV50_PUNK_BAR1_CTXDMA                              0x00001708
++#define NV50_PUNK_BAR1_CTXDMA_VALID                           (1<<31)
++#define NV50_PUNK_BAR3_CTXDMA                              0x0000170C
++#define NV50_PUNK_BAR3_CTXDMA_VALID                           (1<<31)
++#define NV50_PUNK_UNK1710                                  0x00001710
++
++#define NV04_PBUS_PCI_NV_1                                 0x00001804
++#define NV04_PBUS_PCI_NV_19                                0x0000184C
++#define NV04_PBUS_PCI_NV_20				0x00001850
++#	define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED		(0 << 0)
++#	define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED		(1 << 0)
++
++#define NV04_PTIMER_INTR_0                                 0x00009100
++#define NV04_PTIMER_INTR_EN_0                              0x00009140
++#define NV04_PTIMER_NUMERATOR                              0x00009200
++#define NV04_PTIMER_DENOMINATOR                            0x00009210
++#define NV04_PTIMER_TIME_0                                 0x00009400
++#define NV04_PTIMER_TIME_1                                 0x00009410
++#define NV04_PTIMER_ALARM_0                                0x00009420
++
++#define NV04_PFB_CFG0                                      0x00100200
++#define NV04_PFB_CFG1                                      0x00100204
++#define NV40_PFB_020C                                      0x0010020C
++#define NV10_PFB_TILE(i)                                   (0x00100240 + (i*16))
++#define NV10_PFB_TILE__SIZE                                8
++#define NV10_PFB_TLIMIT(i)                                 (0x00100244 + (i*16))
++#define NV10_PFB_TSIZE(i)                                  (0x00100248 + (i*16))
++#define NV10_PFB_TSTATUS(i)                                (0x0010024C + (i*16))
++#define NV10_PFB_CLOSE_PAGE2                               0x0010033C
++#define NV40_PFB_TILE(i)                                   (0x00100600 + (i*16))
++#define NV40_PFB_TILE__SIZE_0                              12
++#define NV40_PFB_TILE__SIZE_1                              15
++#define NV40_PFB_TLIMIT(i)                                 (0x00100604 + (i*16))
++#define NV40_PFB_TSIZE(i)                                  (0x00100608 + (i*16))
++#define NV40_PFB_TSTATUS(i)                                (0x0010060C + (i*16))
++#define NV40_PFB_UNK_800					0x00100800
++
++#define NV04_PGRAPH_DEBUG_0                                0x00400080
++#define NV04_PGRAPH_DEBUG_1                                0x00400084
++#define NV04_PGRAPH_DEBUG_2                                0x00400088
++#define NV04_PGRAPH_DEBUG_3                                0x0040008c
++#define NV10_PGRAPH_DEBUG_4                                0x00400090
++#define NV03_PGRAPH_INTR                                   0x00400100
++#define NV03_PGRAPH_NSTATUS                                0x00400104
++#    define NV04_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<11)
++#    define NV04_PGRAPH_NSTATUS_INVALID_STATE                 (1<<12)
++#    define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<13)
++#    define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<14)
++#    define NV10_PGRAPH_NSTATUS_STATE_IN_USE                  (1<<23)
++#    define NV10_PGRAPH_NSTATUS_INVALID_STATE                 (1<<24)
++#    define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT                  (1<<25)
++#    define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT              (1<<26)
++#define NV03_PGRAPH_NSOURCE                                0x00400108
++#    define NV03_PGRAPH_NSOURCE_NOTIFICATION                   (1<<0)
++#    define NV03_PGRAPH_NSOURCE_DATA_ERROR                     (1<<1)
++#    define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR               (1<<2)
++#    define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION                (1<<3)
++#    define NV03_PGRAPH_NSOURCE_LIMIT_COLOR                    (1<<4)
++#    define NV03_PGRAPH_NSOURCE_LIMIT_ZETA                     (1<<5)
++#    define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD                   (1<<6)
++#    define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION               (1<<7)
++#    define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION               (1<<8)
++#    define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION               (1<<9)
++#    define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION               (1<<10)
++#    define NV03_PGRAPH_NSOURCE_STATE_INVALID                 (1<<11)
++#    define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY                 (1<<12)
++#    define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE                 (1<<13)
++#    define NV03_PGRAPH_NSOURCE_METHOD_CNT                    (1<<14)
++#    define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION              (1<<15)
++#    define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION            (1<<16)
++#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A                   (1<<17)
++#    define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B                   (1<<18)
++#define NV03_PGRAPH_INTR_EN                                0x00400140
++#define NV40_PGRAPH_INTR_EN                                0x0040013C
++#    define NV_PGRAPH_INTR_NOTIFY                              (1<<0)
++#    define NV_PGRAPH_INTR_MISSING_HW                          (1<<4)
++#    define NV_PGRAPH_INTR_CONTEXT_SWITCH                     (1<<12)
++#    define NV_PGRAPH_INTR_BUFFER_NOTIFY                      (1<<16)
++#    define NV_PGRAPH_INTR_ERROR                              (1<<20)
++#define NV10_PGRAPH_CTX_CONTROL                            0x00400144
++#define NV10_PGRAPH_CTX_USER                               0x00400148
++#define NV10_PGRAPH_CTX_SWITCH1                            0x0040014C
++#define NV10_PGRAPH_CTX_SWITCH2                            0x00400150
++#define NV10_PGRAPH_CTX_SWITCH3                            0x00400154
++#define NV10_PGRAPH_CTX_SWITCH4                            0x00400158
++#define NV10_PGRAPH_CTX_SWITCH5                            0x0040015C
++#define NV04_PGRAPH_CTX_SWITCH1                            0x00400160
++#define NV10_PGRAPH_CTX_CACHE1                             0x00400160
++#define NV04_PGRAPH_CTX_SWITCH2                            0x00400164
++#define NV04_PGRAPH_CTX_SWITCH3                            0x00400168
++#define NV04_PGRAPH_CTX_SWITCH4                            0x0040016C
++#define NV04_PGRAPH_CTX_CONTROL                            0x00400170
++#define NV04_PGRAPH_CTX_USER                               0x00400174
++#define NV04_PGRAPH_CTX_CACHE1                             0x00400180
++#define NV10_PGRAPH_CTX_CACHE2                             0x00400180
++#define NV03_PGRAPH_CTX_CONTROL                            0x00400190
++#define NV03_PGRAPH_CTX_USER                               0x00400194
++#define NV04_PGRAPH_CTX_CACHE2                             0x004001A0
++#define NV10_PGRAPH_CTX_CACHE3                             0x004001A0
++#define NV04_PGRAPH_CTX_CACHE3                             0x004001C0
++#define NV10_PGRAPH_CTX_CACHE4                             0x004001C0
++#define NV04_PGRAPH_CTX_CACHE4                             0x004001E0
++#define NV10_PGRAPH_CTX_CACHE5                             0x004001E0
++#define NV40_PGRAPH_CTXCTL_0304                            0x00400304
++#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX                   0x00000001
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT                      0x00400308
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK              0xff000000
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT                     24
++#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK              0x00ffffff
++#define NV40_PGRAPH_CTXCTL_0310                            0x00400310
++#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE                  0x00000020
++#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD                  0x00000040
++#define NV40_PGRAPH_CTXCTL_030C                            0x0040030c
++#define NV40_PGRAPH_CTXCTL_UCODE_INDEX                     0x00400324
++#define NV40_PGRAPH_CTXCTL_UCODE_DATA                      0x00400328
++#define NV40_PGRAPH_CTXCTL_CUR                             0x0040032c
++#define NV40_PGRAPH_CTXCTL_CUR_LOADED                      0x01000000
++#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE                    0x000FFFFF
++#define NV40_PGRAPH_CTXCTL_NEXT                            0x00400330
++#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x000fffff
++#define NV50_PGRAPH_CTXCTL_CUR                             0x0040032c
++#define NV50_PGRAPH_CTXCTL_CUR_LOADED                      0x80000000
++#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE                    0x00ffffff
++#define NV50_PGRAPH_CTXCTL_NEXT                            0x00400330
++#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE                   0x00ffffff
++#define NV03_PGRAPH_ABS_X_RAM                              0x00400400
++#define NV03_PGRAPH_ABS_Y_RAM                              0x00400480
++#define NV03_PGRAPH_X_MISC                                 0x00400500
++#define NV03_PGRAPH_Y_MISC                                 0x00400504
++#define NV04_PGRAPH_VALID1                                 0x00400508
++#define NV04_PGRAPH_SOURCE_COLOR                           0x0040050C
++#define NV04_PGRAPH_MISC24_0                               0x00400510
++#define NV03_PGRAPH_XY_LOGIC_MISC0                         0x00400514
++#define NV03_PGRAPH_XY_LOGIC_MISC1                         0x00400518
++#define NV03_PGRAPH_XY_LOGIC_MISC2                         0x0040051C
++#define NV03_PGRAPH_XY_LOGIC_MISC3                         0x00400520
++#define NV03_PGRAPH_CLIPX_0                                0x00400524
++#define NV03_PGRAPH_CLIPX_1                                0x00400528
++#define NV03_PGRAPH_CLIPY_0                                0x0040052C
++#define NV03_PGRAPH_CLIPY_1                                0x00400530
++#define NV03_PGRAPH_ABS_ICLIP_XMAX                         0x00400534
++#define NV03_PGRAPH_ABS_ICLIP_YMAX                         0x00400538
++#define NV03_PGRAPH_ABS_UCLIP_XMIN                         0x0040053C
++#define NV03_PGRAPH_ABS_UCLIP_YMIN                         0x00400540
++#define NV03_PGRAPH_ABS_UCLIP_XMAX                         0x00400544
++#define NV03_PGRAPH_ABS_UCLIP_YMAX                         0x00400548
++#define NV03_PGRAPH_ABS_UCLIPA_XMIN                        0x00400560
++#define NV03_PGRAPH_ABS_UCLIPA_YMIN                        0x00400564
++#define NV03_PGRAPH_ABS_UCLIPA_XMAX                        0x00400568
++#define NV03_PGRAPH_ABS_UCLIPA_YMAX                        0x0040056C
++#define NV04_PGRAPH_MISC24_1                               0x00400570
++#define NV04_PGRAPH_MISC24_2                               0x00400574
++#define NV04_PGRAPH_VALID2                                 0x00400578
++#define NV04_PGRAPH_PASSTHRU_0                             0x0040057C
++#define NV04_PGRAPH_PASSTHRU_1                             0x00400580
++#define NV04_PGRAPH_PASSTHRU_2                             0x00400584
++#define NV10_PGRAPH_DIMX_TEXTURE                           0x00400588
++#define NV10_PGRAPH_WDIMX_TEXTURE                          0x0040058C
++#define NV04_PGRAPH_COMBINE_0_ALPHA                        0x00400590
++#define NV04_PGRAPH_COMBINE_0_COLOR                        0x00400594
++#define NV04_PGRAPH_COMBINE_1_ALPHA                        0x00400598
++#define NV04_PGRAPH_COMBINE_1_COLOR                        0x0040059C
++#define NV04_PGRAPH_FORMAT_0                               0x004005A8
++#define NV04_PGRAPH_FORMAT_1                               0x004005AC
++#define NV04_PGRAPH_FILTER_0                               0x004005B0
++#define NV04_PGRAPH_FILTER_1                               0x004005B4
++#define NV03_PGRAPH_MONO_COLOR0                            0x00400600
++#define NV04_PGRAPH_ROP3                                   0x00400604
++#define NV04_PGRAPH_BETA_AND                               0x00400608
++#define NV04_PGRAPH_BETA_PREMULT                           0x0040060C
++#define NV04_PGRAPH_LIMIT_VIOL_PIX                         0x00400610
++#define NV04_PGRAPH_FORMATS                                0x00400618
++#define NV10_PGRAPH_DEBUG_2                                0x00400620
++#define NV04_PGRAPH_BOFFSET0                               0x00400640
++#define NV04_PGRAPH_BOFFSET1                               0x00400644
++#define NV04_PGRAPH_BOFFSET2                               0x00400648
++#define NV04_PGRAPH_BOFFSET3                               0x0040064C
++#define NV04_PGRAPH_BOFFSET4                               0x00400650
++#define NV04_PGRAPH_BOFFSET5                               0x00400654
++#define NV04_PGRAPH_BBASE0                                 0x00400658
++#define NV04_PGRAPH_BBASE1                                 0x0040065C
++#define NV04_PGRAPH_BBASE2                                 0x00400660
++#define NV04_PGRAPH_BBASE3                                 0x00400664
++#define NV04_PGRAPH_BBASE4                                 0x00400668
++#define NV04_PGRAPH_BBASE5                                 0x0040066C
++#define NV04_PGRAPH_BPITCH0                                0x00400670
++#define NV04_PGRAPH_BPITCH1                                0x00400674
++#define NV04_PGRAPH_BPITCH2                                0x00400678
++#define NV04_PGRAPH_BPITCH3                                0x0040067C
++#define NV04_PGRAPH_BPITCH4                                0x00400680
++#define NV04_PGRAPH_BLIMIT0                                0x00400684
++#define NV04_PGRAPH_BLIMIT1                                0x00400688
++#define NV04_PGRAPH_BLIMIT2                                0x0040068C
++#define NV04_PGRAPH_BLIMIT3                                0x00400690
++#define NV04_PGRAPH_BLIMIT4                                0x00400694
++#define NV04_PGRAPH_BLIMIT5                                0x00400698
++#define NV04_PGRAPH_BSWIZZLE2                              0x0040069C
++#define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
++#define NV03_PGRAPH_STATUS                                 0x004006B0
++#define NV04_PGRAPH_STATUS                                 0x00400700
++#define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
++#define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
++#define NV04_PGRAPH_SURFACE                                0x0040070C
++#define NV10_PGRAPH_TRAPPED_DATA_HIGH                      0x0040070C
++#define NV04_PGRAPH_STATE                                  0x00400710
++#define NV10_PGRAPH_SURFACE                                0x00400710
++#define NV04_PGRAPH_NOTIFY                                 0x00400714
++#define NV10_PGRAPH_STATE                                  0x00400714
++#define NV10_PGRAPH_NOTIFY                                 0x00400718
++
++#define NV04_PGRAPH_FIFO                                   0x00400720
++
++#define NV04_PGRAPH_BPIXEL                                 0x00400724
++#define NV10_PGRAPH_RDI_INDEX                              0x00400750
++#define NV04_PGRAPH_FFINTFC_ST2                            0x00400754
++#define NV10_PGRAPH_RDI_DATA                               0x00400754
++#define NV04_PGRAPH_DMA_PITCH                              0x00400760
++#define NV10_PGRAPH_FFINTFC_ST2                            0x00400764
++#define NV04_PGRAPH_DVD_COLORFMT                           0x00400764
++#define NV04_PGRAPH_SCALED_FORMAT                          0x00400768
++#define NV10_PGRAPH_DMA_PITCH                              0x00400770
++#define NV10_PGRAPH_DVD_COLORFMT                           0x00400774
++#define NV10_PGRAPH_SCALED_FORMAT                          0x00400778
++#define NV20_PGRAPH_CHANNEL_CTX_TABLE                      0x00400780
++#define NV20_PGRAPH_CHANNEL_CTX_POINTER                    0x00400784
++#define NV20_PGRAPH_CHANNEL_CTX_XFER                       0x00400788
++#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD                  0x00000001
++#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE                  0x00000002
++#define NV04_PGRAPH_PATT_COLOR0                            0x00400800
++#define NV04_PGRAPH_PATT_COLOR1                            0x00400804
++#define NV04_PGRAPH_PATTERN                                0x00400808
++#define NV04_PGRAPH_PATTERN_SHAPE                          0x00400810
++#define NV04_PGRAPH_CHROMA                                 0x00400814
++#define NV04_PGRAPH_CONTROL0                               0x00400818
++#define NV04_PGRAPH_CONTROL1                               0x0040081C
++#define NV04_PGRAPH_CONTROL2                               0x00400820
++#define NV04_PGRAPH_BLEND                                  0x00400824
++#define NV04_PGRAPH_STORED_FMT                             0x00400830
++#define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
++#define NV20_PGRAPH_TILE(i)                                (0x00400900 + (i*16))
++#define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
++#define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
++#define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
++#define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
++#define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
++#define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
++#define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
++#define NV04_PGRAPH_U_RAM                                  0x00400D00
++#define NV47_PGRAPH_TILE(i)                                (0x00400D00 + (i*16))
++#define NV47_PGRAPH_TLIMIT(i)                              (0x00400D04 + (i*16))
++#define NV47_PGRAPH_TSIZE(i)                               (0x00400D08 + (i*16))
++#define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
++#define NV04_PGRAPH_V_RAM                                  0x00400D40
++#define NV04_PGRAPH_W_RAM                                  0x00400D80
++#define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40
++#define NV10_PGRAPH_COMBINER1_IN_ALPHA                     0x00400E44
++#define NV10_PGRAPH_COMBINER0_IN_RGB                       0x00400E48
++#define NV10_PGRAPH_COMBINER1_IN_RGB                       0x00400E4C
++#define NV10_PGRAPH_COMBINER_COLOR0                        0x00400E50
++#define NV10_PGRAPH_COMBINER_COLOR1                        0x00400E54
++#define NV10_PGRAPH_COMBINER0_OUT_ALPHA                    0x00400E58
++#define NV10_PGRAPH_COMBINER1_OUT_ALPHA                    0x00400E5C
++#define NV10_PGRAPH_COMBINER0_OUT_RGB                      0x00400E60
++#define NV10_PGRAPH_COMBINER1_OUT_RGB                      0x00400E64
++#define NV10_PGRAPH_COMBINER_FINAL0                        0x00400E68
++#define NV10_PGRAPH_COMBINER_FINAL1                        0x00400E6C
++#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL                  0x00400F00
++#define NV10_PGRAPH_WINDOWCLIP_VERTICAL                    0x00400F20
++#define NV10_PGRAPH_XFMODE0                                0x00400F40
++#define NV10_PGRAPH_XFMODE1                                0x00400F44
++#define NV10_PGRAPH_GLOBALSTATE0                           0x00400F48
++#define NV10_PGRAPH_GLOBALSTATE1                           0x00400F4C
++#define NV10_PGRAPH_PIPE_ADDRESS                           0x00400F50
++#define NV10_PGRAPH_PIPE_DATA                              0x00400F54
++#define NV04_PGRAPH_DMA_START_0                            0x00401000
++#define NV04_PGRAPH_DMA_START_1                            0x00401004
++#define NV04_PGRAPH_DMA_LENGTH                             0x00401008
++#define NV04_PGRAPH_DMA_MISC                               0x0040100C
++#define NV04_PGRAPH_DMA_DATA_0                             0x00401020
++#define NV04_PGRAPH_DMA_DATA_1                             0x00401024
++#define NV04_PGRAPH_DMA_RM                                 0x00401030
++#define NV04_PGRAPH_DMA_A_XLATE_INST                       0x00401040
++#define NV04_PGRAPH_DMA_A_CONTROL                          0x00401044
++#define NV04_PGRAPH_DMA_A_LIMIT                            0x00401048
++#define NV04_PGRAPH_DMA_A_TLB_PTE                          0x0040104C
++#define NV04_PGRAPH_DMA_A_TLB_TAG                          0x00401050
++#define NV04_PGRAPH_DMA_A_ADJ_OFFSET                       0x00401054
++#define NV04_PGRAPH_DMA_A_OFFSET                           0x00401058
++#define NV04_PGRAPH_DMA_A_SIZE                             0x0040105C
++#define NV04_PGRAPH_DMA_A_Y_SIZE                           0x00401060
++#define NV04_PGRAPH_DMA_B_XLATE_INST                       0x00401080
++#define NV04_PGRAPH_DMA_B_CONTROL                          0x00401084
++#define NV04_PGRAPH_DMA_B_LIMIT                            0x00401088
++#define NV04_PGRAPH_DMA_B_TLB_PTE                          0x0040108C
++#define NV04_PGRAPH_DMA_B_TLB_TAG                          0x00401090
++#define NV04_PGRAPH_DMA_B_ADJ_OFFSET                       0x00401094
++#define NV04_PGRAPH_DMA_B_OFFSET                           0x00401098
++#define NV04_PGRAPH_DMA_B_SIZE                             0x0040109C
++#define NV04_PGRAPH_DMA_B_Y_SIZE                           0x004010A0
++#define NV40_PGRAPH_TILE1(i)                               (0x00406900 + (i*16))
++#define NV40_PGRAPH_TLIMIT1(i)                             (0x00406904 + (i*16))
++#define NV40_PGRAPH_TSIZE1(i)                              (0x00406908 + (i*16))
++#define NV40_PGRAPH_TSTATUS1(i)                            (0x0040690C + (i*16))
++
++
++/* It's a guess that this works on NV03. Confirmed on NV04, though */
++#define NV04_PFIFO_DELAY_0                                 0x00002040
++#define NV04_PFIFO_DMA_TIMESLICE                           0x00002044
++#define NV04_PFIFO_NEXT_CHANNEL                            0x00002050
++#define NV03_PFIFO_INTR_0                                  0x00002100
++#define NV03_PFIFO_INTR_EN_0                               0x00002140
++#    define NV_PFIFO_INTR_CACHE_ERROR                          (1<<0)
++#    define NV_PFIFO_INTR_RUNOUT                               (1<<4)
++#    define NV_PFIFO_INTR_RUNOUT_OVERFLOW                      (1<<8)
++#    define NV_PFIFO_INTR_DMA_PUSHER                          (1<<12)
++#    define NV_PFIFO_INTR_DMA_PT                              (1<<16)
++#    define NV_PFIFO_INTR_SEMAPHORE                           (1<<20)
++#    define NV_PFIFO_INTR_ACQUIRE_TIMEOUT                     (1<<24)
++#define NV03_PFIFO_RAMHT                                   0x00002210
++#define NV03_PFIFO_RAMFC                                   0x00002214
++#define NV03_PFIFO_RAMRO                                   0x00002218
++#define NV40_PFIFO_RAMFC                                   0x00002220
++#define NV03_PFIFO_CACHES                                  0x00002500
++#define NV04_PFIFO_MODE                                    0x00002504
++#define NV04_PFIFO_DMA                                     0x00002508
++#define NV04_PFIFO_SIZE                                    0x0000250c
++#define NV50_PFIFO_CTX_TABLE(c)                        (0x2600+(c)*4)
++#define NV50_PFIFO_CTX_TABLE__SIZE                                128
++#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED                  (1<<31)
++#define NV50_PFIFO_CTX_TABLE_UNK30_BAD                        (1<<30)
++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80             0x0FFFFFFF
++#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84             0x00FFFFFF
++#define NV03_PFIFO_CACHE0_PUSH0                            0x00003000
++#define NV03_PFIFO_CACHE0_PULL0                            0x00003040
++#define NV04_PFIFO_CACHE0_PULL0                            0x00003050
++#define NV04_PFIFO_CACHE0_PULL1                            0x00003054
++#define NV03_PFIFO_CACHE1_PUSH0                            0x00003200
++#define NV03_PFIFO_CACHE1_PUSH1                            0x00003204
++#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8)
++#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16)
++#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f
++#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f
++#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f
++#define NV03_PFIFO_CACHE1_PUT                              0x00003210
++#define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220
++#define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES        0x00000008
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES        0x00000010
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES        0x00000018
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES        0x00000020
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES        0x00000028
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES        0x00000030
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES        0x00000038
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES        0x00000040
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES        0x00000048
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES        0x00000050
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES        0x00000058
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES       0x00000060
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES       0x00000068
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES       0x00000070
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES       0x00000078
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES       0x00000080
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES       0x00000088
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES       0x00000090
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES       0x00000098
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES       0x000000A0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES       0x000000A8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES       0x000000B0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES       0x000000B8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES       0x000000C0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES       0x000000C8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES       0x000000D0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES       0x000000D8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES       0x000000E0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES       0x000000E8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES       0x000000F0
++#    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES       0x000000F8
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE                 0x0000E000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES        0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES        0x00002000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES        0x00004000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES       0x00006000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES       0x00008000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES       0x0000A000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES       0x0000C000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES       0x0000E000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS             0x001F0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0           0x00000000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1           0x00010000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2           0x00020000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3           0x00030000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4           0x00040000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5           0x00050000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6           0x00060000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7           0x00070000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8           0x00080000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9           0x00090000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10          0x000A0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11          0x000B0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12          0x000C0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13          0x000D0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14          0x000E0000
++#    define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15          0x000F0000
++#    define NV_PFIFO_CACHE1_ENDIAN                         0x80000000
++#    define NV_PFIFO_CACHE1_LITTLE_ENDIAN                  0x7FFFFFFF
++#    define NV_PFIFO_CACHE1_BIG_ENDIAN                     0x80000000
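++/* A reading of the encodings above (an observed pattern, not documented
++ * here): the TRIG values are simply (bytes - 8) in bits 3..7, SIZE is
++ * ((bytes / 32) - 1) << 13, and MAX_REQS_n is just n << 16; for example,
++ * NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES = ((96 / 32) - 1) << 13
++ * = 0x00004000.
++ */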
++#define NV04_PFIFO_CACHE1_DMA_STATE                        0x00003228
++#define NV04_PFIFO_CACHE1_DMA_INSTANCE                     0x0000322c
++#define NV04_PFIFO_CACHE1_DMA_CTL                          0x00003230
++#define NV04_PFIFO_CACHE1_DMA_PUT                          0x00003240
++#define NV04_PFIFO_CACHE1_DMA_GET                          0x00003244
++#define NV10_PFIFO_CACHE1_REF_CNT                          0x00003248
++#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE                   0x0000324C
++#define NV03_PFIFO_CACHE1_PULL0                            0x00003240
++#define NV04_PFIFO_CACHE1_PULL0                            0x00003250
++#define NV03_PFIFO_CACHE1_PULL1                            0x00003250
++#define NV04_PFIFO_CACHE1_PULL1                            0x00003254
++#define NV04_PFIFO_CACHE1_HASH                             0x00003258
++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT                  0x00003260
++#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP                0x00003264
++#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE                    0x00003268
++#define NV10_PFIFO_CACHE1_SEMAPHORE                        0x0000326C
++#define NV03_PFIFO_CACHE1_GET                              0x00003270
++#define NV04_PFIFO_CACHE1_ENGINE                           0x00003280
++#define NV04_PFIFO_CACHE1_DMA_DCOUNT                       0x000032A0
++#define NV40_PFIFO_GRCTX_INSTANCE                          0x000032E0
++#define NV40_PFIFO_UNK32E4                                 0x000032E4
++#define NV04_PFIFO_CACHE1_METHOD(i)                (0x00003800+(i*8))
++#define NV04_PFIFO_CACHE1_DATA(i)                  (0x00003804+(i*8))
++#define NV40_PFIFO_CACHE1_METHOD(i)                (0x00090000+(i*8))
++#define NV40_PFIFO_CACHE1_DATA(i)                  (0x00090004+(i*8))
++
++#define NV_CRTC0_INTSTAT                                   0x00600100
++#define NV_CRTC0_INTEN                                     0x00600140
++#define NV_CRTC1_INTSTAT                                   0x00602100
++#define NV_CRTC1_INTEN                                     0x00602140
++#    define NV_CRTC_INTR_VBLANK                                (1<<0)
++
++#define NV04_PRAMIN						0x00700000
++
++/* FIFO commands. These are neither registers nor masks. */
++#define NV03_FIFO_CMD_JUMP                                 0x20000000
++#define NV03_FIFO_CMD_JUMP_OFFSET_MASK                     0x1ffffffc
++#define NV03_FIFO_CMD_REWIND                               (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
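++/* For example, a jump to FIFO offset 0x2000 would be encoded as
++ * NV03_FIFO_CMD_JUMP | (0x2000 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)
++ * = 0x20002000; REWIND above is just a jump back to offset 0.
++ */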
++
++/* This is a partial import from rules-ng; a few things may be duplicated.
++ * Eventually we should import everything from rules-ng.
++ * For the moment, check rules-ng for docs.
++ */
++
++#define NV50_PMC                                            0x00000000
++#define NV50_PMC__LEN                                              0x1
++#define NV50_PMC__ESIZE                                         0x2000
++#    define NV50_PMC_BOOT_0                                 0x00000000
++#        define NV50_PMC_BOOT_0_REVISION                    0x000000ff
++#        define NV50_PMC_BOOT_0_REVISION__SHIFT                      0
++#        define NV50_PMC_BOOT_0_ARCH                        0x0ff00000
++#        define NV50_PMC_BOOT_0_ARCH__SHIFT                         20
++#    define NV50_PMC_INTR_0                                 0x00000100
++#        define NV50_PMC_INTR_0_PFIFO                           (1<<8)
++#        define NV50_PMC_INTR_0_PGRAPH                         (1<<12)
++#        define NV50_PMC_INTR_0_PTIMER                         (1<<20)
++#        define NV50_PMC_INTR_0_HOTPLUG                        (1<<21)
++#        define NV50_PMC_INTR_0_DISPLAY                        (1<<26)
++#    define NV50_PMC_INTR_EN_0                              0x00000140
++#        define NV50_PMC_INTR_EN_0_MASTER                       (1<<0)
++#            define NV50_PMC_INTR_EN_0_MASTER_DISABLED          (0<<0)
++#            define NV50_PMC_INTR_EN_0_MASTER_ENABLED           (1<<0)
++#    define NV50_PMC_ENABLE                                 0x00000200
++#        define NV50_PMC_ENABLE_PFIFO                           (1<<8)
++#        define NV50_PMC_ENABLE_PGRAPH                         (1<<12)
++
++#define NV50_PCONNECTOR                                     0x0000e000
++#define NV50_PCONNECTOR__LEN                                       0x1
++#define NV50_PCONNECTOR__ESIZE                                  0x1000
++#    define NV50_PCONNECTOR_HOTPLUG_INTR                    0x0000e050
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0          (1<<0)
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1          (1<<1)
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2          (1<<2)
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3          (1<<3)
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0       (1<<16)
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1       (1<<17)
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2       (1<<18)
++#        define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3       (1<<19)
++#    define NV50_PCONNECTOR_HOTPLUG_CTRL                    0x0000e054
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0          (1<<0)
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1          (1<<1)
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2          (1<<2)
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3          (1<<3)
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0       (1<<16)
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1       (1<<17)
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2       (1<<18)
++#        define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3       (1<<19)
++#    define NV50_PCONNECTOR_HOTPLUG_STATE                   0x0000e104
++#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
++#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
++#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
++#        define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
++#    define NV50_PCONNECTOR_I2C_PORT_0                      0x0000e138
++#    define NV50_PCONNECTOR_I2C_PORT_1                      0x0000e150
++#    define NV50_PCONNECTOR_I2C_PORT_2                      0x0000e168
++#    define NV50_PCONNECTOR_I2C_PORT_3                      0x0000e180
++#    define NV50_PCONNECTOR_I2C_PORT_4                      0x0000e240
++#    define NV50_PCONNECTOR_I2C_PORT_5                      0x0000e258
++
++#define NV50_AUXCH_DATA_OUT(i,n)             ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
++#define NV50_AUXCH_DATA_OUT__SIZE                                             4
++#define NV50_AUXCH_DATA_IN(i,n)              ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
++#define NV50_AUXCH_DATA_IN__SIZE                                              4
++#define NV50_AUXCH_ADDR(i)                             ((i) * 0x50 + 0x0000e4e0)
++#define NV50_AUXCH_CTRL(i)                             ((i) * 0x50 + 0x0000e4e4)
++#define NV50_AUXCH_CTRL_LINKSTAT                                     0x01000000
++#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY                           0x00000000
++#define NV50_AUXCH_CTRL_LINKSTAT_READY                               0x01000000
++#define NV50_AUXCH_CTRL_LINKEN                                       0x00100000
++#define NV50_AUXCH_CTRL_LINKEN_DISABLED                              0x00000000
++#define NV50_AUXCH_CTRL_LINKEN_ENABLED                               0x00100000
++#define NV50_AUXCH_CTRL_EXEC                                         0x00010000
++#define NV50_AUXCH_CTRL_EXEC_COMPLETE                                0x00000000
++#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS                              0x00010000
++#define NV50_AUXCH_CTRL_CMD                                          0x0000f000
++#define NV50_AUXCH_CTRL_CMD_SHIFT                                            12
++#define NV50_AUXCH_CTRL_LEN                                          0x0000000f
++#define NV50_AUXCH_CTRL_LEN_SHIFT                                             0
++#define NV50_AUXCH_STAT(i)                             ((i) * 0x50 + 0x0000e4e8)
++#define NV50_AUXCH_STAT_STATE                                        0x10000000
++#define NV50_AUXCH_STAT_STATE_NOT_READY                              0x00000000
++#define NV50_AUXCH_STAT_STATE_READY                                  0x10000000
++#define NV50_AUXCH_STAT_REPLY                                        0x000f0000
++#define NV50_AUXCH_STAT_REPLY_AUX                                    0x00030000
++#define NV50_AUXCH_STAT_REPLY_AUX_ACK                                0x00000000
++#define NV50_AUXCH_STAT_REPLY_AUX_NACK                               0x00010000
++#define NV50_AUXCH_STAT_REPLY_AUX_DEFER                              0x00020000
++#define NV50_AUXCH_STAT_REPLY_I2C                                    0x000c0000
++#define NV50_AUXCH_STAT_REPLY_I2C_ACK                                0x00000000
++#define NV50_AUXCH_STAT_REPLY_I2C_NACK                               0x00040000
++#define NV50_AUXCH_STAT_REPLY_I2C_DEFER                              0x00080000
++#define NV50_AUXCH_STAT_COUNT                                        0x0000001f
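++/* A plausible AUX transaction flow, inferred from the fields above and
++ * not confirmed by this header: enable the channel (CTRL_LINKEN), write
++ * the target address to NV50_AUXCH_ADDR and up to 16 bytes of payload
++ * into the DATA_OUT dwords, program CMD/LEN and set CTRL_EXEC, then
++ * poll CTRL until EXEC reads COMPLETE; STAT_REPLY then reports
++ * ACK/NACK/DEFER and STAT_COUNT the number of bytes returned in DATA_IN.
++ */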
++
++#define NV50_PBUS                                           0x00088000
++#define NV50_PBUS__LEN                                             0x1
++#define NV50_PBUS__ESIZE                                        0x1000
++#    define NV50_PBUS_PCI_ID                                0x00088000
++#        define NV50_PBUS_PCI_ID_VENDOR_ID                  0x0000ffff
++#        define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT                    0
++#        define NV50_PBUS_PCI_ID_DEVICE_ID                  0xffff0000
++#        define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT                   16
++
++#define NV50_PFB                                            0x00100000
++#define NV50_PFB__LEN                                              0x1
++#define NV50_PFB__ESIZE                                         0x1000
++
++#define NV50_PEXTDEV                                        0x00101000
++#define NV50_PEXTDEV__LEN                                          0x1
++#define NV50_PEXTDEV__ESIZE                                     0x1000
++
++#define NV50_PROM                                           0x00300000
++#define NV50_PROM__LEN                                             0x1
++#define NV50_PROM__ESIZE                                       0x10000
++
++#define NV50_PGRAPH                                         0x00400000
++#define NV50_PGRAPH__LEN                                           0x1
++#define NV50_PGRAPH__ESIZE                                     0x10000
++
++#define NV50_PDISPLAY                                                0x00610000
++#define NV50_PDISPLAY_OBJECTS                                        0x00610010
++#define NV50_PDISPLAY_INTR_0                                         0x00610020
++#define NV50_PDISPLAY_INTR_1                                         0x00610024
++#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC                             0x0000000c
++#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT                                2
++#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
++#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0                           0x00000004
++#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1                           0x00000008
++#define NV50_PDISPLAY_INTR_1_CLK_UNK10                               0x00000010
++#define NV50_PDISPLAY_INTR_1_CLK_UNK20                               0x00000020
++#define NV50_PDISPLAY_INTR_1_CLK_UNK40                               0x00000040
++#define NV50_PDISPLAY_INTR_EN                                        0x0061002c
++#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC                            0x0000000c
++#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
++#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0                          0x00000004
++#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1                          0x00000008
++#define NV50_PDISPLAY_INTR_EN_CLK_UNK10                              0x00000010
++#define NV50_PDISPLAY_INTR_EN_CLK_UNK20                              0x00000020
++#define NV50_PDISPLAY_INTR_EN_CLK_UNK40                              0x00000040
++#define NV50_PDISPLAY_UNK30_CTRL                                     0x00610030
++#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0                        0x00000200
++#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1                        0x00000400
++#define NV50_PDISPLAY_UNK30_CTRL_PENDING                             0x80000000
++#define NV50_PDISPLAY_TRAPPED_ADDR                                   0x00610080
++#define NV50_PDISPLAY_TRAPPED_DATA                                   0x00610084
++#define NV50_PDISPLAY_CHANNEL_STAT(i)                  ((i) * 0x10 + 0x00610200)
++#define NV50_PDISPLAY_CHANNEL_STAT_DMA                               0x00000010
++#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED                      0x00000000
++#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED                       0x00000010
++#define NV50_PDISPLAY_CHANNEL_DMA_CB(i)                ((i) * 0x10 + 0x00610204)
++#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION                        0x00000002
++#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM                   0x00000000
++#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM                 0x00000002
++#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID                           0x00000001
++#define NV50_PDISPLAY_CHANNEL_UNK2(i)                  ((i) * 0x10 + 0x00610208)
++#define NV50_PDISPLAY_CHANNEL_UNK3(i)                  ((i) * 0x10 + 0x0061020c)
++
++#define NV50_PDISPLAY_CURSOR                                         0x00610270
++#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)           ((i) * 0x10 + 0x00610270)
++#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON                         0x00000001
++#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS                     0x00030000
++#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE              0x00010000
++
++#define NV50_PDISPLAY_CTRL_STATE                                     0x00610300
++#define NV50_PDISPLAY_CTRL_STATE_PENDING                             0x80000000
++#define NV50_PDISPLAY_CTRL_STATE_METHOD                              0x00001ffc
++#define NV50_PDISPLAY_CTRL_STATE_ENABLE                              0x00000001
++#define NV50_PDISPLAY_CTRL_VAL                                       0x00610304
++#define NV50_PDISPLAY_UNK_380                                        0x00610380
++#define NV50_PDISPLAY_RAM_AMOUNT                                     0x00610384
++#define NV50_PDISPLAY_UNK_388                                        0x00610388
++#define NV50_PDISPLAY_UNK_38C                                        0x0061038c
++
++#define NV50_PDISPLAY_CRTC_P(i, r)        ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
++#define NV50_PDISPLAY_CRTC_C(i, r)    (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
++#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */                0x00610a18
++#define NV50_PDISPLAY_CRTC_CLUT_MODE                                 0x00610a24
++#define NV50_PDISPLAY_CRTC_INTERLACE                                 0x00610a48
++#define NV50_PDISPLAY_CRTC_SCALE_CTRL                                0x00610a50
++#define NV50_PDISPLAY_CRTC_CURSOR_CTRL                               0x00610a58
++#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */                 0x00610a78
++#define NV50_PDISPLAY_CRTC_UNK0AB8                                   0x00610ab8
++#define NV50_PDISPLAY_CRTC_DEPTH                                     0x00610ac8
++#define NV50_PDISPLAY_CRTC_CLOCK                                     0x00610ad0
++#define NV50_PDISPLAY_CRTC_COLOR_CTRL                                0x00610ae0
++#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END                   0x00610ae8
++#define NV50_PDISPLAY_CRTC_MODE_UNK1                                 0x00610af0
++#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL                             0x00610af8
++#define NV50_PDISPLAY_CRTC_SYNC_DURATION                             0x00610b00
++#define NV50_PDISPLAY_CRTC_MODE_UNK2                                 0x00610b08
++#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */                0x00610b10
++#define NV50_PDISPLAY_CRTC_FB_SIZE                                   0x00610b18
++#define NV50_PDISPLAY_CRTC_FB_PITCH                                  0x00610b20
++#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR                           0x00100000
++#define NV50_PDISPLAY_CRTC_FB_POS                                    0x00610b28
++#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET                       0x00610b38
++#define NV50_PDISPLAY_CRTC_REAL_RES                                  0x00610b40
++#define NV50_PDISPLAY_CRTC_SCALE_RES1                                0x00610b48
++#define NV50_PDISPLAY_CRTC_SCALE_RES2                                0x00610b50
++
++#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i)                (0x00610b58 + (i) * 0x8)
++#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i)                (0x00610b5c + (i) * 0x8)
++#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610b70 + (i) * 0x8)
++#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610b74 + (i) * 0x8)
++#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i)               (0x00610bdc + (i) * 0x8)
++#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i)               (0x00610be0 + (i) * 0x8)
++
++#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i)                (0x00610794 + (i) * 0x8)
++#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i)                (0x00610798 + (i) * 0x8)
++#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i)                (0x00610b58 + (i) * 0x8)
++#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i)                (0x00610b5c + (i) * 0x8)
++#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i)               (0x00610b80 + (i) * 0x8)
++#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i)               (0x00610b84 + (i) * 0x8)
++
++#define NV50_PDISPLAY_CRTC_CLK                                       0x00614000
++#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i)                 ((i) * 0x800 + 0x614100)
++#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED                       0x00000600
++#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i)                ((i) * 0x800 + 0x614104)
++#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i)                ((i) * 0x800 + 0x614108)
++#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i)                 ((i) * 0x800 + 0x614200)
++
++#define NV50_PDISPLAY_DAC_CLK                                        0x00614000
++#define NV50_PDISPLAY_DAC_CLK_CTRL2(i)                  ((i) * 0x800 + 0x614280)
++
++#define NV50_PDISPLAY_SOR_CLK                                        0x00614000
++#define NV50_PDISPLAY_SOR_CLK_CTRL2(i)                  ((i) * 0x800 + 0x614300)
++
++#define NV50_PDISPLAY_VGACRTC(r)                                ((r) + 0x619400)
++
++#define NV50_PDISPLAY_DAC                                            0x0061a000
++#define NV50_PDISPLAY_DAC_DPMS_CTRL(i)                (0x0061a004 + (i) * 0x800)
++#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF                        0x00000001
++#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF                        0x00000004
++#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED                          0x00000010
++#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF                              0x00000040
++#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING                          0x80000000
++#define NV50_PDISPLAY_DAC_LOAD_CTRL(i)                (0x0061a00c + (i) * 0x800)
++#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE                           0x00100000
++#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT                          0x38000000
++#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE                             0x80000000
++#define NV50_PDISPLAY_DAC_CLK_CTRL1(i)                (0x0061a010 + (i) * 0x800)
++#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED                        0x00000600
++
++#define NV50_PDISPLAY_SOR                                            0x0061c000
++#define NV50_PDISPLAY_SOR_DPMS_CTRL(i)                (0x0061c004 + (i) * 0x800)
++#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING                          0x80000000
++#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON                               0x00000001
++#define NV50_PDISPLAY_SOR_CLK_CTRL1(i)                (0x0061c008 + (i) * 0x800)
++#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED                        0x00000600
++#define NV50_PDISPLAY_SOR_DPMS_STATE(i)               (0x0061c030 + (i) * 0x800)
++#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE                          0x00030000
++#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED                         0x00080000
++#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT                            0x10000000
++#define NV50_PDISPLAY_SOR_BACKLIGHT                                  0x0061c084
++#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE                           0x80000000
++#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL                            0x00000fff
++#define NV50_SOR_DP_CTRL(i,l)            (0x0061c10c + (i) * 0x800 + (l) * 0x80)
++#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED                      0x00004000
++#define NV50_SOR_DP_CTRL_LANE_MASK                                   0x001f0000
++#define NV50_SOR_DP_CTRL_LANE_0_ENABLED                              0x00010000
++#define NV50_SOR_DP_CTRL_LANE_1_ENABLED                              0x00020000
++#define NV50_SOR_DP_CTRL_LANE_2_ENABLED                              0x00040000
++#define NV50_SOR_DP_CTRL_LANE_3_ENABLED                              0x00080000
++#define NV50_SOR_DP_CTRL_TRAINING_PATTERN                            0x0f000000
++#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED                   0x00000000
++#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1                          0x01000000
++#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2                          0x02000000
++#define NV50_SOR_DP_UNK118(i,l)          (0x0061c118 + (i) * 0x800 + (l) * 0x80)
++#define NV50_SOR_DP_UNK120(i,l)          (0x0061c120 + (i) * 0x800 + (l) * 0x80)
++#define NV50_SOR_DP_UNK130(i,l)          (0x0061c130 + (i) * 0x800 + (l) * 0x80)
++
++#define NV50_PDISPLAY_USER(i)                        ((i) * 0x1000 + 0x00640000)
++#define NV50_PDISPLAY_USER_PUT(i)                    ((i) * 0x1000 + 0x00640000)
++#define NV50_PDISPLAY_USER_GET(i)                    ((i) * 0x1000 + 0x00640004)
++
++#define NV50_PDISPLAY_CURSOR_USER                                    0x00647000
++#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i)        ((i) * 0x1000 + 0x00647080)
++#define NV50_PDISPLAY_CURSOR_USER_POS(i)             ((i) * 0x1000 + 0x00647084)
+diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+new file mode 100644
+index 0000000..ed15905
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+@@ -0,0 +1,322 @@
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include <linux/pagemap.h>
++
++#define NV_CTXDMA_PAGE_SHIFT 12
++#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
++#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
++
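++/* Per-backend state for the TTM glue below: "pages" holds the bus (DMA)
++ * addresses of the currently populated pages, and "pte_start" records
++ * the first PTE index written into the shared sg_ctxdma object on bind.
++ */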
++struct nouveau_sgdma_be {
++	struct ttm_backend backend;
++	struct drm_device *dev;
++
++	dma_addr_t *pages;
++	unsigned nr_pages;
++
++	unsigned pte_start;
++	bool bound;
++};
++
++static int
++nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
++		       struct page **pages, struct page *dummy_read_page)
++{
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct drm_device *dev = nvbe->dev;
++
++	NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);
++
++	if (nvbe->pages)
++		return -EINVAL;
++
++	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
++	if (!nvbe->pages)
++		return -ENOMEM;
++
++	nvbe->nr_pages = 0;
++	while (num_pages--) {
++		nvbe->pages[nvbe->nr_pages] =
++			pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
++				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++		if (pci_dma_mapping_error(dev->pdev,
++					  nvbe->pages[nvbe->nr_pages])) {
++			be->func->clear(be);
++			return -EFAULT;
++		}
++
++		nvbe->nr_pages++;
++	}
++
++	return 0;
++}
++
++static void
++nouveau_sgdma_clear(struct ttm_backend *be)
++{
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct drm_device *dev;
++
++	if (nvbe && nvbe->pages) {
++		dev = nvbe->dev;
++		NV_DEBUG(dev, "\n");
++
++		if (nvbe->bound)
++			be->func->unbind(be);
++
++		while (nvbe->nr_pages--) {
++			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
++				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++		}
++		kfree(nvbe->pages);
++		nvbe->pages = NULL;
++		nvbe->nr_pages = 0;
++	}
++}
++
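++/* Translate a GART byte offset into a PTE index inside the sg_ctxdma
++ * object.  Pre-NV50 the ctxdma carries an 8-byte header (see the
++ * "obj_size += 8" in nouveau_sgdma_init() below), hence the +2 words;
++ * on NV50 each page-table entry is two 32-bit words, hence the <<1.
++ */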
++static inline unsigned
++nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
++
++	if (dev_priv->card_type < NV_50)
++		return pte + 2;
++
++	return pte << 1;
++}
++
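++/* Bind: for each backend page, write one PTE word (<NV50) or a low/high
++ * word pair (NV50) per 4KiB NV_CTXDMA_PAGE covered; the low bits (3, or
++ * 0x21 on NV50) appear to be valid/permission flags.  On NV50 the
++ * 0x100c80 pokes below look like a VM/TLB flush that must complete
++ * before the binding takes effect.
++ */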
++static int
++nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
++{
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct drm_device *dev = nvbe->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++	unsigned i, j, pte;
++
++	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
++
++	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
++	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
++	nvbe->pte_start = pte;
++	for (i = 0; i < nvbe->nr_pages; i++) {
++		dma_addr_t dma_offset = nvbe->pages[i];
++		uint32_t offset_l = lower_32_bits(dma_offset);
++		uint32_t offset_h = upper_32_bits(dma_offset);
++
++		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
++			if (dev_priv->card_type < NV_50)
++				nv_wo32(dev, gpuobj, pte++, offset_l | 3);
++			else {
++				nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
++				nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
++			}
++
++			dma_offset += NV_CTXDMA_PAGE_SIZE;
++		}
++	}
++	dev_priv->engine.instmem.finish_access(nvbe->dev);
++
++	if (dev_priv->card_type == NV_50) {
++		nv_wr32(dev, 0x100c80, 0x00050001);
++		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
++			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
++						nv_rd32(dev, 0x100c80));
++			return -EBUSY;
++		}
++
++		nv_wr32(dev, 0x100c80, 0x00000001);
++		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
++						nv_rd32(dev, 0x100c80));
++			return -EBUSY;
++		}
++	}
++
++	nvbe->bound = true;
++	return 0;
++}
++
++static int
++nouveau_sgdma_unbind(struct ttm_backend *be)
++{
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++	struct drm_device *dev = nvbe->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++	unsigned i, j, pte;
++
++	NV_DEBUG(dev, "\n");
++
++	if (!nvbe->bound)
++		return 0;
++
++	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
++	pte = nvbe->pte_start;
++	for (i = 0; i < nvbe->nr_pages; i++) {
++		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
++
++		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
++			if (dev_priv->card_type < NV_50)
++				nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
++			else {
++				nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
++				nv_wo32(dev, gpuobj, pte++, 0x00000000);
++			}
++
++			dma_offset += NV_CTXDMA_PAGE_SIZE;
++		}
++	}
++	dev_priv->engine.instmem.finish_access(nvbe->dev);
++
++	nvbe->bound = false;
++	return 0;
++}
++
++static void
++nouveau_sgdma_destroy(struct ttm_backend *be)
++{
++	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
++
++	if (nvbe) {
++		NV_DEBUG(nvbe->dev, "\n");
++
++		if (nvbe->pages)
++			be->func->clear(be);
++		kfree(nvbe);
++	}
++}
++
++static struct ttm_backend_func nouveau_sgdma_backend = {
++	.populate		= nouveau_sgdma_populate,
++	.clear			= nouveau_sgdma_clear,
++	.bind			= nouveau_sgdma_bind,
++	.unbind			= nouveau_sgdma_unbind,
++	.destroy		= nouveau_sgdma_destroy
++};
++
++struct ttm_backend *
++nouveau_sgdma_init_ttm(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_sgdma_be *nvbe;
++
++	if (!dev_priv->gart_info.sg_ctxdma)
++		return NULL;
++
++	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
++	if (!nvbe)
++		return NULL;
++
++	nvbe->dev = dev;
++
++	nvbe->backend.func	= &nouveau_sgdma_backend;
++
++	return &nvbe->backend;
++}
++
++int
++nouveau_sgdma_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj = NULL;
++	uint32_t aper_size, obj_size;
++	int i, ret;
++
++	if (dev_priv->card_type < NV_50) {
++		aper_size = (64 * 1024 * 1024);
++		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
++		obj_size += 8; /* ctxdma header */
++	} else {
++		/* 1 entire VM page table */
++		aper_size = (512 * 1024 * 1024);
++		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
++	}
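++	/* e.g. pre-NV50: (64 MiB >> 12) = 16384 PTEs * 4 bytes + 8 bytes of
++	 * header; NV50: (512 MiB >> 12) * 8 bytes = 1 MiB of page table */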
++
++	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
++				      NVOBJ_FLAG_ALLOW_NO_REFS |
++				      NVOBJ_FLAG_ZERO_ALLOC |
++				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
++	if (ret) {
++		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
++		return ret;
++	}
++
++	dev_priv->gart_info.sg_dummy_page =
++		alloc_page(GFP_KERNEL|__GFP_DMA32);
++	if (!dev_priv->gart_info.sg_dummy_page) {
++		nouveau_gpuobj_del(dev, &gpuobj);
++		return -ENOMEM;
++	}
++	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
++	dev_priv->gart_info.sg_dummy_bus =
++		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
++			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	if (dev_priv->card_type < NV_50) {
++		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
++		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
++		 * on those cards? */
++		nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
++				       (1 << 12) /* PT present */ |
++				       (0 << 13) /* PT *not* linear */ |
++				       (NV_DMA_ACCESS_RW  << 14) |
++				       (NV_DMA_TARGET_PCI << 16));
++		nv_wo32(dev, gpuobj, 1, aper_size - 1);
++		for (i = 2; i < 2 + (aper_size >> 12); i++) {
++			nv_wo32(dev, gpuobj, i,
++				    dev_priv->gart_info.sg_dummy_bus | 3);
++		}
++	} else {
++		for (i = 0; i < obj_size; i += 8) {
++			nv_wo32(dev, gpuobj, (i+0)/4,
++				    dev_priv->gart_info.sg_dummy_bus | 0x21);
++			nv_wo32(dev, gpuobj, (i+4)/4, 0);
++		}
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
++	dev_priv->gart_info.aper_base = 0;
++	dev_priv->gart_info.aper_size = aper_size;
++	dev_priv->gart_info.sg_ctxdma = gpuobj;
++	return 0;
++}
++
++void
++nouveau_sgdma_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->gart_info.sg_dummy_page) {
++		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
++			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
++		unlock_page(dev_priv->gart_info.sg_dummy_page);
++		__free_page(dev_priv->gart_info.sg_dummy_page);
++		dev_priv->gart_info.sg_dummy_page = NULL;
++		dev_priv->gart_info.sg_dummy_bus = 0;
++	}
++
++	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
++}
++
++int
++nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
++	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
++	int pte;
++
++	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
++	if (dev_priv->card_type < NV_50) {
++		instmem->prepare_access(dev, false);
++		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
++		instmem->finish_access(dev);
++		return 0;
++	}
++
++	NV_ERROR(dev, "Unimplemented on NV50\n");
++	return -EINVAL;
++}
+diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
+new file mode 100644
+index 0000000..a4851af
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_state.c
+@@ -0,0 +1,897 @@
++/*
++ * Copyright 2005 Stephane Marchesin
++ * Copyright 2008 Stuart Bennett
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include <linux/swab.h>
++#include "drmP.h"
++#include "drm.h"
++#include "drm_sarea.h"
++#include "drm_crtc_helper.h"
++#include <linux/vgaarb.h>
++
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++#include "nv50_display.h"
++
++static int nouveau_stub_init(struct drm_device *dev) { return 0; }
++static void nouveau_stub_takedown(struct drm_device *dev) {}
++
++static int nouveau_init_engine_ptrs(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++
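++	/* dispatch on the chipset family (high nibble): each case below
++	 * fills in the per-engine hooks for that hardware generation */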
++	switch (dev_priv->chipset & 0xf0) {
++	case 0x00:
++		engine->instmem.init		= nv04_instmem_init;
++		engine->instmem.takedown	= nv04_instmem_takedown;
++		engine->instmem.suspend		= nv04_instmem_suspend;
++		engine->instmem.resume		= nv04_instmem_resume;
++		engine->instmem.populate	= nv04_instmem_populate;
++		engine->instmem.clear		= nv04_instmem_clear;
++		engine->instmem.bind		= nv04_instmem_bind;
++		engine->instmem.unbind		= nv04_instmem_unbind;
++		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
++		engine->instmem.finish_access	= nv04_instmem_finish_access;
++		engine->mc.init			= nv04_mc_init;
++		engine->mc.takedown		= nv04_mc_takedown;
++		engine->timer.init		= nv04_timer_init;
++		engine->timer.read		= nv04_timer_read;
++		engine->timer.takedown		= nv04_timer_takedown;
++		engine->fb.init			= nv04_fb_init;
++		engine->fb.takedown		= nv04_fb_takedown;
++		engine->graph.grclass		= nv04_graph_grclass;
++		engine->graph.init		= nv04_graph_init;
++		engine->graph.takedown		= nv04_graph_takedown;
++		engine->graph.fifo_access	= nv04_graph_fifo_access;
++		engine->graph.channel		= nv04_graph_channel;
++		engine->graph.create_context	= nv04_graph_create_context;
++		engine->graph.destroy_context	= nv04_graph_destroy_context;
++		engine->graph.load_context	= nv04_graph_load_context;
++		engine->graph.unload_context	= nv04_graph_unload_context;
++		engine->fifo.channels		= 16;
++		engine->fifo.init		= nv04_fifo_init;
++		engine->fifo.takedown		= nouveau_stub_takedown;
++		engine->fifo.disable		= nv04_fifo_disable;
++		engine->fifo.enable		= nv04_fifo_enable;
++		engine->fifo.reassign		= nv04_fifo_reassign;
++		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
++		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
++		engine->fifo.channel_id		= nv04_fifo_channel_id;
++		engine->fifo.create_context	= nv04_fifo_create_context;
++		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
++		engine->fifo.load_context	= nv04_fifo_load_context;
++		engine->fifo.unload_context	= nv04_fifo_unload_context;
++		break;
++	case 0x10:
++		engine->instmem.init		= nv04_instmem_init;
++		engine->instmem.takedown	= nv04_instmem_takedown;
++		engine->instmem.suspend		= nv04_instmem_suspend;
++		engine->instmem.resume		= nv04_instmem_resume;
++		engine->instmem.populate	= nv04_instmem_populate;
++		engine->instmem.clear		= nv04_instmem_clear;
++		engine->instmem.bind		= nv04_instmem_bind;
++		engine->instmem.unbind		= nv04_instmem_unbind;
++		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
++		engine->instmem.finish_access	= nv04_instmem_finish_access;
++		engine->mc.init			= nv04_mc_init;
++		engine->mc.takedown		= nv04_mc_takedown;
++		engine->timer.init		= nv04_timer_init;
++		engine->timer.read		= nv04_timer_read;
++		engine->timer.takedown		= nv04_timer_takedown;
++		engine->fb.init			= nv10_fb_init;
++		engine->fb.takedown		= nv10_fb_takedown;
++		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
++		engine->graph.grclass		= nv10_graph_grclass;
++		engine->graph.init		= nv10_graph_init;
++		engine->graph.takedown		= nv10_graph_takedown;
++		engine->graph.channel		= nv10_graph_channel;
++		engine->graph.create_context	= nv10_graph_create_context;
++		engine->graph.destroy_context	= nv10_graph_destroy_context;
++		engine->graph.fifo_access	= nv04_graph_fifo_access;
++		engine->graph.load_context	= nv10_graph_load_context;
++		engine->graph.unload_context	= nv10_graph_unload_context;
++		engine->graph.set_region_tiling	= nv10_graph_set_region_tiling;
++		engine->fifo.channels		= 32;
++		engine->fifo.init		= nv10_fifo_init;
++		engine->fifo.takedown		= nouveau_stub_takedown;
++		engine->fifo.disable		= nv04_fifo_disable;
++		engine->fifo.enable		= nv04_fifo_enable;
++		engine->fifo.reassign		= nv04_fifo_reassign;
++		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
++		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
++		engine->fifo.channel_id		= nv10_fifo_channel_id;
++		engine->fifo.create_context	= nv10_fifo_create_context;
++		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
++		engine->fifo.load_context	= nv10_fifo_load_context;
++		engine->fifo.unload_context	= nv10_fifo_unload_context;
++		break;
++	case 0x20:
++		engine->instmem.init		= nv04_instmem_init;
++		engine->instmem.takedown	= nv04_instmem_takedown;
++		engine->instmem.suspend		= nv04_instmem_suspend;
++		engine->instmem.resume		= nv04_instmem_resume;
++		engine->instmem.populate	= nv04_instmem_populate;
++		engine->instmem.clear		= nv04_instmem_clear;
++		engine->instmem.bind		= nv04_instmem_bind;
++		engine->instmem.unbind		= nv04_instmem_unbind;
++		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
++		engine->instmem.finish_access	= nv04_instmem_finish_access;
++		engine->mc.init			= nv04_mc_init;
++		engine->mc.takedown		= nv04_mc_takedown;
++		engine->timer.init		= nv04_timer_init;
++		engine->timer.read		= nv04_timer_read;
++		engine->timer.takedown		= nv04_timer_takedown;
++		engine->fb.init			= nv10_fb_init;
++		engine->fb.takedown		= nv10_fb_takedown;
++		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
++		engine->graph.grclass		= nv20_graph_grclass;
++		engine->graph.init		= nv20_graph_init;
++		engine->graph.takedown		= nv20_graph_takedown;
++		engine->graph.channel		= nv10_graph_channel;
++		engine->graph.create_context	= nv20_graph_create_context;
++		engine->graph.destroy_context	= nv20_graph_destroy_context;
++		engine->graph.fifo_access	= nv04_graph_fifo_access;
++		engine->graph.load_context	= nv20_graph_load_context;
++		engine->graph.unload_context	= nv20_graph_unload_context;
++		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
++		engine->fifo.channels		= 32;
++		engine->fifo.init		= nv10_fifo_init;
++		engine->fifo.takedown		= nouveau_stub_takedown;
++		engine->fifo.disable		= nv04_fifo_disable;
++		engine->fifo.enable		= nv04_fifo_enable;
++		engine->fifo.reassign		= nv04_fifo_reassign;
++		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
++		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
++		engine->fifo.channel_id		= nv10_fifo_channel_id;
++		engine->fifo.create_context	= nv10_fifo_create_context;
++		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
++		engine->fifo.load_context	= nv10_fifo_load_context;
++		engine->fifo.unload_context	= nv10_fifo_unload_context;
++		break;
++	case 0x30:
++		engine->instmem.init		= nv04_instmem_init;
++		engine->instmem.takedown	= nv04_instmem_takedown;
++		engine->instmem.suspend		= nv04_instmem_suspend;
++		engine->instmem.resume		= nv04_instmem_resume;
++		engine->instmem.populate	= nv04_instmem_populate;
++		engine->instmem.clear		= nv04_instmem_clear;
++		engine->instmem.bind		= nv04_instmem_bind;
++		engine->instmem.unbind		= nv04_instmem_unbind;
++		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
++		engine->instmem.finish_access	= nv04_instmem_finish_access;
++		engine->mc.init			= nv04_mc_init;
++		engine->mc.takedown		= nv04_mc_takedown;
++		engine->timer.init		= nv04_timer_init;
++		engine->timer.read		= nv04_timer_read;
++		engine->timer.takedown		= nv04_timer_takedown;
++		engine->fb.init			= nv10_fb_init;
++		engine->fb.takedown		= nv10_fb_takedown;
++		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
++		engine->graph.grclass		= nv30_graph_grclass;
++		engine->graph.init		= nv30_graph_init;
++		engine->graph.takedown		= nv20_graph_takedown;
++		engine->graph.fifo_access	= nv04_graph_fifo_access;
++		engine->graph.channel		= nv10_graph_channel;
++		engine->graph.create_context	= nv20_graph_create_context;
++		engine->graph.destroy_context	= nv20_graph_destroy_context;
++		engine->graph.load_context	= nv20_graph_load_context;
++		engine->graph.unload_context	= nv20_graph_unload_context;
++		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
++		engine->fifo.channels		= 32;
++		engine->fifo.init		= nv10_fifo_init;
++		engine->fifo.takedown		= nouveau_stub_takedown;
++		engine->fifo.disable		= nv04_fifo_disable;
++		engine->fifo.enable		= nv04_fifo_enable;
++		engine->fifo.reassign		= nv04_fifo_reassign;
++		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
++		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
++		engine->fifo.channel_id		= nv10_fifo_channel_id;
++		engine->fifo.create_context	= nv10_fifo_create_context;
++		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
++		engine->fifo.load_context	= nv10_fifo_load_context;
++		engine->fifo.unload_context	= nv10_fifo_unload_context;
++		break;
++	case 0x40:
++	case 0x60:
++		engine->instmem.init		= nv04_instmem_init;
++		engine->instmem.takedown	= nv04_instmem_takedown;
++		engine->instmem.suspend		= nv04_instmem_suspend;
++		engine->instmem.resume		= nv04_instmem_resume;
++		engine->instmem.populate	= nv04_instmem_populate;
++		engine->instmem.clear		= nv04_instmem_clear;
++		engine->instmem.bind		= nv04_instmem_bind;
++		engine->instmem.unbind		= nv04_instmem_unbind;
++		engine->instmem.prepare_access	= nv04_instmem_prepare_access;
++		engine->instmem.finish_access	= nv04_instmem_finish_access;
++		engine->mc.init			= nv40_mc_init;
++		engine->mc.takedown		= nv40_mc_takedown;
++		engine->timer.init		= nv04_timer_init;
++		engine->timer.read		= nv04_timer_read;
++		engine->timer.takedown		= nv04_timer_takedown;
++		engine->fb.init			= nv40_fb_init;
++		engine->fb.takedown		= nv40_fb_takedown;
++		engine->fb.set_region_tiling	= nv40_fb_set_region_tiling;
++		engine->graph.grclass		= nv40_graph_grclass;
++		engine->graph.init		= nv40_graph_init;
++		engine->graph.takedown		= nv40_graph_takedown;
++		engine->graph.fifo_access	= nv04_graph_fifo_access;
++		engine->graph.channel		= nv40_graph_channel;
++		engine->graph.create_context	= nv40_graph_create_context;
++		engine->graph.destroy_context	= nv40_graph_destroy_context;
++		engine->graph.load_context	= nv40_graph_load_context;
++		engine->graph.unload_context	= nv40_graph_unload_context;
++		engine->graph.set_region_tiling	= nv40_graph_set_region_tiling;
++		engine->fifo.channels		= 32;
++		engine->fifo.init		= nv40_fifo_init;
++		engine->fifo.takedown		= nouveau_stub_takedown;
++		engine->fifo.disable		= nv04_fifo_disable;
++		engine->fifo.enable		= nv04_fifo_enable;
++		engine->fifo.reassign		= nv04_fifo_reassign;
++		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
++		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
++		engine->fifo.channel_id		= nv10_fifo_channel_id;
++		engine->fifo.create_context	= nv40_fifo_create_context;
++		engine->fifo.destroy_context	= nv40_fifo_destroy_context;
++		engine->fifo.load_context	= nv40_fifo_load_context;
++		engine->fifo.unload_context	= nv40_fifo_unload_context;
++		break;
++	case 0x50:
++	case 0x80: /* gotta love NVIDIA's consistency.. */
++	case 0x90:
++	case 0xA0:
++		engine->instmem.init		= nv50_instmem_init;
++		engine->instmem.takedown	= nv50_instmem_takedown;
++		engine->instmem.suspend		= nv50_instmem_suspend;
++		engine->instmem.resume		= nv50_instmem_resume;
++		engine->instmem.populate	= nv50_instmem_populate;
++		engine->instmem.clear		= nv50_instmem_clear;
++		engine->instmem.bind		= nv50_instmem_bind;
++		engine->instmem.unbind		= nv50_instmem_unbind;
++		engine->instmem.prepare_access	= nv50_instmem_prepare_access;
++		engine->instmem.finish_access	= nv50_instmem_finish_access;
++		engine->mc.init			= nv50_mc_init;
++		engine->mc.takedown		= nv50_mc_takedown;
++		engine->timer.init		= nv04_timer_init;
++		engine->timer.read		= nv04_timer_read;
++		engine->timer.takedown		= nv04_timer_takedown;
++		engine->fb.init			= nouveau_stub_init;
++		engine->fb.takedown		= nouveau_stub_takedown;
++		engine->graph.grclass		= nv50_graph_grclass;
++		engine->graph.init		= nv50_graph_init;
++		engine->graph.takedown		= nv50_graph_takedown;
++		engine->graph.fifo_access	= nv50_graph_fifo_access;
++		engine->graph.channel		= nv50_graph_channel;
++		engine->graph.create_context	= nv50_graph_create_context;
++		engine->graph.destroy_context	= nv50_graph_destroy_context;
++		engine->graph.load_context	= nv50_graph_load_context;
++		engine->graph.unload_context	= nv50_graph_unload_context;
++		engine->fifo.channels		= 128;
++		engine->fifo.init		= nv50_fifo_init;
++		engine->fifo.takedown		= nv50_fifo_takedown;
++		engine->fifo.disable		= nv04_fifo_disable;
++		engine->fifo.enable		= nv04_fifo_enable;
++		engine->fifo.reassign		= nv04_fifo_reassign;
++		engine->fifo.channel_id		= nv50_fifo_channel_id;
++		engine->fifo.create_context	= nv50_fifo_create_context;
++		engine->fifo.destroy_context	= nv50_fifo_destroy_context;
++		engine->fifo.load_context	= nv50_fifo_load_context;
++		engine->fifo.unload_context	= nv50_fifo_unload_context;
++		break;
++	default:
++		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static unsigned int
++nouveau_vga_set_decode(void *priv, bool state)
++{
++	struct drm_device *dev = priv;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
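++	/* write the requested state to what is presumably the legacy VGA
++	 * decode enable; the register location differs pre/post NV40 */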
++	if (dev_priv->chipset >= 0x40)
++		nv_wr32(dev, 0x88054, state);
++	else
++		nv_wr32(dev, 0x1854, state);
++
++	if (state)
++		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
++		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
++	else
++		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
++}
++
++static int
++nouveau_card_init_channel(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *gpuobj;
++	int ret;
++
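++	/* the kernel's own channel gets DMA objects covering all of VRAM
++	 * (NvDmaVRAM) and the whole GART aperture (NvDmaGART) */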
++	ret = nouveau_channel_alloc(dev, &dev_priv->channel,
++				    (struct drm_file *)-2,
++				    NvDmaFB, NvDmaTT);
++	if (ret)
++		return ret;
++
++	gpuobj = NULL;
++	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
++				     0, nouveau_mem_fb_amount(dev),
++				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
++				     &gpuobj);
++	if (ret)
++		goto out_err;
++
++	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
++				     gpuobj, NULL);
++	if (ret)
++		goto out_err;
++
++	gpuobj = NULL;
++	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
++					  dev_priv->gart_info.aper_size,
++					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
++	if (ret)
++		goto out_err;
++
++	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
++				     gpuobj, NULL);
++	if (ret)
++		goto out_err;
++
++	return 0;
++out_err:
++	nouveau_gpuobj_del(dev, &gpuobj);
++	nouveau_channel_free(dev_priv->channel);
++	dev_priv->channel = NULL;
++	return ret;
++}
++
++int
++nouveau_card_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine;
++	int ret;
++
++	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
++
++	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
++		return 0;
++
++	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
++
++	/* Initialise internal driver API hooks */
++	ret = nouveau_init_engine_ptrs(dev);
++	if (ret)
++		goto out;
++	engine = &dev_priv->engine;
++	dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
++
++	/* Parse BIOS tables / Run init tables if card not POSTed */
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		ret = nouveau_bios_init(dev);
++		if (ret)
++			goto out;
++	}
++
++	ret = nouveau_gpuobj_early_init(dev);
++	if (ret)
++		goto out_bios;
++
++	/* Initialise instance memory, must happen before mem_init so we
++	 * know exactly how much VRAM we're able to use for "normal"
++	 * purposes.
++	 */
++	ret = engine->instmem.init(dev);
++	if (ret)
++		goto out_gpuobj_early;
++
++	/* Setup the memory manager */
++	ret = nouveau_mem_init(dev);
++	if (ret)
++		goto out_instmem;
++
++	ret = nouveau_gpuobj_init(dev);
++	if (ret)
++		goto out_mem;
++
++	/* PMC */
++	ret = engine->mc.init(dev);
++	if (ret)
++		goto out_gpuobj;
++
++	/* PTIMER */
++	ret = engine->timer.init(dev);
++	if (ret)
++		goto out_mc;
++
++	/* PFB */
++	ret = engine->fb.init(dev);
++	if (ret)
++		goto out_timer;
++
++	if (nouveau_noaccel) {
++		engine->graph.accel_blocked = true;
++	} else {
++		/* PGRAPH */
++		ret = engine->graph.init(dev);
++		if (ret)
++			goto out_fb;
++
++		/* PFIFO */
++		ret = engine->fifo.init(dev);
++		if (ret)
++			goto out_graph;
++	}
++
++	/* this calls irq_preinstall, registers the irq handler and
++	 * calls irq_postinstall
++	 */
++	ret = drm_irq_install(dev);
++	if (ret)
++		goto out_fifo;
++
++	ret = drm_vblank_init(dev, 0);
++	if (ret)
++		goto out_irq;
++
++	/* what about PVIDEO/PCRTC/PRAMDAC etc? */
++
++	if (!engine->graph.accel_blocked) {
++		ret = nouveau_card_init_channel(dev);
++		if (ret)
++			goto out_irq;
++	}
++
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		if (dev_priv->card_type >= NV_50)
++			ret = nv50_display_create(dev);
++		else
++			ret = nv04_display_create(dev);
++		if (ret)
++			goto out_irq;
++	}
++
++	ret = nouveau_backlight_init(dev);
++	if (ret)
++		NV_ERROR(dev, "Error %d registering backlight\n", ret);
++
++	dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
++
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		drm_helper_initial_config(dev);
++
++	return 0;
++
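++/* error unwind: each label below tears down everything brought up after
++ * the point it names */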
++out_irq:
++	drm_irq_uninstall(dev);
++out_fifo:
++	if (!nouveau_noaccel)
++		engine->fifo.takedown(dev);
++out_graph:
++	if (!nouveau_noaccel)
++		engine->graph.takedown(dev);
++out_fb:
++	engine->fb.takedown(dev);
++out_timer:
++	engine->timer.takedown(dev);
++out_mc:
++	engine->mc.takedown(dev);
++out_gpuobj:
++	nouveau_gpuobj_takedown(dev);
++out_mem:
++	nouveau_mem_close(dev);
++out_instmem:
++	engine->instmem.takedown(dev);
++out_gpuobj_early:
++	nouveau_gpuobj_late_takedown(dev);
++out_bios:
++	nouveau_bios_takedown(dev);
++out:
++	vga_client_register(dev->pdev, NULL, NULL, NULL);
++	return ret;
++}
++
++static void nouveau_card_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++
++	NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
++
++	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
++		nouveau_backlight_exit(dev);
++
++		if (dev_priv->channel) {
++			nouveau_channel_free(dev_priv->channel);
++			dev_priv->channel = NULL;
++		}
++
++		if (!nouveau_noaccel) {
++			engine->fifo.takedown(dev);
++			engine->graph.takedown(dev);
++		}
++		engine->fb.takedown(dev);
++		engine->timer.takedown(dev);
++		engine->mc.takedown(dev);
++
++		mutex_lock(&dev->struct_mutex);
++		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
++		ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
++		mutex_unlock(&dev->struct_mutex);
++		nouveau_sgdma_takedown(dev);
++
++		nouveau_gpuobj_takedown(dev);
++		nouveau_mem_close(dev);
++		engine->instmem.takedown(dev);
++
++		if (drm_core_check_feature(dev, DRIVER_MODESET))
++			drm_irq_uninstall(dev);
++
++		nouveau_gpuobj_late_takedown(dev);
++		nouveau_bios_takedown(dev);
++
++		vga_client_register(dev->pdev, NULL, NULL, NULL);
++
++		dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
++	}
++}
++
++/* called when a client exits: release everything that was allocated
++ * for its file_priv */
++void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
++{
++	nouveau_channel_cleanup(dev, file_priv);
++}
++
++/* first module load, setup the mmio/fb mapping */
++/* KMS: we need mmio at load time, not when the first drm client opens. */
++int nouveau_firstopen(struct drm_device *dev)
++{
++	return 0;
++}
++
++/* if we have an OF card, copy vbios to RAMIN */
++static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
++{
++#if defined(__powerpc__)
++	int size, i;
++	const uint32_t *bios;
++	struct device_node *dn = pci_device_to_OF_node(dev->pdev);
++	if (!dn) {
++		NV_INFO(dev, "Unable to get the OF node\n");
++		return;
++	}
++
++	bios = of_get_property(dn, "NVDA,BMP", &size);
++	if (bios) {
++		for (i = 0; i < size; i += 4)
++			nv_wi32(dev, i, bios[i/4]);
++		NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
++	} else {
++		NV_INFO(dev, "Unable to get the OF bios\n");
++	}
++#endif
++}
++
++int nouveau_load(struct drm_device *dev, unsigned long flags)
++{
++	struct drm_nouveau_private *dev_priv;
++	uint32_t reg0;
++	resource_size_t mmio_start_offs;
++
++	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
++	if (!dev_priv)
++		return -ENOMEM;
++	dev->dev_private = dev_priv;
++	dev_priv->dev = dev;
++
++	dev_priv->flags = flags & NOUVEAU_FLAGS;
++	dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
++
++	NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
++		 dev->pci_vendor, dev->pci_device, dev->pdev->class);
++
++	dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
++
++	if (dev_priv->acpi_dsm)
++		nouveau_hybrid_setup(dev);
++
++	dev_priv->wq = create_workqueue("nouveau");
++	if (!dev_priv->wq)
++		return -ENOMEM;
++
++	/* resource 0 is mmio regs */
++	/* resource 1 is linear FB */
++	/* resource 2 is RAMIN (mmio regs + 0x1000000) */
++	/* resource 6 is bios */
++
++	/* map the mmio regs */
++	mmio_start_offs = pci_resource_start(dev->pdev, 0);
++	dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
++	if (!dev_priv->mmio) {
++		NV_ERROR(dev, "Unable to initialize the mmio mapping. "
++			 "Please report your setup to " DRIVER_EMAIL "\n");
++		return -EINVAL;
++	}
++	NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
++					(unsigned long long)mmio_start_offs);
++
++#ifdef __BIG_ENDIAN
++	/* Put the card in BE mode if it's not */
++	if (nv_rd32(dev, NV03_PMC_BOOT_1))
++		nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
++
++	DRM_MEMORYBARRIER();
++#endif
++
++	/* Time to determine the card architecture */
++	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
++
++	/* We're dealing with >=NV10 */
++	if ((reg0 & 0x0f000000) > 0) {
++		/* Bits 27-20 contain the chipset ID */
++		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
++	/* NV04 or NV05 */
++	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
++		if (reg0 & 0x00f00000)
++			dev_priv->chipset = 0x05;
++		else
++			dev_priv->chipset = 0x04;
++	} else
++		dev_priv->chipset = 0xff;
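++	/* e.g. reg0 = 0x04300000 yields chipset 0x43, an NV_40-family part */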
++
++	switch (dev_priv->chipset & 0xf0) {
++	case 0x00:
++	case 0x10:
++	case 0x20:
++	case 0x30:
++		dev_priv->card_type = dev_priv->chipset & 0xf0;
++		break;
++	case 0x40:
++	case 0x60:
++		dev_priv->card_type = NV_40;
++		break;
++	case 0x50:
++	case 0x80:
++	case 0x90:
++	case 0xa0:
++		dev_priv->card_type = NV_50;
++		break;
++	default:
++		NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
++		return -EINVAL;
++	}
++
++	NV_INFO(dev, "Detected an NV%02x generation card (0x%08x)\n",
++		dev_priv->card_type, reg0);
++
++	/* map larger RAMIN aperture on NV40 cards */
++	dev_priv->ramin  = NULL;
++	if (dev_priv->card_type >= NV_40) {
++		int ramin_bar = 2;
++		if (pci_resource_len(dev->pdev, ramin_bar) == 0)
++			ramin_bar = 3;
++
++		dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
++		dev_priv->ramin = ioremap(
++				pci_resource_start(dev->pdev, ramin_bar),
++				dev_priv->ramin_size);
++		if (!dev_priv->ramin) {
++			NV_ERROR(dev, "Failed to init RAMIN mapping, "
++				      "limited instance memory available\n");
++		}
++	}
++
++	/* On older cards (or if the above failed), create a map covering
++	 * the BAR0 PRAMIN aperture */
++	if (!dev_priv->ramin) {
++		dev_priv->ramin_size = 1 * 1024 * 1024;
++		dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
++							dev_priv->ramin_size);
++		if (!dev_priv->ramin) {
++			NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
++			return -ENOMEM;
++		}
++	}
++
++	nouveau_OF_copy_vbios_to_ramin(dev);
++
++	/* Special flags */
++	if (dev->pci_device == 0x01a0)
++		dev_priv->flags |= NV_NFORCE;
++	else if (dev->pci_device == 0x01f0)
++		dev_priv->flags |= NV_NFORCE2;
++
++	/* For kernel modesetting, init card now and bring up fbcon */
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		int ret = nouveau_card_init(dev);
++		if (ret)
++			return ret;
++	}
++
++	return 0;
++}
++
++static void nouveau_close(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	/* In the case of an error dev_priv may not be allocated yet */
++	if (dev_priv)
++		nouveau_card_takedown(dev);
++}
++
++/* KMS: we need mmio at load time, not when the first drm client opens. */
++void nouveau_lastclose(struct drm_device *dev)
++{
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		return;
++
++	nouveau_close(dev);
++}
++
++int nouveau_unload(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
++		if (dev_priv->card_type >= NV_50)
++			nv50_display_destroy(dev);
++		else
++			nv04_display_destroy(dev);
++		nouveau_close(dev);
++	}
++
++	iounmap(dev_priv->mmio);
++	iounmap(dev_priv->ramin);
++
++	kfree(dev_priv);
++	dev->dev_private = NULL;
++	return 0;
++}
++
++int
++nouveau_ioctl_card_init(struct drm_device *dev, void *data,
++			struct drm_file *file_priv)
++{
++	return nouveau_card_init(dev);
++}
++
++int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
++						struct drm_file *file_priv)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_nouveau_getparam *getparam = data;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	switch (getparam->param) {
++	case NOUVEAU_GETPARAM_CHIPSET_ID:
++		getparam->value = dev_priv->chipset;
++		break;
++	case NOUVEAU_GETPARAM_PCI_VENDOR:
++		getparam->value = dev->pci_vendor;
++		break;
++	case NOUVEAU_GETPARAM_PCI_DEVICE:
++		getparam->value = dev->pci_device;
++		break;
++	case NOUVEAU_GETPARAM_BUS_TYPE:
++		if (drm_device_is_agp(dev))
++			getparam->value = NV_AGP;
++		else if (drm_device_is_pcie(dev))
++			getparam->value = NV_PCIE;
++		else
++			getparam->value = NV_PCI;
++		break;
++	case NOUVEAU_GETPARAM_FB_PHYSICAL:
++		getparam->value = dev_priv->fb_phys;
++		break;
++	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
++		getparam->value = dev_priv->gart_info.aper_base;
++		break;
++	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
++		if (dev->sg) {
++			getparam->value = (unsigned long)dev->sg->virtual;
++		} else {
++			NV_ERROR(dev, "Requested PCIGART address, "
++					"while no PCIGART was created\n");
++			return -EINVAL;
++		}
++		break;
++	case NOUVEAU_GETPARAM_FB_SIZE:
++		getparam->value = dev_priv->fb_available_size;
++		break;
++	case NOUVEAU_GETPARAM_AGP_SIZE:
++		getparam->value = dev_priv->gart_info.aper_size;
++		break;
++	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
++		getparam->value = dev_priv->vm_vram_base;
++		break;
++	case NOUVEAU_GETPARAM_GRAPH_UNITS:
++		/* NV40 and NV50 versions are quite different, but the
++		 * register address is the same. The user is supposed to
++		 * know the card family anyway... */
++		if (dev_priv->chipset >= 0x40) {
++			getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
++			break;
++		}
++		/* FALLTHRU */
++	default:
++		NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++int
++nouveau_ioctl_setparam(struct drm_device *dev, void *data,
++		       struct drm_file *file_priv)
++{
++	struct drm_nouveau_setparam *setparam = data;
++
++	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
++
++	switch (setparam->param) {
++	default:
++		NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/* Wait until (value(reg) & mask) == val, or until the timeout expires */
++bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
++			uint32_t reg, uint32_t mask, uint32_t val)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
++	uint64_t start = ptimer->read(dev);
++
++	do {
++		if ((nv_rd32(dev, reg) & mask) == val)
++			return true;
++	} while (ptimer->read(dev) - start < timeout);
++
++	return false;
++}
++
++/* Waits for PGRAPH to go completely idle */
++bool nouveau_wait_for_idle(struct drm_device *dev)
++{
++	if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
++		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
++			 nv_rd32(dev, NV04_PGRAPH_STATUS));
++		return false;
++	}
++
++	return true;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+new file mode 100644
+index 0000000..c385d50
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
+@@ -0,0 +1,103 @@
++/*
++ * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
++ * All Rights Reserved.
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sub license,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++
++#include "nouveau_drv.h"
++
++int
++nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	struct drm_file *file_priv = filp->private_data;
++	struct drm_nouveau_private *dev_priv =
++		file_priv->minor->dev->dev_private;
++
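++	/* offsets below DRM_FILE_PAGE_OFFSET belong to legacy DRM maps;
++	 * everything above maps a TTM buffer object */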
++	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
++		return drm_mmap(filp, vma);
++
++	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
++}
++
++static int
++nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
++{
++	return ttm_mem_global_init(ref->object);
++}
++
++static void
++nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
++{
++	ttm_mem_global_release(ref->object);
++}
++
++int
++nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
++{
++	struct ttm_global_reference *global_ref;
++	int ret;
++
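++	/* TTM keeps one global memory-accounting object and one global BO
++	 * state object; both are refcounted, so take a reference on each */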
++	global_ref = &dev_priv->ttm.mem_global_ref;
++	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
++	global_ref->size = sizeof(struct ttm_mem_global);
++	global_ref->init = &nouveau_ttm_mem_global_init;
++	global_ref->release = &nouveau_ttm_mem_global_release;
++
++	ret = ttm_global_item_ref(global_ref);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed setting up TTM memory accounting\n");
++		dev_priv->ttm.mem_global_ref.release = NULL;
++		return ret;
++	}
++
++	dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
++	global_ref = &dev_priv->ttm.bo_global_ref.ref;
++	global_ref->global_type = TTM_GLOBAL_TTM_BO;
++	global_ref->size = sizeof(struct ttm_bo_global);
++	global_ref->init = &ttm_bo_global_init;
++	global_ref->release = &ttm_bo_global_release;
++
++	ret = ttm_global_item_ref(global_ref);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed setting up TTM BO subsystem\n");
++		ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
++		dev_priv->ttm.mem_global_ref.release = NULL;
++		return ret;
++	}
++
++	return 0;
++}
++
++void
++nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
++{
++	if (dev_priv->ttm.mem_global_ref.release == NULL)
++		return;
++
++	ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
++	ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
++	dev_priv->ttm.mem_global_ref.release = NULL;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
+new file mode 100644
+index 0000000..d2f143e
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
+@@ -0,0 +1,1002 @@
++/*
++ * Copyright 1993-2003 NVIDIA, Corporation
++ * Copyright 2006 Dave Airlie
++ * Copyright 2007 Maarten Maathuis
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++
++#include "nouveau_drv.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_crtc.h"
++#include "nouveau_fb.h"
++#include "nouveau_hw.h"
++#include "nvreg.h"
++
++static int
++nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
++			struct drm_framebuffer *old_fb);
++
++static void
++crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
++{
++	NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
++		       crtcstate->CRTC[index]);
++}
++
++static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++
++	regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
++	if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
++		regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
++		regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
++		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
++	}
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
++}
++
++static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++
++	nv_crtc->sharpness = level;
++	if (level < 0)	/* blur is in hw range 0x3f -> 0x20 */
++		level += 0x40;
++	regp->ramdac_634 = level;
++	NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
++}
++
++#define PLLSEL_VPLL1_MASK				\
++	(NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL	\
++	 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
++#define PLLSEL_VPLL2_MASK				\
++	(NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2		\
++	 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
++#define PLLSEL_TV_MASK					\
++	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		\
++	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1		\
++	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2	\
++	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
++
++/* NV4x 0x40.. pll notes:
++ * gpu pll: 0x4000 + 0x4004
++ * ?gpu? pll: 0x4008 + 0x400c
++ * vpll1: 0x4010 + 0x4014
++ * vpll2: 0x4018 + 0x401c
++ * mpll: 0x4020 + 0x4024
++ * mpll: 0x4038 + 0x403c
++ *
++ * the first register of each pair has some unknown details:
++ * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
++ * bits 20-23: (mpll) something to do with post divider?
++ * bits 28-31: related to single stage mode? (bit 8/12)
++ */
++
++static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct nv04_mode_state *state = &dev_priv->mode_reg;
++	struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
++	struct nouveau_pll_vals *pv = &regp->pllvals;
++	struct pll_lims pll_lim;
++
++	if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
++		return;
++
++	/* NM2 == 0 is used to determine single stage mode on two stage plls */
++	pv->NM2 = 0;
++
++	/* for newer nv4x the blob uses only the first stage of the vpll below a
++	 * certain clock.  for a certain nv4b this is 150MHz.  since the max
++	 * output frequency of the first stage for this card is 300MHz, it is
++	 * assumed the threshold is given by vco1 maxfreq/2
++	 */
++	/* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
++	 * not 8, others unknown), the blob always uses both plls.  no problem
++	 * has yet been observed in allowing the use of a single stage pll on
++	 * all nv43 however.  the behaviour of single stage use is untested
++	 * on nv40
++	 */
++	if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
++		memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
++
++	if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
++		return;
++
++	state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
++
++	/* The blob uses this always, so let's do the same */
++	if (dev_priv->card_type == NV_40)
++		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
++	/* again nv40 and some nv43 act more like nv3x as described above */
++	if (dev_priv->chipset < 0x41)
++		state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
++				 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
++	state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
++
++	if (pv->NM2)
++		NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
++			 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
++	else
++		NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
++			 pv->N1, pv->M1, pv->log2P);
++
++	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
++}
++
++static void
++nv_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_device *dev = crtc->dev;
++	unsigned char seq1 = 0, crtc17 = 0;
++	unsigned char crtc1A;
++
++	NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
++							nv_crtc->index);
++
++	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
++		return;
++
++	nv_crtc->last_dpms = mode;
++
++	if (nv_two_heads(dev))
++		NVSetOwner(dev, nv_crtc->index);
++
++	/* nv4ref indicates these two RPC1 bits inhibit h/v sync */
++	crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
++					NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
++	switch (mode) {
++	case DRM_MODE_DPMS_STANDBY:
++		/* Screen: Off; HSync: Off, VSync: On -- Not Supported */
++		seq1 = 0x20;
++		crtc17 = 0x80;
++		crtc1A |= 0x80;
++		break;
++	case DRM_MODE_DPMS_SUSPEND:
++		/* Screen: Off; HSync: On, VSync: Off -- Not Supported */
++		seq1 = 0x20;
++		crtc17 = 0x80;
++		crtc1A |= 0x40;
++		break;
++	case DRM_MODE_DPMS_OFF:
++		/* Screen: Off; HSync: Off, VSync: Off */
++		seq1 = 0x20;
++		crtc17 = 0x00;
++		crtc1A |= 0xC0;
++		break;
++	case DRM_MODE_DPMS_ON:
++	default:
++		/* Screen: On; HSync: On, VSync: On */
++		seq1 = 0x00;
++		crtc17 = 0x80;
++		break;
++	}
++
++	NVVgaSeqReset(dev, nv_crtc->index, true);
++	/* Each head has its own sequencer, so we can turn it off when we want */
++	seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
++	NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
++	crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
++	mdelay(10);
++	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
++	NVVgaSeqReset(dev, nv_crtc->index, false);
++
++	NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
++}
++
++static bool
++nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
++		   struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static void
++nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++	struct drm_framebuffer *fb = crtc->fb;
++
++	/* Calculate our timings */
++	int horizDisplay	= (mode->crtc_hdisplay >> 3) 	- 1;
++	int horizStart		= (mode->crtc_hsync_start >> 3) 	- 1;
++	int horizEnd		= (mode->crtc_hsync_end >> 3) 	- 1;
++	int horizTotal		= (mode->crtc_htotal >> 3)		- 5;
++	int horizBlankStart	= (mode->crtc_hdisplay >> 3)		- 1;
++	int horizBlankEnd	= (mode->crtc_htotal >> 3)		- 1;
++	int vertDisplay		= mode->crtc_vdisplay			- 1;
++	int vertStart		= mode->crtc_vsync_start 		- 1;
++	int vertEnd		= mode->crtc_vsync_end			- 1;
++	int vertTotal		= mode->crtc_vtotal 			- 2;
++	int vertBlankStart	= mode->crtc_vdisplay 			- 1;
++	int vertBlankEnd	= mode->crtc_vtotal			- 1;
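++	/* horizontal values are in character clocks (8 pixels), and the VGA
++	 * registers store most timings less a fixed offset, hence the ">> 3"
++	 * and the "- 1"/"- 2"/"- 5" adjustments above */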
++
++	struct drm_encoder *encoder;
++	bool fp_output = false;
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++		if (encoder->crtc == crtc &&
++		    (nv_encoder->dcb->type == OUTPUT_LVDS ||
++		     nv_encoder->dcb->type == OUTPUT_TMDS))
++			fp_output = true;
++	}
++
++	if (fp_output) {
++		vertStart = vertTotal - 3;
++		vertEnd = vertTotal - 2;
++		vertBlankStart = vertStart;
++		horizStart = horizTotal - 5;
++		horizEnd = horizTotal - 2;
++		horizBlankEnd = horizTotal + 4;
++#if 0
++		if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
++			/* This reportedly works around some video overlay bandwidth problems */
++			horizTotal += 2;
++#endif
++	}
++
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++		vertTotal |= 1;
++
++#if 0
++	ErrorF("horizDisplay: 0x%X \n", horizDisplay);
++	ErrorF("horizStart: 0x%X \n", horizStart);
++	ErrorF("horizEnd: 0x%X \n", horizEnd);
++	ErrorF("horizTotal: 0x%X \n", horizTotal);
++	ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
++	ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
++	ErrorF("vertDisplay: 0x%X \n", vertDisplay);
++	ErrorF("vertStart: 0x%X \n", vertStart);
++	ErrorF("vertEnd: 0x%X \n", vertEnd);
++	ErrorF("vertTotal: 0x%X \n", vertTotal);
++	ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
++	ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
++#endif
++
++	/*
++	 * compute correct Hsync & Vsync polarity
++	 */
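++	/* (the fallback below follows the legacy VGA convention, where the
++	 * monitor inferred the vertical resolution from the sync polarities) */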
++	if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
++		&& (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
++
++		regp->MiscOutReg = 0x23;
++		if (mode->flags & DRM_MODE_FLAG_NHSYNC)
++			regp->MiscOutReg |= 0x40;
++		if (mode->flags & DRM_MODE_FLAG_NVSYNC)
++			regp->MiscOutReg |= 0x80;
++	} else {
++		int vdisplay = mode->vdisplay;
++		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++			vdisplay *= 2;
++		if (mode->vscan > 1)
++			vdisplay *= mode->vscan;
++		if (vdisplay < 400)
++			regp->MiscOutReg = 0xA3;	/* +hsync -vsync */
++		else if (vdisplay < 480)
++			regp->MiscOutReg = 0x63;	/* -hsync +vsync */
++		else if (vdisplay < 768)
++			regp->MiscOutReg = 0xE3;	/* -hsync -vsync */
++		else
++			regp->MiscOutReg = 0x23;	/* +hsync +vsync */
++	}
++
++	regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
++
++	/*
++	 * Time Sequencer
++	 */
++	regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
++	/* 0x20 disables the sequencer */
++	if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
++		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
++	else
++		regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
++	regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
++	regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
++	regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
++
++	/*
++	 * CRTC
++	 */
++	regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
++	regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
++	regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
++	regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
++					  XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
++	regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
++	regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
++					  XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
++	regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
++	regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
++					  XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
++					  XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
++					  (1 << 4) |
++					  XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
++					  XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
++					  XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
++					  XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
++	regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
++					      1 << 6 |
++					      XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
++	regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
++	regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
++	regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
++	/* framebuffer can be larger than crtc scanout area. */
++	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
++	regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
++	regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
++	regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
++	regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
++	regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
++
++	/*
++	 * Some extended CRTC registers (they are not saved with the rest of the vga regs).
++	 */
++
++	/* framebuffer can be larger than crtc scanout area. */
++	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
++	regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
++					    MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
++	regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
++					   XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
++					   XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
++					   XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
++					   XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
++	regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
++					    XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
++					    XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
++					    XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
++	regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
++					   XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
++					   XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
++					   XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
++
++	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
++		horizTotal = (horizTotal >> 1) & ~1;
++		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
++		regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
++	} else
++		regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff;  /* interlace off */
++
++	/*
++	 * Graphics Display Controller
++	 */
++	regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
++	regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
++	regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
++	regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
++	regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
++	regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
++	regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
++	regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
++	regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
++
++	regp->Attribute[0]  = 0x00; /* standard colormap translation */
++	regp->Attribute[1]  = 0x01;
++	regp->Attribute[2]  = 0x02;
++	regp->Attribute[3]  = 0x03;
++	regp->Attribute[4]  = 0x04;
++	regp->Attribute[5]  = 0x05;
++	regp->Attribute[6]  = 0x06;
++	regp->Attribute[7]  = 0x07;
++	regp->Attribute[8]  = 0x08;
++	regp->Attribute[9]  = 0x09;
++	regp->Attribute[10] = 0x0A;
++	regp->Attribute[11] = 0x0B;
++	regp->Attribute[12] = 0x0C;
++	regp->Attribute[13] = 0x0D;
++	regp->Attribute[14] = 0x0E;
++	regp->Attribute[15] = 0x0F;
++	regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
++	/* Non-vga */
++	regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
++	regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
++	regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
++	regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
++}
++
++/**
++ * Sets up registers for the given mode/adjusted_mode pair.
++ *
++ * The clocks, CRTCs and outputs attached to this CRTC must be off.
++ *
++ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
++ * be easily turned on/off after this.
++ */
++static void
++nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
++	struct drm_encoder *encoder;
++	bool lvds_output = false, tmds_output = false, tv_output = false,
++		off_chip_digital = false;
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++		bool digital = false;
++
++		if (encoder->crtc != crtc)
++			continue;
++
++		if (nv_encoder->dcb->type == OUTPUT_LVDS)
++			digital = lvds_output = true;
++		if (nv_encoder->dcb->type == OUTPUT_TV)
++			tv_output = true;
++		if (nv_encoder->dcb->type == OUTPUT_TMDS)
++			digital = tmds_output = true;
++		if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
++			off_chip_digital = true;
++	}
++
++	/* Registers not directly related to the (s)vga mode */
++
++	/* What is the meaning of this register? */
++	/* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
++	regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
++
++	regp->crtc_eng_ctrl = 0;
++	/* Except for rare conditions I2C is enabled on the primary crtc */
++	if (nv_crtc->index == 0)
++		regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
++#if 0
++	/* Set overlay to desired crtc. */
++	if (dev->overlayAdaptor) {
++		NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
++		if (pPriv->overlayCRTC == nv_crtc->index)
++			regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
++	}
++#endif
++
++	/* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
++	regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
++			     NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
++			     NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
++	if (dev_priv->chipset >= 0x11)
++		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
++	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++		regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
++
++	/* Unblock some timings */
++	regp->CRTC[NV_CIO_CRE_53] = 0;
++	regp->CRTC[NV_CIO_CRE_54] = 0;
++
++	/* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
++	if (lvds_output)
++		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
++	else if (tmds_output)
++		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
++	else
++		regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
++
++	/* These values seem to vary */
++	/* This register seems to be used by the bios to make certain decisions on some G70 cards? */
++	regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
++
++	nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
++
++	/* probably a scratch reg, but kept for cargo-cult purposes:
++	 * bit0: crtc0?, head A
++	 * bit6: lvds, head A
++	 * bit7: (only in X), head A
++	 */
++	if (nv_crtc->index == 0)
++		regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
++
++	/* The blob seems to take the current value from crtc 0, add 4 to that
++	 * and reuse the old value for crtc 1 */
++	regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
++	if (!nv_crtc->index)
++		regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
++
++	/* the blob sometimes sets |= 0x10 (which is the same as setting |=
++	 * 1 << 30 on 0x60.830), for no apparent reason */
++	regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
++
++	regp->crtc_830 = mode->crtc_vdisplay - 3;
++	regp->crtc_834 = mode->crtc_vdisplay - 1;
++
++	if (dev_priv->card_type == NV_40)
++		/* This is what the blob does */
++		regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
++
++	if (dev_priv->card_type >= NV_30)
++		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
++
++	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
++
++	/* Some misc regs */
++	if (dev_priv->card_type == NV_40) {
++		regp->CRTC[NV_CIO_CRE_85] = 0xFF;
++		regp->CRTC[NV_CIO_CRE_86] = 0x1;
++	}
++
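++	/* (depth + 1) / 8 maps depth 8/15/16/24 to the pixel format
++	 * codes 1/2/2/3 written to CRE_PIXEL (see also mode_set_base) */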
++	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
++	/* Enable slaved mode (called MODE_TV in nv4ref.h) */
++	if (lvds_output || tmds_output || tv_output)
++		regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
++
++	/* Generic PRAMDAC regs */
++
++	if (dev_priv->card_type >= NV_10)
++		/* Only bit that bios and blob set. */
++		regp->nv10_cursync = (1 << 25);
++
++	regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
++				NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
++				NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
++	if (crtc->fb->depth == 16)
++		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
++	if (dev_priv->chipset >= 0x11)
++		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
++
++	regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
++	regp->tv_setup = 0;
++
++	nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
++
++	/* Some values the blob sets */
++	regp->ramdac_8c0 = 0x100;
++	regp->ramdac_a20 = 0x0;
++	regp->ramdac_a24 = 0xfffff;
++	regp->ramdac_a34 = 0x1;
++}
++
++/**
++ * Sets up registers for the given mode/adjusted_mode pair.
++ *
++ * The clocks, CRTCs and outputs attached to this CRTC must be off.
++ *
++ * This shouldn't enable any clocks, CRTCs, or outputs, but they should
++ * be easily turned on/off after this.
++ */
++static int
++nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
++		 struct drm_display_mode *adjusted_mode,
++		 int x, int y, struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
++	drm_mode_debug_printmodeline(adjusted_mode);
++
++	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
++	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
++
++	nv_crtc_mode_set_vga(crtc, adjusted_mode);
++	/* calculated in nv04_dfp_prepare; nv40 needs it written before calculating PLLs */
++	if (dev_priv->card_type == NV_40)
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
++	nv_crtc_mode_set_regs(crtc, adjusted_mode);
++	nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
++	return 0;
++}
++
++static void nv_crtc_save(struct drm_crtc *crtc)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
++	struct nv04_mode_state *state = &dev_priv->mode_reg;
++	struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
++	struct nv04_mode_state *saved = &dev_priv->saved_reg;
++	struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
++
++	if (nv_two_heads(crtc->dev))
++		NVSetOwner(crtc->dev, nv_crtc->index);
++
++	nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
++
++	/* init some state to saved value */
++	state->sel_clk = saved->sel_clk & ~(0x5 << 16);
++	crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
++	state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
++	crtc_state->gpio_ext = crtc_saved->gpio_ext;
++}
++
++static void nv_crtc_restore(struct drm_crtc *crtc)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
++	int head = nv_crtc->index;
++	uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
++
++	if (nv_two_heads(crtc->dev))
++		NVSetOwner(crtc->dev, head);
++
++	nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
++	nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
++
++	nv_crtc->last_dpms = NV_DPMS_CLEARED;
++}
++
++static void nv_crtc_prepare(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
++
++	if (nv_two_heads(dev))
++		NVSetOwner(dev, nv_crtc->index);
++
++	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
++
++	NVBlankScreen(dev, nv_crtc->index, true);
++
++	/* Some more preparation. */
++	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
++	if (dev_priv->card_type == NV_40) {
++		uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
++		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
++	}
++}
++
++static void nv_crtc_commit(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
++	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++
++	nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
++	nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
++
++#ifdef __BIG_ENDIAN
++	/* turn on LFB swapping */
++	{
++		uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
++		tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
++		NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
++	}
++#endif
++
++	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
++}
++
++static void nv_crtc_destroy(struct drm_crtc *crtc)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++
++	NV_DEBUG_KMS(crtc->dev, "\n");
++
++	if (!nv_crtc)
++		return;
++
++	drm_crtc_cleanup(crtc);
++
++	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
++	kfree(nv_crtc);
++}
++
++static void
++nv_crtc_gamma_load(struct drm_crtc *crtc)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
++	int i;
++
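++	/* the hardware palette is 256 packed 3-byte RGB entries overlaid
++	 * on the DAC array; keep only the high 8 bits of each 16-bit
++	 * LUT component */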
++	rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
++	for (i = 0; i < 256; i++) {
++		rgbs[i].r = nv_crtc->lut.r[i] >> 8;
++		rgbs[i].g = nv_crtc->lut.g[i] >> 8;
++		rgbs[i].b = nv_crtc->lut.b[i] >> 8;
++	}
++
++	nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
++}
++
++static void
++nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	int i;
++
++	if (size != 256)
++		return;
++
++	for (i = 0; i < 256; i++) {
++		nv_crtc->lut.r[i] = r[i];
++		nv_crtc->lut.g[i] = g[i];
++		nv_crtc->lut.b[i] = b[i];
++	}
++
++	/* We need to know the depth before we upload, but it's possible to
++	 * get called before a framebuffer is bound.  If this is the case,
++	 * mark the lut values as dirty by setting depth==0, and it'll be
++	 * uploaded on the first mode_set_base()
++	 */
++	if (!nv_crtc->base.fb) {
++		nv_crtc->lut.depth = 0;
++		return;
++	}
++
++	nv_crtc_gamma_load(crtc);
++}
++
++static int
++nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
++			struct drm_framebuffer *old_fb)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
++	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
++	int arb_burst, arb_lwm;
++	int ret;
++
++	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
++	if (ret)
++		return ret;
++
++	if (old_fb) {
++		struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
++		nouveau_bo_unpin(ofb->nvbo);
++	}
++
++	nv_crtc->fb.offset = fb->nvbo->bo.offset;
++
++	if (nv_crtc->lut.depth != drm_fb->depth) {
++		nv_crtc->lut.depth = drm_fb->depth;
++		nv_crtc_gamma_load(crtc);
++	}
++
++	/* Update the framebuffer format. */
++	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
++	regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
++	regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
++	if (crtc->fb->depth == 16)
++		regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
++	NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
++		      regp->ramdac_gen_ctrl);
++
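++	/* the pitch, in units of 8 bytes, is split across two registers:
++	 * the low 8 bits go in CR_OFFSET, bits 10:8 in CRE_RPC0 */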
++	regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
++	regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
++		XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
++
++	/* Update the framebuffer location. */
++	regp->fb_start = nv_crtc->fb.offset & ~3;
++	regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
++	NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
++
++	/* Update the arbitration parameters. */
++	nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
++			 &arb_burst, &arb_lwm);
++
++	regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
++	regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
++
++	if (dev_priv->card_type >= NV_30) {
++		regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
++		crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
++	}
++
++	return 0;
++}
++
++static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
++			       struct nouveau_bo *dst)
++{
++	int width = nv_cursor_width(dev);
++	uint32_t pixel;
++	int i, j;
++
++	for (i = 0; i < width; i++) {
++		for (j = 0; j < width; j++) {
++			pixel = nouveau_bo_rd32(src, i*64 + j);
++
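++			/* convert ARGB8888 to ARGB1555: alpha bit 31 lands in
++			 * bit 15, the top 5 bits of each colour channel fill
++			 * bits 14:10, 9:5 and 4:0 */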
++			nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
++				     | (pixel & 0xf80000) >> 9
++				     | (pixel & 0xf800) >> 6
++				     | (pixel & 0xf8) >> 3);
++		}
++	}
++}
++
++static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
++			       struct nouveau_bo *dst)
++{
++	uint32_t pixel;
++	int alpha, i;
++
++	/* nv11+ supports premultiplied (PM) or non-premultiplied (NPM) alpha
++	 * cursors (though NPM in combination with fp dithering may not work on
++	 * nv11, from "nv" driver history)
++	 * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
++	 * blob uses, however we get given PM cursors so we use PM mode
++	 */
++	for (i = 0; i < 64 * 64; i++) {
++		pixel = nouveau_bo_rd32(src, i);
++
++		/* hw gets unhappy if alpha <= rgb values.  for a PM image "less
++		 * than" shouldn't happen; fix "equal to" case by adding one to
++		 * alpha channel (slightly inaccurate, but so is attempting to
++		 * get back to NPM images, due to limits of integer precision)
++		 */
++		alpha = pixel >> 24;
++		if (alpha > 0 && alpha < 255)
++			pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);
++
++#ifdef __BIG_ENDIAN
++		{
++			struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++			if (dev_priv->chipset == 0x11) {
++				pixel = ((pixel & 0x000000ff) << 24) |
++					((pixel & 0x0000ff00) << 8) |
++					((pixel & 0x00ff0000) >> 8) |
++					((pixel & 0xff000000) >> 24);
++			}
++		}
++#endif
++
++		nouveau_bo_wr32(dst, i, pixel);
++	}
++}
++
++static int
++nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
++		     uint32_t buffer_handle, uint32_t width, uint32_t height)
++{
++	struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
++	struct drm_device *dev = dev_priv->dev;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct nouveau_bo *cursor = NULL;
++	struct drm_gem_object *gem;
++	int ret = 0;
++
++	if (width != 64 || height != 64)
++		return -EINVAL;
++
++	if (!buffer_handle) {
++		nv_crtc->cursor.hide(nv_crtc, true);
++		return 0;
++	}
++
++	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
++	if (!gem)
++		return -EINVAL;
++	cursor = nouveau_gem_object(gem);
++
++	ret = nouveau_bo_map(cursor);
++	if (ret)
++		goto out;
++
++	if (dev_priv->chipset >= 0x11)
++		nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
++	else
++		nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
++
++	nouveau_bo_unmap(cursor);
++	nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
++	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
++	nv_crtc->cursor.show(nv_crtc, true);
++out:
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_unreference(gem);
++	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
++
++static int
++nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++
++	nv_crtc->cursor.set_pos(nv_crtc, x, y);
++	return 0;
++}
++
++static const struct drm_crtc_funcs nv04_crtc_funcs = {
++	.save = nv_crtc_save,
++	.restore = nv_crtc_restore,
++	.cursor_set = nv04_crtc_cursor_set,
++	.cursor_move = nv04_crtc_cursor_move,
++	.gamma_set = nv_crtc_gamma_set,
++	.set_config = drm_crtc_helper_set_config,
++	.destroy = nv_crtc_destroy,
++};
++
++static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
++	.dpms = nv_crtc_dpms,
++	.prepare = nv_crtc_prepare,
++	.commit = nv_crtc_commit,
++	.mode_fixup = nv_crtc_mode_fixup,
++	.mode_set = nv_crtc_mode_set,
++	.mode_set_base = nv04_crtc_mode_set_base,
++	.load_lut = nv_crtc_gamma_load,
++};
++
++int
++nv04_crtc_create(struct drm_device *dev, int crtc_num)
++{
++	struct nouveau_crtc *nv_crtc;
++	int ret, i;
++
++	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
++	if (!nv_crtc)
++		return -ENOMEM;
++
++	for (i = 0; i < 256; i++) {
++		nv_crtc->lut.r[i] = i << 8;
++		nv_crtc->lut.g[i] = i << 8;
++		nv_crtc->lut.b[i] = i << 8;
++	}
++	nv_crtc->lut.depth = 0;
++
++	nv_crtc->index = crtc_num;
++	nv_crtc->last_dpms = NV_DPMS_CLEARED;
++
++	drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
++	drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
++	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
++
++	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
++			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
++	if (!ret) {
++		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
++		if (!ret)
++			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
++		if (ret)
++			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
++	}
++
++	nv04_cursor_init(nv_crtc);
++
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
+new file mode 100644
+index 0000000..89a91b9
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
+@@ -0,0 +1,70 @@
++#include "drmP.h"
++#include "drm_mode.h"
++#include "nouveau_reg.h"
++#include "nouveau_drv.h"
++#include "nouveau_crtc.h"
++#include "nouveau_hw.h"
++
++static void
++nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
++{
++	nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
++}
++
++static void
++nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
++{
++	nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
++}
++
++static void
++nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
++{
++	NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
++		      NV_PRAMDAC_CU_START_POS,
++		      XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
++		      XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
++}
++
++static void
++crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
++{
++	NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
++		       crtcstate->CRTC[index]);
++}
++
++static void
++nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
++{
++	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++	struct drm_crtc *crtc = &nv_crtc->base;
++
++	regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
++		MASK(NV_CIO_CRE_HCUR_ASI) |
++		XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
++	regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
++		XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
++	if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
++		regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
++			MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
++	regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;
++
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
++	crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
++	if (dev_priv->card_type == NV_40)
++		nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
++}
++
++int
++nv04_cursor_init(struct nouveau_crtc *crtc)
++{
++	crtc->cursor.set_offset = nv04_cursor_set_offset;
++	crtc->cursor.set_pos = nv04_cursor_set_pos;
++	crtc->cursor.hide = nv04_cursor_hide;
++	crtc->cursor.show = nv04_cursor_show;
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
+new file mode 100644
+index 0000000..1d73b15
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_dac.c
+@@ -0,0 +1,531 @@
++/*
++ * Copyright 2003 NVIDIA, Corporation
++ * Copyright 2006 Dave Airlie
++ * Copyright 2007 Maarten Maathuis
++ * Copyright 2007-2009 Stuart Bennett
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++
++#include "nouveau_drv.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_crtc.h"
++#include "nouveau_hw.h"
++#include "nvreg.h"
++
++int nv04_dac_output_offset(struct drm_encoder *encoder)
++{
++	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
++	int offset = 0;
++
++	if (dcb->or & (8 | OUTPUT_C))
++		offset += 0x68;
++	if (dcb->or & (8 | OUTPUT_B))
++		offset += 0x2000;
++
++	return offset;
++}
++
++/*
++ * arbitrary limit to number of sense oscillations tolerated in one sample
++ * period (observed to be at least 13 in "nvidia")
++ */
++#define MAX_HBLANK_OSC 20
++
++/*
++ * arbitrary limit to number of conflicting sample pairs to tolerate at a
++ * voltage step (observed to be at least 5 in "nvidia")
++ */
++#define MAX_SAMPLE_PAIRS 10
++
++static int sample_load_twice(struct drm_device *dev, bool sense[2])
++{
++	int i;
++
++	for (i = 0; i < 2; i++) {
++		bool sense_a, sense_b, sense_b_prime;
++		int j = 0;
++
++		/*
++		 * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
++		 * then wait for transition 0x4->0x5->0x4: enter hblank, leave
++		 * hblank again
++		 * use a 10ms timeout (guards against crtc being inactive, in
++		 * which case blank state would never change)
++		 */
++		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
++					0x00000001, 0x00000000))
++			return -EBUSY;
++		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
++					0x00000001, 0x00000001))
++			return -EBUSY;
++		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
++					0x00000001, 0x00000000))
++			return -EBUSY;
++
++		udelay(100);
++		/* when level triggers, sense is _LO_ */
++		sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
++
++		/* take another reading until it agrees with sense_a... */
++		do {
++			udelay(100);
++			sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
++			if (sense_a != sense_b) {
++				sense_b_prime =
++					nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
++				if (sense_b == sense_b_prime) {
++					/* ... unless two consecutive subsequent
++					 * samples agree; sense_a is replaced */
++					sense_a = sense_b;
++					/* force mis-match so we loop */
++					sense_b = !sense_a;
++				}
++			}
++		} while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);
++
++		if (j == MAX_HBLANK_OSC)
++			/* with so much oscillation, default to sense:LO */
++			sense[i] = false;
++		else
++			sense[i] = sense_a;
++	}
++
++	return 0;
++}
++
++static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
++						 struct drm_connector *connector)
++{
++	struct drm_device *dev = encoder->dev;
++	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
++	uint8_t saved_palette0[3], saved_palette_mask;
++	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
++	int i;
++	uint8_t blue;
++	bool sense = true;
++
++	/*
++	 * for this detection to work, there needs to be a mode set up on the
++	 * CRTC.  this is presumed to be the case
++	 */
++
++	if (nv_two_heads(dev))
++		/* only implemented for head A for now */
++		NVSetOwner(dev, 0);
++
++	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
++	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
++
++	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
++	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
++
++	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
++		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
++
++	msleep(10);
++
++	saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
++	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
++		       saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
++	saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
++	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
++
++	nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
++	for (i = 0; i < 3; i++)
++		saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
++	saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
++	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);
++
++	saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
++		      (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
++					   NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
++		      NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);
++
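++	/*
++	 * the ramp below relies on the monitor's 75 ohm termination:
++	 * unloaded, the DAC output crosses the sense threshold at a low
++	 * code and sense drops early; loaded, the roughly halved voltage
++	 * should keep sense high all the way up to 0x18
++	 */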
++	blue = 8;	/* start of test range */
++
++	do {
++		bool sense_pair[2];
++
++		nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
++		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
++		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
++		/* testing blue won't find monochrome monitors.  I don't care */
++		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);
++
++		i = 0;
++		/* take sample pairs until both samples in the pair agree */
++		do {
++			if (sample_load_twice(dev, sense_pair))
++				goto out;
++		} while ((sense_pair[0] != sense_pair[1]) &&
++							++i < MAX_SAMPLE_PAIRS);
++
++		if (i == MAX_SAMPLE_PAIRS)
++			/* too much oscillation defaults to LO */
++			sense = false;
++		else
++			sense = sense_pair[0];
++
++	/*
++	 * if sense goes LO before blue ramps to 0x18, monitor is not connected.
++	 * ergo, if blue gets to 0x18, monitor must be connected
++	 */
++	} while (++blue < 0x18 && sense);
++
++out:
++	nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
++	nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
++	for (i = 0; i < 3; i++)
++		nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
++	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
++	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
++	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
++	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
++
++	if (blue == 0x18) {
++		NV_INFO(dev, "Load detected on head A\n");
++		return connector_status_connected;
++	}
++
++	return connector_status_disconnected;
++}
++
++uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
++	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
++	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
++		saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
++	int head;
++
++#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
++	if (dcb->type == OUTPUT_TV) {
++		testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
++
++		if (dev_priv->vbios->tvdactestval)
++			testval = dev_priv->vbios->tvdactestval;
++	} else {
++		testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
++
++		if (dev_priv->vbios->dactestval)
++			testval = dev_priv->vbios->dactestval;
++	}
++
++	saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
++		      saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
++
++	saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);
++
++	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
++	if (regoffset == 0x68) {
++		saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
++		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
++	}
++
++	saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
++	saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
++
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
++
++	msleep(4);
++
++	saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
++	head = (saved_routput & 0x100) >> 8;
++#if 0
++	/* if there's a spare crtc, using it will minimise flicker for the case
++	 * where the in-use crtc is in use by an off-chip tmds encoder */
++	if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
++		head ^= 1;
++#endif
++	/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
++	routput = (saved_routput & 0xfffffece) | head << 8;
++
++	if (dev_priv->card_type >= NV_40) {
++		if (dcb->type == OUTPUT_TV)
++			routput |= 0x1a << 16;
++		else
++			routput &= ~(0x1a << 16);
++	}
++
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
++	msleep(1);
++
++	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);
++
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
++		      NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
++	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
++		      temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
++	msleep(5);
++
++	sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
++
++	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
++		      temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
++
++	/* bios does something more complex for restoring, but I think this is good enough */
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
++	if (regoffset == 0x68)
++		nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
++	nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
++
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
++
++	return sample;
++}
++
++static enum drm_connector_status
++nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
++{
++	struct drm_device *dev = encoder->dev;
++	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
++	uint32_t sample = nv17_dac_sample_load(encoder);
++
++	if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
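++		/* '@' + ffs() maps the lowest set bit of dcb->or to a
++		 * letter: bit 0 -> 'A', bit 1 -> 'B', ... */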
++		NV_INFO(dev, "Load detected on output %c\n",
++			'@' + ffs(dcb->or));
++		return connector_status_connected;
++	} else {
++		return connector_status_disconnected;
++	}
++}
++
++static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static void nv04_dac_prepare(struct drm_encoder *encoder)
++{
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int head = nouveau_crtc(encoder->crtc)->index;
++	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
++
++	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
++
++	nv04_dfp_disable(dev, head);
++
++	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
++	 * at LCD__INDEX which we don't alter
++	 */
++	if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
++		crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
++}
++
++static void nv04_dac_mode_set(struct drm_encoder *encoder,
++			      struct drm_display_mode *mode,
++			      struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int head = nouveau_crtc(encoder->crtc)->index;
++
++	if (nv_gf4_disp_arch(dev)) {
++		struct drm_encoder *rebind;
++		uint32_t dac_offset = nv04_dac_output_offset(encoder);
++		uint32_t otherdac;
++
++		/* bit 16-19 are bits that are set on some G70 cards,
++		 * but don't seem to have much effect */
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
++			      head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
++		/* force any other vga encoders to bind to the other crtc */
++		list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
++			if (rebind == encoder
++			    || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
++				continue;
++
++			dac_offset = nv04_dac_output_offset(rebind);
++			otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
++			NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
++				      (otherdac & ~0x0100) | (head ^ 1) << 8);
++		}
++	}
++
++	/* This could use refinement for flatpanels, but it should work this way */
++	if (dev_priv->chipset < 0x44)
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
++	else
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
++}
++
++static void nv04_dac_commit(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++
++	helper->dpms(encoder, DRM_MODE_DPMS_ON);
++
++	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
++		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
++}
++
++void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
++
++	if (nv_gf4_disp_arch(dev)) {
++		uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
++		int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
++		uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
++
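++		/* dac_users is a refcount-by-bitmask of the DCB entries
++		 * driving this DAC; the clock is only gated off once the
++		 * last user is gone */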
++		if (enable) {
++			*dac_users |= 1 << dcb->index;
++			NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
++
++		} else {
++			*dac_users &= ~(1 << dcb->index);
++			if (!*dac_users)
++				NVWriteRAMDAC(dev, 0, dacclk_off,
++					dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
++		}
++	}
++}
++
++static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++	if (nv_encoder->last_dpms == mode)
++		return;
++	nv_encoder->last_dpms = mode;
++
++	NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
++		     mode, nv_encoder->dcb->index);
++
++	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
++}
++
++static void nv04_dac_save(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++
++	if (nv_gf4_disp_arch(dev))
++		nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
++							  nv04_dac_output_offset(encoder));
++}
++
++static void nv04_dac_restore(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++
++	if (nv_gf4_disp_arch(dev))
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
++			      nv_encoder->restore.output);
++
++	nv_encoder->last_dpms = NV_DPMS_CLEARED;
++}
++
++static void nv04_dac_destroy(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++	NV_DEBUG_KMS(encoder->dev, "\n");
++
++	drm_encoder_cleanup(encoder);
++	kfree(nv_encoder);
++}
++
++static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
++	.dpms = nv04_dac_dpms,
++	.save = nv04_dac_save,
++	.restore = nv04_dac_restore,
++	.mode_fixup = nv04_dac_mode_fixup,
++	.prepare = nv04_dac_prepare,
++	.commit = nv04_dac_commit,
++	.mode_set = nv04_dac_mode_set,
++	.detect = nv04_dac_detect
++};
++
++static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
++	.dpms = nv04_dac_dpms,
++	.save = nv04_dac_save,
++	.restore = nv04_dac_restore,
++	.mode_fixup = nv04_dac_mode_fixup,
++	.prepare = nv04_dac_prepare,
++	.commit = nv04_dac_commit,
++	.mode_set = nv04_dac_mode_set,
++	.detect = nv17_dac_detect
++};
++
++static const struct drm_encoder_funcs nv04_dac_funcs = {
++	.destroy = nv04_dac_destroy,
++};
++
++int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
++{
++	const struct drm_encoder_helper_funcs *helper;
++	struct drm_encoder *encoder;
++	struct nouveau_encoder *nv_encoder = NULL;
++
++	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
++	if (!nv_encoder)
++		return -ENOMEM;
++
++	encoder = to_drm_encoder(nv_encoder);
++
++	nv_encoder->dcb = entry;
++	nv_encoder->or = ffs(entry->or) - 1;
++
++	if (nv_gf4_disp_arch(dev))
++		helper = &nv17_dac_helper_funcs;
++	else
++		helper = &nv04_dac_helper_funcs;
++
++	drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
++	drm_encoder_helper_add(encoder, helper);
++
++	encoder->possible_crtcs = entry->heads;
++	encoder->possible_clones = 0;
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
+new file mode 100644
+index 0000000..483f875
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
+@@ -0,0 +1,623 @@
++/*
++ * Copyright 2003 NVIDIA, Corporation
++ * Copyright 2006 Dave Airlie
++ * Copyright 2007 Maarten Maathuis
++ * Copyright 2007-2009 Stuart Bennett
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++
++#include "nouveau_drv.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_crtc.h"
++#include "nouveau_hw.h"
++#include "nvreg.h"
++
++#define FP_TG_CONTROL_ON  (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |	\
++			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |		\
++			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
++#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE |	\
++			   NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE |	\
++			   NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
++
++static inline bool is_fpc_off(uint32_t fpc)
++{
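++	/* off only if every *_DISABLE bit is set and every *_POS bit clear */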
++	return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
++			FP_TG_CONTROL_OFF);
++}
++
++int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
++{
++	/* special case of nv_read_tmds to find crtc associated with an output.
++	 * this does not give a correct answer for off-chip dvi, but there's no
++	 * use for such an answer anyway
++	 */
++	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
++
++	NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
++	              NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
++	return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
++}
++
++void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
++			int head, bool dl)
++{
++	/* The BIOS scripts don't do this for us, sadly
++	 * Luckily we do know the values ;-)
++	 *
++	 * head < 0 indicates we wish to force a setting with the overrideval
++	 * (for VT restore etc.)
++	 */
++
++	int ramdac = (dcbent->or & OUTPUT_C) >> 2;
++	uint8_t tmds04 = 0x80;
++
++	if (head != ramdac)
++		tmds04 = 0x88;
++
++	if (dcbent->type == OUTPUT_LVDS)
++		tmds04 |= 0x01;
++
++	nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
++
++	if (dl)	/* dual link */
++		nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
++}
++
++void nv04_dfp_disable(struct drm_device *dev, int head)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
++
++	if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
++	    FP_TG_CONTROL_ON) {
++		/* digital remnants must be cleaned out before new crtc
++		 * values are programmed.  the delay gives the vga stuff
++		 * time to realise it's in control again
++		 */
++		NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
++			      FP_TG_CONTROL_OFF);
++		msleep(50);
++	}
++	/* don't inadvertently turn it on when state written later */
++	crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
++}
++
++void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_crtc *crtc;
++	struct nouveau_crtc *nv_crtc;
++	uint32_t *fpc;
++
++	if (mode == DRM_MODE_DPMS_ON) {
++		nv_crtc = nouveau_crtc(encoder->crtc);
++		fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
++
++		if (is_fpc_off(*fpc)) {
++			/* using the saved value is ok, as (is_digital && dpms_on
++			 * && fp_control==OFF) is (at present) *only* true when
++			 * fpc's most recent change was made by the "off" code
++			 * below
++			 */
++			*fpc = nv_crtc->dpms_saved_fp_control;
++		}
++
++		nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
++		NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
++	} else {
++		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++			nv_crtc = nouveau_crtc(crtc);
++			fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
++
++			nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
++			if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
++				nv_crtc->dpms_saved_fp_control = *fpc;
++				/* cut the FP output */
++				*fpc &= ~FP_TG_CONTROL_ON;
++				*fpc |= FP_TG_CONTROL_OFF;
++				NVWriteRAMDAC(dev, nv_crtc->index,
++					      NV_PRAMDAC_FP_TG_CONTROL, *fpc);
++			}
++		}
++	}
++}
++
++static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
++				struct drm_display_mode *mode,
++				struct drm_display_mode *adjusted_mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
++
++	/* For internal panels and gpu scaling on DVI we need the native mode */
++	if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
++		if (!nv_connector->native_mode)
++			return false;
++		nv_encoder->mode = *nv_connector->native_mode;
++		adjusted_mode->clock = nv_connector->native_mode->clock;
++	} else {
++		nv_encoder->mode = *adjusted_mode;
++	}
++
++	return true;
++}
++
++static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
++				     struct nouveau_encoder *nv_encoder, int head)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_mode_state *state = &dev_priv->mode_reg;
++	uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
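++	/* bit 16 presumably selects the first on-chip digital link and
++	 * bit 18 the second, hence the name bits1618 */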
++
++	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
++		return;
++
++	/* SEL_CLK is only used on the primary ramdac
++	 * It toggles spread spectrum PLL output and sets the bindings of PLLs
++	 * to heads on digital outputs
++	 */
++	if (head)
++		state->sel_clk |= bits1618;
++	else
++		state->sel_clk &= ~bits1618;
++
++	/* nv30:
++	 *	bit 0		NVClk spread spectrum on/off
++	 *	bit 2		MemClk spread spectrum on/off
++	 * 	bit 4		PixClk1 spread spectrum on/off toggle
++	 * 	bit 6		PixClk2 spread spectrum on/off toggle
++	 *
++	 * nv40 (observations from bios behaviour and mmio traces):
++	 * 	bits 4&6	as for nv30
++	 * 	bits 5&7	head dependent as for bits 4&6, but do not appear with 4&6;
++	 * 			maybe a different spread mode
++	 * 	bits 8&10	seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
++	 * 	The logic behind turning spread spectrum on/off in the first place,
++	 * 	and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
++	 * 	entry has the necessary info)
++	 */
++	if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
++		int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;
++
++		state->sel_clk &= ~0xf0;
++		state->sel_clk |= (head ? 0x40 : 0x10) << shift;
++	}
++}
++
++static void nv04_dfp_prepare(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int head = nouveau_crtc(encoder->crtc)->index;
++	struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
++	uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
++	uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
++
++	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
++
++	nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
++
++	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
++	 * at LCD__INDEX which we don't alter
++	 */
++	if (!(*cr_lcd & 0x44)) {
++		*cr_lcd = 0x3;
++
++		if (nv_two_heads(dev)) {
++			if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
++				*cr_lcd |= head ? 0x0 : 0x8;
++			else {
++				*cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
++				if (nv_encoder->dcb->type == OUTPUT_LVDS)
++					*cr_lcd |= 0x30;
++				if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
++					/* avoid being connected to both crtcs */
++					*cr_lcd_oth &= ~0x30;
++					NVWriteVgaCrtc(dev, head ^ 1,
++						       NV_CIO_CRE_LCD__INDEX,
++						       *cr_lcd_oth);
++				}
++			}
++		}
++	}
++}
++
++static void nv04_dfp_mode_set(struct drm_encoder *encoder,
++			      struct drm_display_mode *mode,
++			      struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++	struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
++	struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_display_mode *output_mode = &nv_encoder->mode;
++	uint32_t mode_ratio, panel_ratio;
++
++	NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
++	drm_mode_debug_printmodeline(output_mode);
++
++	/* Initialize the FP registers in this CRTC. */
++	regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
++	regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
++	if (!nv_gf4_disp_arch(dev) ||
++	    (output_mode->hsync_start - output_mode->hdisplay) >=
++					dev_priv->vbios->digital_min_front_porch)
++		regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
++	else
++		regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
++	regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
++	regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
++	regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
++	regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
++
++	regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
++	regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
++	regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
++	regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
++	regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
++	regp->fp_vert_regs[FP_VALID_START] = 0;
++	regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
++
++	/* bit26: a bit seen on some g7x, no as-yet-discernible purpose */
++	regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
++			   (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
++	/* Deal with vsync/hsync polarity */
++	/* LVDS screens do set this, but modes with +ve syncs are very rare */
++	if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
++		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
++	if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
++		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
++	/* panel scaling first, as native would get set otherwise */
++	if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
++	    nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER)	/* panel handles it */
++		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
++	else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
++		 adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
++		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
++	else /* gpu needs to scale */
++		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
++	if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
++		regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
++	if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
++	    output_mode->clock > 165000)
++		regp->fp_control |= (2 << 24);
++	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
++		bool duallink, dummy;
++
++		nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
++					      clock, &duallink, &dummy);
++		if (duallink)
++			regp->fp_control |= (8 << 28);
++	} else
++	if (output_mode->clock > 165000)
++		regp->fp_control |= (8 << 28);
++
++	regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
++			   NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
++			   NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
++			   NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
++			   NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
++			   NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
++			   NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
++
++	/* We want automatic scaling */
++	regp->fp_debug_1 = 0;
++	/* This can override HTOTAL and VTOTAL */
++	regp->fp_debug_2 = 0;
++
++	/* Use 20.12 fixed point format to avoid floats */
++	mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
++	panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
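++	/* e.g. a 1024x768 (~4:3) mode on a 1280x1024 (5:4) panel gives
++	 * mode_ratio = 4096 * 1024 / 768 = 5461 and panel_ratio =
++	 * 4096 * 1280 / 1024 = 5120, taking the mode_ratio > panel_ratio
++	 * branch below */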
++	/* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
++	 * get treated the same as SCALE_FULLSCREEN */
++	if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
++	    mode_ratio != panel_ratio) {
++		uint32_t diff, scale;
++		bool divide_by_2 = nv_gf4_disp_arch(dev);
++
++		if (mode_ratio < panel_ratio) {
++			/* vertical needs to expand to glass size (automatic)
++			 * horizontal needs to be scaled at vertical scale factor
++			 * to maintain aspect */
++
++			scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
++			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
++					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
++
++			/* restrict area of screen used, horizontally */
++			diff = output_mode->hdisplay -
++			       output_mode->vdisplay * mode_ratio / (1 << 12);
++			regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
++			regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
++		}
++
++		if (mode_ratio > panel_ratio) {
++			/* horizontal needs to expand to glass size (automatic)
++			 * vertical needs to be scaled at horizontal scale factor
++			 * to maintain aspect */
++
++			scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
++			regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
++					   XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
++
++			/* restrict area of screen used, vertically */
++			diff = output_mode->vdisplay -
++			       (1 << 12) * output_mode->hdisplay / mode_ratio;
++			regp->fp_vert_regs[FP_VALID_START] += diff / 2;
++			regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
++		}
++	}
++
++	/* Output property. */
++	if (nv_connector->use_dithering) {
++		if (dev_priv->chipset == 0x11)
++			regp->dither = savep->dither | 0x00010000;
++		else {
++			int i;
++			regp->dither = savep->dither | 0x00000001;
++			for (i = 0; i < 3; i++) {
++				regp->dither_regs[i] = 0xe4e4e4e4;
++				regp->dither_regs[i + 3] = 0x44444444;
++			}
++		}
++	} else {
++		if (dev_priv->chipset != 0x11) {
++			/* reset them */
++			int i;
++			for (i = 0; i < 3; i++) {
++				regp->dither_regs[i] = savep->dither_regs[i];
++				regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
++			}
++		}
++		regp->dither = savep->dither;
++	}
++
++	regp->fp_margin_color = 0;
++}
++
++static void nv04_dfp_commit(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct dcb_entry *dcbe = nv_encoder->dcb;
++	int head = nouveau_crtc(encoder->crtc)->index;
++
++	if (dcbe->type == OUTPUT_TMDS)
++		run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
++	else if (dcbe->type == OUTPUT_LVDS)
++		call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
++
++	/* update fp_control state for any changes made by scripts,
++	 * so correct value is written at DPMS on */
++	dev_priv->mode_reg.crtc_reg[head].fp_control =
++		NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
++
++	/* This could use refinement for flatpanels, but it should work this way */
++	if (dev_priv->chipset < 0x44)
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
++	else
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
++
++	helper->dpms(encoder, DRM_MODE_DPMS_ON);
++
++	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++		drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
++		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
++}
++
++static inline bool is_powersaving_dpms(int mode)
++{
++	return (mode != DRM_MODE_DPMS_ON);
++}
++
++static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_crtc *crtc = encoder->crtc;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
++
++	if (nv_encoder->last_dpms == mode)
++		return;
++	nv_encoder->last_dpms = mode;
++
++	NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
++		     mode, nv_encoder->dcb->index);
++
++	if (was_powersaving && is_powersaving_dpms(mode))
++		return;
++
++	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
++		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
++
++		/* when removing an output, crtc may not be set, but PANEL_OFF
++		 * must still be run
++		 */
++		int head = crtc ? nouveau_crtc(crtc)->index :
++			   nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
++
++		if (mode == DRM_MODE_DPMS_ON) {
++			if (!nv_connector->native_mode) {
++				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
++				return;
++			}
++			call_lvds_script(dev, nv_encoder->dcb, head,
++					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
++		} else
++			/* pxclk of 0 is fine for PANEL_OFF, and for a
++			 * disconnected LVDS encoder there is no native_mode
++			 */
++			call_lvds_script(dev, nv_encoder->dcb, head,
++					 LVDS_PANEL_OFF, 0);
++	}
++
++	nv04_dfp_update_fp_control(encoder, mode);
++
++	if (mode == DRM_MODE_DPMS_ON)
++		nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
++	else {
++		dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
++		dev_priv->mode_reg.sel_clk &= ~0xf0;
++	}
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
++}
++
++static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++	if (nv_encoder->last_dpms == mode)
++		return;
++	nv_encoder->last_dpms = mode;
++
++	NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
++		     mode, nv_encoder->dcb->index);
++
++	nv04_dfp_update_fp_control(encoder, mode);
++}
++
++static void nv04_dfp_save(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++
++	if (nv_two_heads(dev))
++		nv_encoder->restore.head =
++			nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
++}
++
++static void nv04_dfp_restore(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int head = nv_encoder->restore.head;
++
++	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
++		struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
++		if (native_mode)
++			call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
++					 native_mode->clock);
++		else
++			NV_ERROR(dev, "Not restoring LVDS without native mode\n");
++
++	} else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
++		int clock = nouveau_hw_pllvals_to_clk
++					(&dev_priv->saved_reg.crtc_reg[head].pllvals);
++
++		run_tmds_table(dev, nv_encoder->dcb, head, clock);
++	}
++
++	nv_encoder->last_dpms = NV_DPMS_CLEARED;
++}
++
++static void nv04_dfp_destroy(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++	NV_DEBUG_KMS(encoder->dev, "\n");
++
++	drm_encoder_cleanup(encoder);
++	kfree(nv_encoder);
++}
++
++static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
++	.dpms = nv04_lvds_dpms,
++	.save = nv04_dfp_save,
++	.restore = nv04_dfp_restore,
++	.mode_fixup = nv04_dfp_mode_fixup,
++	.prepare = nv04_dfp_prepare,
++	.commit = nv04_dfp_commit,
++	.mode_set = nv04_dfp_mode_set,
++	.detect = NULL,
++};
++
++static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
++	.dpms = nv04_tmds_dpms,
++	.save = nv04_dfp_save,
++	.restore = nv04_dfp_restore,
++	.mode_fixup = nv04_dfp_mode_fixup,
++	.prepare = nv04_dfp_prepare,
++	.commit = nv04_dfp_commit,
++	.mode_set = nv04_dfp_mode_set,
++	.detect = NULL,
++};
++
++static const struct drm_encoder_funcs nv04_dfp_funcs = {
++	.destroy = nv04_dfp_destroy,
++};
++
++int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
++{
++	const struct drm_encoder_helper_funcs *helper;
++	struct drm_encoder *encoder;
++	struct nouveau_encoder *nv_encoder = NULL;
++	int type;
++
++	switch (entry->type) {
++	case OUTPUT_TMDS:
++		type = DRM_MODE_ENCODER_TMDS;
++		helper = &nv04_tmds_helper_funcs;
++		break;
++	case OUTPUT_LVDS:
++		type = DRM_MODE_ENCODER_LVDS;
++		helper = &nv04_lvds_helper_funcs;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
++	if (!nv_encoder)
++		return -ENOMEM;
++
++	encoder = to_drm_encoder(nv_encoder);
++
++	nv_encoder->dcb = entry;
++	nv_encoder->or = ffs(entry->or) - 1;
++
++	drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
++	drm_encoder_helper_add(encoder, helper);
++
++	encoder->possible_crtcs = entry->heads;
++	encoder->possible_clones = 0;
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
+new file mode 100644
+index 0000000..ef77215
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_display.c
+@@ -0,0 +1,287 @@
++/*
++ * Copyright 2009 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Ben Skeggs
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "drm_crtc_helper.h"
++
++#include "nouveau_drv.h"
++#include "nouveau_fb.h"
++#include "nouveau_hw.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++
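++/* true when more than one bit is set, i.e. the connector is shared by several encoders */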
++#define MULTIPLE_ENCODERS(e) (e & (e - 1))
++
++static void
++nv04_display_store_initial_head_owner(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->chipset != 0x11) {
++		dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
++		goto ownerknown;
++	}
++
++	/* reading CR44 is broken on nv11, so we attempt to infer it */
++	if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28))	/* heads tied, restore both */
++		dev_priv->crtc_owner = 0x4;
++	else {
++		uint8_t slaved_on_A, slaved_on_B;
++		bool tvA = false;
++		bool tvB = false;
++
++		NVLockVgaCrtcs(dev, false);
++
++		slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
++									0x80;
++		if (slaved_on_B)
++			tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
++					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
++
++		slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
++									0x80;
++		if (slaved_on_A)
++			tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
++					MASK(NV_CIO_CRE_LCD_LCD_SELECT));
++
++		NVLockVgaCrtcs(dev, true);
++
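++		/* prefer a head slaved to a non-TV output; failing that,
++		 * any slaved head, then head A */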
++		if (slaved_on_A && !tvA)
++			dev_priv->crtc_owner = 0x0;
++		else if (slaved_on_B && !tvB)
++			dev_priv->crtc_owner = 0x3;
++		else if (slaved_on_A)
++			dev_priv->crtc_owner = 0x0;
++		else if (slaved_on_B)
++			dev_priv->crtc_owner = 0x3;
++		else
++			dev_priv->crtc_owner = 0x0;
++	}
++
++ownerknown:
++	NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);
++
++	/* we need to ensure the heads are not tied henceforth, or reading any
++	 * 8-bit reg on head B will fail;
++	 * setting a single arbitrary head solves that */
++	NVSetOwner(dev, 0);
++}
++
++int
++nv04_display_create(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
++	struct drm_encoder *encoder;
++	struct drm_crtc *crtc;
++	uint16_t connector[16] = { 0 };
++	int i, ret;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	if (nv_two_heads(dev))
++		nv04_display_store_initial_head_owner(dev);
++	nouveau_hw_save_vga_fonts(dev, 1);
++
++	drm_mode_config_init(dev);
++	drm_mode_create_scaling_mode_property(dev);
++	drm_mode_create_dithering_property(dev);
++
++	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
++
++	dev->mode_config.min_width = 0;
++	dev->mode_config.min_height = 0;
++	switch (dev_priv->card_type) {
++	case NV_04:
++		dev->mode_config.max_width = 2048;
++		dev->mode_config.max_height = 2048;
++		break;
++	default:
++		dev->mode_config.max_width = 4096;
++		dev->mode_config.max_height = 4096;
++		break;
++	}
++
++	dev->mode_config.fb_base = dev_priv->fb_phys;
++
++	nv04_crtc_create(dev, 0);
++	if (nv_two_heads(dev))
++		nv04_crtc_create(dev, 1);
++
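++	/* first pass: create an encoder for each DCB entry and record which
++	 * encoder types share each connector index */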
++	for (i = 0; i < dcb->entries; i++) {
++		struct dcb_entry *dcbent = &dcb->entry[i];
++
++		switch (dcbent->type) {
++		case OUTPUT_ANALOG:
++			ret = nv04_dac_create(dev, dcbent);
++			break;
++		case OUTPUT_LVDS:
++		case OUTPUT_TMDS:
++			ret = nv04_dfp_create(dev, dcbent);
++			break;
++		case OUTPUT_TV:
++			if (dcbent->location == DCB_LOC_ON_CHIP)
++				ret = nv17_tv_create(dev, dcbent);
++			else
++				ret = nv04_tv_create(dev, dcbent);
++			break;
++		default:
++			NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
++			continue;
++		}
++
++		if (ret)
++			continue;
++
++		connector[dcbent->connector] |= (1 << dcbent->type);
++	}
++
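++	/* second pass: create one connector per DCB connector index, typed
++	 * after the set of encoders behind it */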
++	for (i = 0; i < dcb->entries; i++) {
++		struct dcb_entry *dcbent = &dcb->entry[i];
++		uint16_t encoders;
++		int type;
++
++		encoders = connector[dcbent->connector];
++		if (!(encoders & (1 << dcbent->type)))
++			continue;
++		connector[dcbent->connector] = 0;
++
++		switch (dcbent->type) {
++		case OUTPUT_ANALOG:
++			if (!MULTIPLE_ENCODERS(encoders))
++				type = DRM_MODE_CONNECTOR_VGA;
++			else
++				type = DRM_MODE_CONNECTOR_DVII;
++			break;
++		case OUTPUT_TMDS:
++			if (!MULTIPLE_ENCODERS(encoders))
++				type = DRM_MODE_CONNECTOR_DVID;
++			else
++				type = DRM_MODE_CONNECTOR_DVII;
++			break;
++		case OUTPUT_LVDS:
++			type = DRM_MODE_CONNECTOR_LVDS;
++#if 0
++			/* don't create i2c adapter when lvds ddc not allowed */
++			if (dcbent->lvdsconf.use_straps_for_mode ||
++			    dev_priv->vbios->fp_no_ddc)
++				i2c_index = 0xf;
++#endif
++			break;
++		case OUTPUT_TV:
++			type = DRM_MODE_CONNECTOR_TV;
++			break;
++		default:
++			type = DRM_MODE_CONNECTOR_Unknown;
++			continue;
++		}
++
++		nouveau_connector_create(dev, dcbent->connector, type);
++	}
++
++	/* Save previous state */
++	NVLockVgaCrtcs(dev, false);
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++		crtc->funcs->save(crtc);
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct drm_encoder_helper_funcs *func = encoder->helper_private;
++
++		func->save(encoder);
++	}
++
++	return 0;
++}
++
++void
++nv04_display_destroy(struct drm_device *dev)
++{
++	struct drm_encoder *encoder;
++	struct drm_crtc *crtc;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	/* Turn every CRTC off. */
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct drm_mode_set modeset = {
++			.crtc = crtc,
++		};
++
++		crtc->funcs->set_config(&modeset);
++	}
++
++	/* Restore state */
++	NVLockVgaCrtcs(dev, false);
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct drm_encoder_helper_funcs *func = encoder->helper_private;
++
++		func->restore(encoder);
++	}
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++		crtc->funcs->restore(crtc);
++
++	drm_mode_config_cleanup(dev);
++
++	nouveau_hw_save_vga_fonts(dev, 0);
++}
++
++void
++nv04_display_restore(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_encoder *encoder;
++	struct drm_crtc *crtc;
++
++	NVLockVgaCrtcs(dev, false);
++
++	/* meh.. modeset apparently doesn't set up all the regs and depends
++	 * on pre-existing state, so for now load the state of the card *before*
++	 * nouveau was loaded, and then do a modeset.
++	 *
++	 * the best thing to do is probably to make the save/restore routines
++	 * not save/restore "pre-load" state, but be more general so we can
++	 * save on suspend too.
++	 */
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct drm_encoder_helper_funcs *func = encoder->helper_private;
++
++		func->restore(encoder);
++	}
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
++		crtc->funcs->restore(crtc);
++
++	if (nv_two_heads(dev)) {
++		NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
++			dev_priv->crtc_owner);
++		NVSetOwner(dev, dev_priv->crtc_owner);
++	}
++
++	NVLockVgaCrtcs(dev, true);
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
+new file mode 100644
+index 0000000..638cf60
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_fb.c
+@@ -0,0 +1,21 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_fb_init(struct drm_device *dev)
++{
++	/* This is what the DDX did for NV_ARCH_04, but an mmio-trace shows
++	 * nvidia reading PFB_CFG_0, then writing back its original value
++	 * (which was 0x701114 in this case).
++	 */
++
++	nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
++	return 0;
++}
++
++void
++nv04_fb_takedown(struct drm_device *dev)
++{
++}
+diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+new file mode 100644
+index 0000000..fd01caa
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+@@ -0,0 +1,312 @@
++/*
++ * Copyright 2009 Ben Skeggs
++ * Copyright 2008 Stuart Bennett
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++#include "nouveau_fbcon.h"
++
++void
++nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++
++	if (info->state != FBINFO_STATE_RUNNING)
++		return;
++
++	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4))
++		nouveau_fbcon_gpu_lockup(info);
++
++	if (info->flags & FBINFO_HWACCEL_DISABLED) {
++		cfb_copyarea(info, region);
++		return;
++	}
++
++	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
++	OUT_RING(chan, (region->sy << 16) | region->sx);
++	OUT_RING(chan, (region->dy << 16) | region->dx);
++	OUT_RING(chan, (region->height << 16) | region->width);
++	FIRE_RING(chan);
++}
++
++void
++nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++
++	if (info->state != FBINFO_STATE_RUNNING)
++		return;
++
++	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7))
++		nouveau_fbcon_gpu_lockup(info);
++
++	if (info->flags & FBINFO_HWACCEL_DISABLED) {
++		cfb_fillrect(info, rect);
++		return;
++	}
++
++	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
++	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
++	BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
++	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
++	else
++		OUT_RING(chan, rect->color);
++	BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
++	OUT_RING(chan, (rect->dx << 16) | rect->dy);
++	OUT_RING(chan, (rect->width << 16) | rect->height);
++	FIRE_RING(chan);
++}
++
++void
++nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++	uint32_t fg;
++	uint32_t bg;
++	uint32_t dsize;
++	uint32_t width;
++	uint32_t *data = (uint32_t *)image->data;
++
++	if (info->state != FBINFO_STATE_RUNNING)
++		return;
++
++	if (image->depth != 1) {
++		cfb_imageblit(info, image);
++		return;
++	}
++
++	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8))
++		nouveau_fbcon_gpu_lockup(info);
++
++	if (info->flags & FBINFO_HWACCEL_DISABLED) {
++		cfb_imageblit(info, image);
++		return;
++	}
++
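++	/* scanlines are padded to 32 pixels; dsize is the length of the
++	 * 1bpp bitmap in 32-bit words */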
++	width = (image->width + 31) & ~31;
++	dsize = (width * image->height) >> 5;
++
++	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
++		bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
++	} else {
++		fg = image->fg_color;
++		bg = image->bg_color;
++	}
++
++	BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
++	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
++	OUT_RING(chan, ((image->dy + image->height) << 16) |
++			 ((image->dx + image->width) & 0xffff));
++	OUT_RING(chan, bg);
++	OUT_RING(chan, fg);
++	OUT_RING(chan, (image->height << 16) | image->width);
++	OUT_RING(chan, (image->height << 16) | width);
++	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
++
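++	/* upload the bitmap in chunks of at most 128 words per method call */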
++	while (dsize) {
++		int iter_len = dsize > 128 ? 128 : dsize;
++
++		if (RING_SPACE(chan, iter_len + 1)) {
++			nouveau_fbcon_gpu_lockup(info);
++			cfb_imageblit(info, image);
++			return;
++		}
++
++		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
++		OUT_RINGp(chan, data, iter_len);
++		data += iter_len;
++		dsize -= iter_len;
++	}
++
++	FIRE_RING(chan);
++}
++
++static int
++nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *obj = NULL;
++	int ret;
++
++	ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
++	if (ret)
++		return ret;
++
++	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++int
++nv04_fbcon_accel_init(struct fb_info *info)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++	const int sub = NvSubCtxSurf2D;
++	int surface_fmt, pattern_fmt, rect_fmt;
++	int ret;
++
++	switch (info->var.bits_per_pixel) {
++	case 8:
++		surface_fmt = 1;
++		pattern_fmt = 3;
++		rect_fmt = 3;
++		break;
++	case 16:
++		surface_fmt = 4;
++		pattern_fmt = 1;
++		rect_fmt = 1;
++		break;
++	case 32:
++		switch (info->var.transp.length) {
++		case 0: /* depth 24 */
++		case 8: /* depth 32 */
++			break;
++		default:
++			return -EINVAL;
++		}
++
++		surface_fmt = 6;
++		pattern_fmt = 3;
++		rect_fmt = 3;
++		break;
++	default:
++		return -EINVAL;
++	}
++
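++	/* NV10 and later use the newer 2D surface and imageblit classes */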
++	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
++				   0x0062 : 0x0042, NvCtxSurf2D);
++	if (ret)
++		return ret;
++
++	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
++	if (ret)
++		return ret;
++
++	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
++	if (ret)
++		return ret;
++
++	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
++	if (ret)
++		return ret;
++
++	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
++	if (ret)
++		return ret;
++
++	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
++				   0x009f : 0x005f, NvImageBlit);
++	if (ret)
++		return ret;
++
++	if (RING_SPACE(chan, 49)) {
++		nouveau_fbcon_gpu_lockup(info);
++		return 0;
++	}
++
++	BEGIN_RING(chan, sub, 0x0000, 1);
++	OUT_RING(chan, NvCtxSurf2D);
++	BEGIN_RING(chan, sub, 0x0184, 2);
++	OUT_RING(chan, NvDmaFB);
++	OUT_RING(chan, NvDmaFB);
++	BEGIN_RING(chan, sub, 0x0300, 4);
++	OUT_RING(chan, surface_fmt);
++	OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
++	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
++	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
++
++	BEGIN_RING(chan, sub, 0x0000, 1);
++	OUT_RING(chan, NvRop);
++	BEGIN_RING(chan, sub, 0x0300, 1);
++	OUT_RING(chan, 0x55);
++
++	BEGIN_RING(chan, sub, 0x0000, 1);
++	OUT_RING(chan, NvImagePatt);
++	BEGIN_RING(chan, sub, 0x0300, 8);
++	OUT_RING(chan, pattern_fmt);
++#ifdef __BIG_ENDIAN
++	OUT_RING(chan, 2);
++#else
++	OUT_RING(chan, 1);
++#endif
++	OUT_RING(chan, 0);
++	OUT_RING(chan, 1);
++	OUT_RING(chan, ~0);
++	OUT_RING(chan, ~0);
++	OUT_RING(chan, ~0);
++	OUT_RING(chan, ~0);
++
++	BEGIN_RING(chan, sub, 0x0000, 1);
++	OUT_RING(chan, NvClipRect);
++	BEGIN_RING(chan, sub, 0x0300, 2);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
++
++	BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
++	OUT_RING(chan, NvImageBlit);
++	BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
++	OUT_RING(chan, NvCtxSurf2D);
++	BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
++	OUT_RING(chan, 3);
++
++	BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
++	OUT_RING(chan, NvGdiRect);
++	BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
++	OUT_RING(chan, NvCtxSurf2D);
++	BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
++	OUT_RING(chan, NvImagePatt);
++	OUT_RING(chan, NvRop);
++	BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
++	OUT_RING(chan, rect_fmt);
++	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
++	OUT_RING(chan, 3);
++
++	FIRE_RING(chan);
++
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
+new file mode 100644
+index 0000000..f31347b
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
+@@ -0,0 +1,305 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
++#define NV04_RAMFC__SIZE 32
++#define NV04_RAMFC_DMA_PUT                                       0x00
++#define NV04_RAMFC_DMA_GET                                       0x04
++#define NV04_RAMFC_DMA_INSTANCE                                  0x08
++#define NV04_RAMFC_DMA_STATE                                     0x0C
++#define NV04_RAMFC_DMA_FETCH                                     0x10
++#define NV04_RAMFC_ENGINE                                        0x14
++#define NV04_RAMFC_PULL1_ENGINE                                  0x18
++
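++/* RAMFC entries are accessed as 32-bit words, hence the /4 */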
++#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
++					 NV04_RAMFC_##offset/4, (val))
++#define RAMFC_RD(offset)      nv_ro32(dev, chan->ramfc->gpuobj, \
++					 NV04_RAMFC_##offset/4)
++
++void
++nv04_fifo_disable(struct drm_device *dev)
++{
++	uint32_t tmp;
++
++	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
++	tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
++}
++
++void
++nv04_fifo_enable(struct drm_device *dev)
++{
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
++}
++
++bool
++nv04_fifo_reassign(struct drm_device *dev, bool enable)
++{
++	uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
++
++	nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
++	return (reassign == 1);
++}
++
++bool
++nv04_fifo_cache_flush(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
++	uint64_t start = ptimer->read(dev);
++
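++	/* wait for CACHE1 GET to catch up with PUT; PTIMER counts in ns,
++	 * so this gives up after 100ms */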
++	do {
++		if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
++		    nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
++			return true;
++
++	} while (ptimer->read(dev) - start < 100000000);
++
++	NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
++
++	return false;
++}
++
++bool
++nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
++{
++	uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
++
++	if (enable) {
++		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
++	} else {
++		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
++		nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
++	}
++
++	return !!(pull & 1);
++}
++
++int
++nv04_fifo_channel_id(struct drm_device *dev)
++{
++	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
++			NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
++}
++
++int
++nv04_fifo_create_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int ret;
++
++	ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
++						NV04_RAMFC__SIZE,
++						NVOBJ_FLAG_ZERO_ALLOC |
++						NVOBJ_FLAG_ZERO_FREE,
++						NULL, &chan->ramfc);
++	if (ret)
++		return ret;
++
++	/* Set up initial state */
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	RAMFC_WR(DMA_PUT, chan->pushbuf_base);
++	RAMFC_WR(DMA_GET, chan->pushbuf_base);
++	RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
++	RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++			     NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++			     NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++			     NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++			     0));
++	dev_priv->engine.instmem.finish_access(dev);
++
++	/* enable the fifo dma operation */
++	nv_wr32(dev, NV04_PFIFO_MODE,
++		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
++	return 0;
++}
++
++void
++nv04_fifo_destroy_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++
++	nv_wr32(dev, NV04_PFIFO_MODE,
++		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
++
++	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++static void
++nv04_fifo_do_load_context(struct drm_device *dev, int chid)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t fc = NV04_RAMFC(chid), tmp;
++
++	dev_priv->engine.instmem.prepare_access(dev, false);
++
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
++	tmp = nv_ri32(dev, fc + 8);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
++
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
++}
++
++int
++nv04_fifo_load_context(struct nouveau_channel *chan)
++{
++	uint32_t tmp;
++
++	nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
++			   NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++	nv04_fifo_do_load_context(chan->dev, chan->id);
++	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
++
++	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
++	tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
++	nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++	return 0;
++}
++
++int
++nv04_fifo_unload_context(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	struct nouveau_channel *chan = NULL;
++	uint32_t tmp;
++	int chid;
++
++	chid = pfifo->channel_id(dev);
++	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
++		return 0;
++
++	chan = dev_priv->fifos[chid];
++	if (!chan) {
++		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
++		return -EINVAL;
++	}
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
++	RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
++	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
++	tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
++	RAMFC_WR(DMA_INSTANCE, tmp);
++	RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
++	RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
++	RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
++	RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
++	dev_priv->engine.instmem.finish_access(dev);
++
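++	/* park PFIFO on the last channel id, which is normally left unused */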
++	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
++	return 0;
++}
++
++static void
++nv04_fifo_init_reset(struct drm_device *dev)
++{
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);
++
++	nv_wr32(dev, 0x003224, 0x000f0078);
++	nv_wr32(dev, 0x002044, 0x0101ffff);
++	nv_wr32(dev, 0x002040, 0x000000ff);
++	nv_wr32(dev, 0x002500, 0x00000000);
++	nv_wr32(dev, 0x003000, 0x00000000);
++	nv_wr32(dev, 0x003050, 0x00000000);
++	nv_wr32(dev, 0x003200, 0x00000000);
++	nv_wr32(dev, 0x003250, 0x00000000);
++	nv_wr32(dev, 0x003220, 0x00000000);
++
++	nv_wr32(dev, 0x003250, 0x00000000);
++	nv_wr32(dev, 0x003270, 0x00000000);
++	nv_wr32(dev, 0x003210, 0x00000000);
++}
++
++static void
++nv04_fifo_init_ramxx(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
++				       ((dev_priv->ramht_bits - 9) << 16) |
++				       (dev_priv->ramht_offset >> 8));
++	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset >> 8);
++	nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
++}
++
++static void
++nv04_fifo_init_intr(struct drm_device *dev)
++{
++	nv_wr32(dev, 0x002100, 0xffffffff);
++	nv_wr32(dev, 0x002140, 0xffffffff);
++}
++
++int
++nv04_fifo_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	int i;
++
++	nv04_fifo_init_reset(dev);
++	nv04_fifo_init_ramxx(dev);
++
++	nv04_fifo_do_load_context(dev, pfifo->channels - 1);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
++
++	nv04_fifo_init_intr(dev);
++	pfifo->enable(dev);
++
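++	/* re-enable DMA mode for any channels that already exist */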
++	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++		if (dev_priv->fifos[i]) {
++			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
++			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
++		}
++	}
++
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
+new file mode 100644
+index 0000000..e260986
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_graph.c
+@@ -0,0 +1,584 @@
++/*
++ * Copyright 2007 Stephane Marchesin
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++
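++/* PGRAPH registers saved and restored across a software context switch */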
++static uint32_t nv04_graph_ctx_regs[] = {
++	0x0040053c,
++	0x00400544,
++	0x00400540,
++	0x00400548,
++	NV04_PGRAPH_CTX_SWITCH1,
++	NV04_PGRAPH_CTX_SWITCH2,
++	NV04_PGRAPH_CTX_SWITCH3,
++	NV04_PGRAPH_CTX_SWITCH4,
++	NV04_PGRAPH_CTX_CACHE1,
++	NV04_PGRAPH_CTX_CACHE2,
++	NV04_PGRAPH_CTX_CACHE3,
++	NV04_PGRAPH_CTX_CACHE4,
++	0x00400184,
++	0x004001a4,
++	0x004001c4,
++	0x004001e4,
++	0x00400188,
++	0x004001a8,
++	0x004001c8,
++	0x004001e8,
++	0x0040018c,
++	0x004001ac,
++	0x004001cc,
++	0x004001ec,
++	0x00400190,
++	0x004001b0,
++	0x004001d0,
++	0x004001f0,
++	0x00400194,
++	0x004001b4,
++	0x004001d4,
++	0x004001f4,
++	0x00400198,
++	0x004001b8,
++	0x004001d8,
++	0x004001f8,
++	0x0040019c,
++	0x004001bc,
++	0x004001dc,
++	0x004001fc,
++	0x00400174,
++	NV04_PGRAPH_DMA_START_0,
++	NV04_PGRAPH_DMA_START_1,
++	NV04_PGRAPH_DMA_LENGTH,
++	NV04_PGRAPH_DMA_MISC,
++	NV04_PGRAPH_DMA_PITCH,
++	NV04_PGRAPH_BOFFSET0,
++	NV04_PGRAPH_BBASE0,
++	NV04_PGRAPH_BLIMIT0,
++	NV04_PGRAPH_BOFFSET1,
++	NV04_PGRAPH_BBASE1,
++	NV04_PGRAPH_BLIMIT1,
++	NV04_PGRAPH_BOFFSET2,
++	NV04_PGRAPH_BBASE2,
++	NV04_PGRAPH_BLIMIT2,
++	NV04_PGRAPH_BOFFSET3,
++	NV04_PGRAPH_BBASE3,
++	NV04_PGRAPH_BLIMIT3,
++	NV04_PGRAPH_BOFFSET4,
++	NV04_PGRAPH_BBASE4,
++	NV04_PGRAPH_BLIMIT4,
++	NV04_PGRAPH_BOFFSET5,
++	NV04_PGRAPH_BBASE5,
++	NV04_PGRAPH_BLIMIT5,
++	NV04_PGRAPH_BPITCH0,
++	NV04_PGRAPH_BPITCH1,
++	NV04_PGRAPH_BPITCH2,
++	NV04_PGRAPH_BPITCH3,
++	NV04_PGRAPH_BPITCH4,
++	NV04_PGRAPH_SURFACE,
++	NV04_PGRAPH_STATE,
++	NV04_PGRAPH_BSWIZZLE2,
++	NV04_PGRAPH_BSWIZZLE5,
++	NV04_PGRAPH_BPIXEL,
++	NV04_PGRAPH_NOTIFY,
++	NV04_PGRAPH_PATT_COLOR0,
++	NV04_PGRAPH_PATT_COLOR1,
++	NV04_PGRAPH_PATT_COLORRAM+0x00,
++	NV04_PGRAPH_PATT_COLORRAM+0x04,
++	NV04_PGRAPH_PATT_COLORRAM+0x08,
++	NV04_PGRAPH_PATT_COLORRAM+0x0c,
++	NV04_PGRAPH_PATT_COLORRAM+0x10,
++	NV04_PGRAPH_PATT_COLORRAM+0x14,
++	NV04_PGRAPH_PATT_COLORRAM+0x18,
++	NV04_PGRAPH_PATT_COLORRAM+0x1c,
++	NV04_PGRAPH_PATT_COLORRAM+0x20,
++	NV04_PGRAPH_PATT_COLORRAM+0x24,
++	NV04_PGRAPH_PATT_COLORRAM+0x28,
++	NV04_PGRAPH_PATT_COLORRAM+0x2c,
++	NV04_PGRAPH_PATT_COLORRAM+0x30,
++	NV04_PGRAPH_PATT_COLORRAM+0x34,
++	NV04_PGRAPH_PATT_COLORRAM+0x38,
++	NV04_PGRAPH_PATT_COLORRAM+0x3c,
++	NV04_PGRAPH_PATT_COLORRAM+0x40,
++	NV04_PGRAPH_PATT_COLORRAM+0x44,
++	NV04_PGRAPH_PATT_COLORRAM+0x48,
++	NV04_PGRAPH_PATT_COLORRAM+0x4c,
++	NV04_PGRAPH_PATT_COLORRAM+0x50,
++	NV04_PGRAPH_PATT_COLORRAM+0x54,
++	NV04_PGRAPH_PATT_COLORRAM+0x58,
++	NV04_PGRAPH_PATT_COLORRAM+0x5c,
++	NV04_PGRAPH_PATT_COLORRAM+0x60,
++	NV04_PGRAPH_PATT_COLORRAM+0x64,
++	NV04_PGRAPH_PATT_COLORRAM+0x68,
++	NV04_PGRAPH_PATT_COLORRAM+0x6c,
++	NV04_PGRAPH_PATT_COLORRAM+0x70,
++	NV04_PGRAPH_PATT_COLORRAM+0x74,
++	NV04_PGRAPH_PATT_COLORRAM+0x78,
++	NV04_PGRAPH_PATT_COLORRAM+0x7c,
++	NV04_PGRAPH_PATT_COLORRAM+0x80,
++	NV04_PGRAPH_PATT_COLORRAM+0x84,
++	NV04_PGRAPH_PATT_COLORRAM+0x88,
++	NV04_PGRAPH_PATT_COLORRAM+0x8c,
++	NV04_PGRAPH_PATT_COLORRAM+0x90,
++	NV04_PGRAPH_PATT_COLORRAM+0x94,
++	NV04_PGRAPH_PATT_COLORRAM+0x98,
++	NV04_PGRAPH_PATT_COLORRAM+0x9c,
++	NV04_PGRAPH_PATT_COLORRAM+0xa0,
++	NV04_PGRAPH_PATT_COLORRAM+0xa4,
++	NV04_PGRAPH_PATT_COLORRAM+0xa8,
++	NV04_PGRAPH_PATT_COLORRAM+0xac,
++	NV04_PGRAPH_PATT_COLORRAM+0xb0,
++	NV04_PGRAPH_PATT_COLORRAM+0xb4,
++	NV04_PGRAPH_PATT_COLORRAM+0xb8,
++	NV04_PGRAPH_PATT_COLORRAM+0xbc,
++	NV04_PGRAPH_PATT_COLORRAM+0xc0,
++	NV04_PGRAPH_PATT_COLORRAM+0xc4,
++	NV04_PGRAPH_PATT_COLORRAM+0xc8,
++	NV04_PGRAPH_PATT_COLORRAM+0xcc,
++	NV04_PGRAPH_PATT_COLORRAM+0xd0,
++	NV04_PGRAPH_PATT_COLORRAM+0xd4,
++	NV04_PGRAPH_PATT_COLORRAM+0xd8,
++	NV04_PGRAPH_PATT_COLORRAM+0xdc,
++	NV04_PGRAPH_PATT_COLORRAM+0xe0,
++	NV04_PGRAPH_PATT_COLORRAM+0xe4,
++	NV04_PGRAPH_PATT_COLORRAM+0xe8,
++	NV04_PGRAPH_PATT_COLORRAM+0xec,
++	NV04_PGRAPH_PATT_COLORRAM+0xf0,
++	NV04_PGRAPH_PATT_COLORRAM+0xf4,
++	NV04_PGRAPH_PATT_COLORRAM+0xf8,
++	NV04_PGRAPH_PATT_COLORRAM+0xfc,
++	NV04_PGRAPH_PATTERN,
++	0x0040080c,
++	NV04_PGRAPH_PATTERN_SHAPE,
++	0x00400600,
++	NV04_PGRAPH_ROP3,
++	NV04_PGRAPH_CHROMA,
++	NV04_PGRAPH_BETA_AND,
++	NV04_PGRAPH_BETA_PREMULT,
++	NV04_PGRAPH_CONTROL0,
++	NV04_PGRAPH_CONTROL1,
++	NV04_PGRAPH_CONTROL2,
++	NV04_PGRAPH_BLEND,
++	NV04_PGRAPH_STORED_FMT,
++	NV04_PGRAPH_SOURCE_COLOR,
++	0x00400560,
++	0x00400568,
++	0x00400564,
++	0x0040056c,
++	0x00400400,
++	0x00400480,
++	0x00400404,
++	0x00400484,
++	0x00400408,
++	0x00400488,
++	0x0040040c,
++	0x0040048c,
++	0x00400410,
++	0x00400490,
++	0x00400414,
++	0x00400494,
++	0x00400418,
++	0x00400498,
++	0x0040041c,
++	0x0040049c,
++	0x00400420,
++	0x004004a0,
++	0x00400424,
++	0x004004a4,
++	0x00400428,
++	0x004004a8,
++	0x0040042c,
++	0x004004ac,
++	0x00400430,
++	0x004004b0,
++	0x00400434,
++	0x004004b4,
++	0x00400438,
++	0x004004b8,
++	0x0040043c,
++	0x004004bc,
++	0x00400440,
++	0x004004c0,
++	0x00400444,
++	0x004004c4,
++	0x00400448,
++	0x004004c8,
++	0x0040044c,
++	0x004004cc,
++	0x00400450,
++	0x004004d0,
++	0x00400454,
++	0x004004d4,
++	0x00400458,
++	0x004004d8,
++	0x0040045c,
++	0x004004dc,
++	0x00400460,
++	0x004004e0,
++	0x00400464,
++	0x004004e4,
++	0x00400468,
++	0x004004e8,
++	0x0040046c,
++	0x004004ec,
++	0x00400470,
++	0x004004f0,
++	0x00400474,
++	0x004004f4,
++	0x00400478,
++	0x004004f8,
++	0x0040047c,
++	0x004004fc,
++	0x00400534,
++	0x00400538,
++	0x00400514,
++	0x00400518,
++	0x0040051c,
++	0x00400520,
++	0x00400524,
++	0x00400528,
++	0x0040052c,
++	0x00400530,
++	0x00400d00,
++	0x00400d40,
++	0x00400d80,
++	0x00400d04,
++	0x00400d44,
++	0x00400d84,
++	0x00400d08,
++	0x00400d48,
++	0x00400d88,
++	0x00400d0c,
++	0x00400d4c,
++	0x00400d8c,
++	0x00400d10,
++	0x00400d50,
++	0x00400d90,
++	0x00400d14,
++	0x00400d54,
++	0x00400d94,
++	0x00400d18,
++	0x00400d58,
++	0x00400d98,
++	0x00400d1c,
++	0x00400d5c,
++	0x00400d9c,
++	0x00400d20,
++	0x00400d60,
++	0x00400da0,
++	0x00400d24,
++	0x00400d64,
++	0x00400da4,
++	0x00400d28,
++	0x00400d68,
++	0x00400da8,
++	0x00400d2c,
++	0x00400d6c,
++	0x00400dac,
++	0x00400d30,
++	0x00400d70,
++	0x00400db0,
++	0x00400d34,
++	0x00400d74,
++	0x00400db4,
++	0x00400d38,
++	0x00400d78,
++	0x00400db8,
++	0x00400d3c,
++	0x00400d7c,
++	0x00400dbc,
++	0x00400590,
++	0x00400594,
++	0x00400598,
++	0x0040059c,
++	0x004005a8,
++	0x004005ac,
++	0x004005b0,
++	0x004005b4,
++	0x004005c0,
++	0x004005c4,
++	0x004005c8,
++	0x004005cc,
++	0x004005d0,
++	0x004005d4,
++	0x004005d8,
++	0x004005dc,
++	0x004005e0,
++	NV04_PGRAPH_PASSTHRU_0,
++	NV04_PGRAPH_PASSTHRU_1,
++	NV04_PGRAPH_PASSTHRU_2,
++	NV04_PGRAPH_DVD_COLORFMT,
++	NV04_PGRAPH_SCALED_FORMAT,
++	NV04_PGRAPH_MISC24_0,
++	NV04_PGRAPH_MISC24_1,
++	NV04_PGRAPH_MISC24_2,
++	0x00400500,
++	0x00400504,
++	NV04_PGRAPH_VALID1,
++	NV04_PGRAPH_VALID2,
++	NV04_PGRAPH_DEBUG_3
++};
++
++struct graph_state {
++	int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
++};
++
++struct nouveau_channel *
++nv04_graph_channel(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int chid = dev_priv->engine.fifo.channels;
++
++	if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
++		chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
++
++	if (chid >= dev_priv->engine.fifo.channels)
++		return NULL;
++
++	return dev_priv->fifos[chid];
++}
++
++void
++nv04_graph_context_switch(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_channel *chan = NULL;
++	int chid;
++
++	pgraph->fifo_access(dev, false);
++	nouveau_wait_for_idle(dev);
++
++	/* If previous context is valid, we need to save it */
++	pgraph->unload_context(dev);
++
++	/* Load context for next channel */
++	chid = dev_priv->engine.fifo.channel_id(dev);
++	chan = dev_priv->fifos[chid];
++	if (chan)
++		nv04_graph_load_context(chan);
++
++	pgraph->fifo_access(dev, true);
++}
++
++static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
++{
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
++		if (nv04_graph_ctx_regs[i] == reg)
++			return &ctx->nv04[i];
++	}
++
++	return NULL;
++}
++
++int nv04_graph_create_context(struct nouveau_channel *chan)
++{
++	struct graph_state *pgraph_ctx;
++
++	NV_DEBUG(chan->dev, "nv04_graph_create_context %d\n", chan->id);
++
++	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
++						GFP_KERNEL);
++	if (pgraph_ctx == NULL)
++		return -ENOMEM;
++
++	*ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
++
++	return 0;
++}
++
++void nv04_graph_destroy_context(struct nouveau_channel *chan)
++{
++	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
++
++	kfree(pgraph_ctx);
++	chan->pgraph_ctx = NULL;
++}
++
++int nv04_graph_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
++	uint32_t tmp;
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
++		nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
++
++	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
++
++	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
++	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
++
++	tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
++	nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
++
++	return 0;
++}
++
++int
++nv04_graph_unload_context(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_channel *chan = NULL;
++	struct graph_state *ctx;
++	uint32_t tmp;
++	int i;
++
++	chan = pgraph->channel(dev);
++	if (!chan)
++		return 0;
++	ctx = chan->pgraph_ctx;
++
++	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
++		ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
++
++	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
++	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
++	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
++	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
++	return 0;
++}
++
++int nv04_graph_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t tmp;
++
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
++			~NV_PMC_ENABLE_PGRAPH);
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
++			 NV_PMC_ENABLE_PGRAPH);
++
++	/* Enable PGRAPH interrupts */
++	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
++	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++	nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
++	nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
++	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
++	/* 1231C000 blob, 001 haiku */
++	/*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
++	/* 0x72111100 blob, 01 haiku */
++	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
++	/* haiku same */
++
++	/*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
++	/* haiku and blob 10d4 */
++
++	nv_wr32(dev, NV04_PGRAPH_STATE, 0xFFFFFFFF);
++	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
++	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
++	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
++	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
++
++	/* These don't belong here, they're part of a per-channel context */
++	nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
++	nv_wr32(dev, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
++
++	return 0;
++}
++
++void nv04_graph_takedown(struct drm_device *dev)
++{
++}
++
++void
++nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
++{
++	if (enabled)
++		nv_wr32(dev, NV04_PGRAPH_FIFO,
++					nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
++	else
++		nv_wr32(dev, NV04_PGRAPH_FIFO,
++					nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
++}
++
++static int
++nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
++			int mthd, uint32_t data)
++{
++	chan->fence.last_sequence_irq = data;
++	nouveau_fence_handler(chan->dev, chan->id);
++	return 0;
++}
++
++static int
++nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
++			      int mthd, uint32_t data)
++{
++	struct drm_device *dev = chan->dev;
++	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
++	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
++	uint32_t tmp;
++
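++	/* the operation is held in bits 17:15 of the object's context word */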
++	tmp  = nv_ri32(dev, instance);
++	tmp &= ~0x00038000;
++	tmp |= ((data & 7) << 15);
++
++	nv_wi32(dev, instance, tmp);
++	nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
++	nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
++	return 0;
++}
++
++static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
++	{ 0x0150, nv04_graph_mthd_set_ref },
++	{}
++};
++
++static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
++	{ 0x02fc, nv04_graph_mthd_set_operation },
++	{}
++};
++
++struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
++	{ 0x0039, false, NULL },
++	{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
++	{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
++	{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
++	{ 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
++	{ 0x0030, false, NULL }, /* null */
++	{ 0x0042, false, NULL }, /* surf2d */
++	{ 0x0043, false, NULL }, /* rop */
++	{ 0x0012, false, NULL }, /* beta1 */
++	{ 0x0072, false, NULL }, /* beta4 */
++	{ 0x0019, false, NULL }, /* cliprect */
++	{ 0x0044, false, NULL }, /* pattern */
++	{ 0x0052, false, NULL }, /* swzsurf */
++	{ 0x0053, false, NULL }, /* surf3d */
++	{ 0x0054, false, NULL }, /* tex_tri */
++	{ 0x0055, false, NULL }, /* multitex_tri */
++	{ 0x506e, true, nv04_graph_mthds_sw },
++	{}
++};
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
+new file mode 100644
+index 0000000..a3b9563
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
+@@ -0,0 +1,208 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++/* returns the size of fifo context */
++static int
++nouveau_fifo_ctx_size(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (dev_priv->chipset >= 0x40)
++		return 128;
++	else if (dev_priv->chipset >= 0x17)
++		return 64;
++
++	return 32;
++}
++
++static void
++nv04_instmem_determine_amount(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int i;
++
++	/* Figure out how much instance memory we need */
++	if (dev_priv->card_type >= NV_40) {
++		/* We'll want more instance memory than this on some NV4x cards.
++		 * There's a 16MB aperture to play with that maps onto the end
++		 * of vram.  For now, only reserve a small piece until we know
++		 * more about what each chipset requires.
++		 */
++		switch (dev_priv->chipset) {
++		case 0x40:
++		case 0x47:
++		case 0x49:
++		case 0x4b:
++			dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
++			break;
++		default:
++			dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
++			break;
++		}
++	} else {
++		/* XXX: what *are* the limits on <NV40 cards? */
++		dev_priv->ramin_rsvd_vram = (512 * 1024);
++	}
++	NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
++
++	/* Clear all of it, except the BIOS image that's in the first 64KiB */
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
++		nv_wi32(dev, i, 0x00000000);
++	dev_priv->engine.instmem.finish_access(dev);
++}
++
++static void
++nv04_instmem_configure_fixed_tables(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_engine *engine = &dev_priv->engine;
++
++	/* FIFO hash table (RAMHT)
++	 *   use 4k hash table at RAMIN+0x10000
++	 *   TODO: extend the hash table
++	 */
++	dev_priv->ramht_offset = 0x10000;
++	dev_priv->ramht_bits   = 9;
++	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits); /* nr entries */
++	dev_priv->ramht_size  *= 8; /* 2 32-bit values per entry in RAMHT */
++	NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
++						      dev_priv->ramht_size);
++
++	/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
++	dev_priv->ramro_offset = 0x11200;
++	dev_priv->ramro_size   = 512;
++	NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
++						      dev_priv->ramro_size);
++
++	/* FIFO context table (RAMFC)
++	 *   NV40  : Not sure exactly how to position RAMFC on some cards,
++	 *           0x30002 seems to position it at RAMIN+0x20000 on these
++	 *           cards.  RAMFC is 4KiB (32 fifos, 128-byte entries).
++	 *   Others: Position RAMFC at RAMIN+0x11400
++	 */
++	dev_priv->ramfc_size = engine->fifo.channels *
++						nouveau_fifo_ctx_size(dev);
++	switch (dev_priv->card_type) {
++	case NV_40:
++		dev_priv->ramfc_offset = 0x20000;
++		break;
++	case NV_30:
++	case NV_20:
++	case NV_10:
++	case NV_04:
++	default:
++		dev_priv->ramfc_offset = 0x11400;
++		break;
++	}
++	NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
++						      dev_priv->ramfc_size);
++}
++
++int nv04_instmem_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t offset;
++	int ret = 0;
++
++	nv04_instmem_determine_amount(dev);
++	nv04_instmem_configure_fixed_tables(dev);
++
++	/* Create a heap to manage RAMIN allocations; we don't allocate
++	 * the space that was reserved for RAMHT/FC/RO.
++	 */
++	offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
++
++	/* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
++	 * on certain NV4x chipsets as well as RAMFC.  When 0x2230 == 0
++	 * ("new style" control) the upper 16 bits of 0x2220 point at this
++	 * other mysterious table that's clobbering important things.
++	 *
++	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
++	 * smashed to pieces on us, so reserve 0x30000-0x40000 too.
++	 */
++	if (dev_priv->card_type >= NV_40) {
++		if (offset < 0x40000)
++			offset = 0x40000;
++	}
++
++	ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
++				    offset, dev_priv->ramin_rsvd_vram - offset);
++	if (ret) {
++		dev_priv->ramin_heap = NULL;
++		NV_ERROR(dev, "Failed to init RAMIN heap\n");
++	}
++
++	return ret;
++}
++
++void
++nv04_instmem_takedown(struct drm_device *dev)
++{
++}
++
++int
++nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
++{
++	if (gpuobj->im_backing)
++		return -EINVAL;
++
++	return 0;
++}
++
++void
++nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (gpuobj && gpuobj->im_backing) {
++		if (gpuobj->im_bound)
++			dev_priv->engine.instmem.unbind(dev, gpuobj);
++		gpuobj->im_backing = NULL;
++	}
++}
++
++int
++nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++	if (!gpuobj->im_pramin || gpuobj->im_bound)
++		return -EINVAL;
++
++	gpuobj->im_bound = 1;
++	return 0;
++}
++
++int
++nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++	if (gpuobj->im_bound == 0)
++		return -EINVAL;
++
++	gpuobj->im_bound = 0;
++	return 0;
++}
++
++void
++nv04_instmem_prepare_access(struct drm_device *dev, bool write)
++{
++}
++
++void
++nv04_instmem_finish_access(struct drm_device *dev)
++{
++}
++
++int
++nv04_instmem_suspend(struct drm_device *dev)
++{
++	return 0;
++}
++
++void
++nv04_instmem_resume(struct drm_device *dev)
++{
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
+new file mode 100644
+index 0000000..617ed1e
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_mc.c
+@@ -0,0 +1,20 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_mc_init(struct drm_device *dev)
++{
++	/* Power up everything, resetting each individual unit will
++	 * be done later if needed.
++	 */
++
++	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
++	return 0;
++}
++
++void
++nv04_mc_takedown(struct drm_device *dev)
++{
++}
+diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
+new file mode 100644
+index 0000000..1d09ddd
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_timer.c
+@@ -0,0 +1,51 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv04_timer_init(struct drm_device *dev)
++{
++	nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
++	nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
++
++	/* Just use the pre-existing values when possible for now; these regs
++	 * are not written by the nv DDX driver (its author missed a /4 on the
++	 * address), and writing 8 and 3 to the correct regs breaks the timings
++	 * of the LVDS hardware sequencing microcode.
++	 * A correct solution (involving calculations with the GPU PLL) can
++	 * be done when kernel modesetting lands.
++	 */
++	if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
++				!nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
++		nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
++		nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
++	}
++
++	return 0;
++}
++
++uint64_t
++nv04_timer_read(struct drm_device *dev)
++{
++	uint32_t low;
++	/* From kmmio dumps on nv28 this looks like how the blob does this.
++	 * It reads the high dword twice, before and after.
++	 * The only explanation seems to be that the 64-bit timer counter
++	 * advances between high and low dword reads and may corrupt the
++	 * result. Not confirmed.
++	 */
++	uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
++	uint32_t high1;
++	do {
++		high1 = high2;
++		low = nv_rd32(dev, NV04_PTIMER_TIME_0);
++		high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
++	} while (high1 != high2);
++	return (((uint64_t)high2) << 32) | (uint64_t)low;
++}
++
++void
++nv04_timer_takedown(struct drm_device *dev)
++{
++}
+diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
+new file mode 100644
+index 0000000..9c63099
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv04_tv.c
+@@ -0,0 +1,305 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_crtc.h"
++#include "nouveau_hw.h"
++#include "drm_crtc_helper.h"
++
++#include "i2c/ch7006.h"
++
++static struct {
++	struct i2c_board_info board_info;
++	struct drm_encoder_funcs funcs;
++	struct drm_encoder_helper_funcs hfuncs;
++	void *params;
++
++} nv04_tv_encoder_info[] = {
++	{
++		.board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
++		.params = &(struct ch7006_encoder_params) {
++			CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
++			0, 0, 0,
++			CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
++			CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
++		},
++	},
++};
++
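++/* A zero-length transfer is a cheap presence check: it succeeds only if
++ * a device acknowledges the address.
++ */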
++static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
++{
++	struct i2c_msg msg = {
++		.addr = addr,
++		.len = 0,
++	};
++
++	return i2c_transfer(adapter, &msg, 1) == 1;
++}
++
++int nv04_tv_identify(struct drm_device *dev, int i2c_index)
++{
++	struct nouveau_i2c_chan *i2c;
++	bool was_locked;
++	int i;
++
++	NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
++
++	i2c = nouveau_i2c_find(dev, i2c_index);
++	if (!i2c)
++		return -ENODEV;
++
++	was_locked = NVLockVgaCrtcs(dev, false);
++
++	for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
++		if (probe_i2c_addr(&i2c->adapter,
++				   nv04_tv_encoder_info[i].board_info.addr)) {
++			break;
++		}
++	}
++
++	if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
++		NV_TRACE(dev, "Detected TV encoder: %s\n",
++			 nv04_tv_encoder_info[i].board_info.type);
++
++	} else {
++		NV_TRACE(dev, "No TV encoders found.\n");
++		i = -ENODEV;
++	}
++
++	NVLockVgaCrtcs(dev, was_locked);
++	return i;
++}
++
++#define PLLSEL_TV_CRTC1_MASK				\
++	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		\
++	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
++#define PLLSEL_TV_CRTC2_MASK				\
++	(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2		\
++	 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
++
++static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_mode_state *state = &dev_priv->mode_reg;
++	uint8_t crtc1A;
++
++	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
++		mode, nv_encoder->dcb->index);
++
++	state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
++
++	if (mode == DRM_MODE_DPMS_ON) {
++		int head = nouveau_crtc(encoder->crtc)->index;
++		crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);
++
++		state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
++					PLLSEL_TV_CRTC1_MASK;
++
++		/* Inhibit hsync */
++		crtc1A |= 0x80;
++
++		NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
++	}
++
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
++
++	to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
++}
++
++static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
++
++	state->tv_setup = 0;
++
++	if (bind) {
++		state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
++		state->CRTC[NV_CIO_CRE_49] |= 0x10;
++	} else {
++		state->CRTC[NV_CIO_CRE_49] &= ~0x10;
++	}
++
++	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
++		       state->CRTC[NV_CIO_CRE_LCD__INDEX]);
++	NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
++		       state->CRTC[NV_CIO_CRE_49]);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
++		      state->tv_setup);
++}
++
++static void nv04_tv_prepare(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	int head = nouveau_crtc(encoder->crtc)->index;
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++
++	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
++
++	nv04_dfp_disable(dev, head);
++
++	if (nv_two_heads(dev))
++		nv04_tv_bind(dev, head ^ 1, false);
++
++	nv04_tv_bind(dev, head, true);
++}
++
++static void nv04_tv_mode_set(struct drm_encoder *encoder,
++			     struct drm_display_mode *mode,
++			     struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
++	struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
++
++	regp->tv_htotal = adjusted_mode->htotal;
++	regp->tv_vtotal = adjusted_mode->vtotal;
++
++	/* These delay the TV signals with respect to the VGA port,
++	 * they might be useful if we ever allow a CRTC to drive
++	 * multiple outputs.
++	 */
++	regp->tv_hskew = 1;
++	regp->tv_hsync_delay = 1;
++	regp->tv_hsync_delay2 = 64;
++	regp->tv_vskew = 1;
++	regp->tv_vsync_delay = 1;
++
++	to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
++}
++
++static void nv04_tv_commit(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++
++	helper->dpms(encoder, DRM_MODE_DPMS_ON);
++
++	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++		      drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
++		      '@' + ffs(nv_encoder->dcb->or));
++}
++
++static void nv04_tv_destroy(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++	to_encoder_slave(encoder)->slave_funcs->destroy(encoder);
++
++	drm_encoder_cleanup(encoder);
++
++	kfree(nv_encoder);
++}
++
++int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
++{
++	struct nouveau_encoder *nv_encoder;
++	struct drm_encoder *encoder;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct i2c_adapter *adap;
++	struct drm_encoder_funcs *funcs = NULL;
++	struct drm_encoder_helper_funcs *hfuncs = NULL;
++	struct drm_encoder_slave_funcs *sfuncs = NULL;
++	int i2c_index = entry->i2c_index;
++	int type, ret;
++	bool was_locked;
++
++	/* Ensure that we can talk to this encoder */
++	type = nv04_tv_identify(dev, i2c_index);
++	if (type < 0)
++		return type;
++
++	/* Allocate the necessary memory */
++	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
++	if (!nv_encoder)
++		return -ENOMEM;
++
++	/* Initialize the common members */
++	encoder = to_drm_encoder(nv_encoder);
++
++	funcs = &nv04_tv_encoder_info[type].funcs;
++	hfuncs = &nv04_tv_encoder_info[type].hfuncs;
++
++	drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
++	drm_encoder_helper_add(encoder, hfuncs);
++
++	encoder->possible_crtcs = entry->heads;
++	encoder->possible_clones = 0;
++
++	nv_encoder->dcb = entry;
++	nv_encoder->or = ffs(entry->or) - 1;
++
++	/* Run the slave-specific initialization */
++	adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
++
++	was_locked = NVLockVgaCrtcs(dev, false);
++
++	ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
++				   &nv04_tv_encoder_info[type].board_info);
++
++	NVLockVgaCrtcs(dev, was_locked);
++
++	if (ret < 0)
++		goto fail;
++
++	/* Fill the function pointers */
++	sfuncs = to_encoder_slave(encoder)->slave_funcs;
++
++	*funcs = (struct drm_encoder_funcs) {
++		.destroy = nv04_tv_destroy,
++	};
++
++	*hfuncs = (struct drm_encoder_helper_funcs) {
++		.dpms = nv04_tv_dpms,
++		.save = sfuncs->save,
++		.restore = sfuncs->restore,
++		.mode_fixup = sfuncs->mode_fixup,
++		.prepare = nv04_tv_prepare,
++		.commit = nv04_tv_commit,
++		.mode_set = nv04_tv_mode_set,
++		.detect = sfuncs->detect,
++	};
++
++	/* Set the slave encoder configuration */
++	sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);
++
++	return 0;
++
++fail:
++	drm_encoder_cleanup(encoder);
++
++	kfree(nv_encoder);
++	return ret;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
+new file mode 100644
+index 0000000..cc5cda4
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv10_fb.c
+@@ -0,0 +1,44 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++void
++nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
++			  uint32_t size, uint32_t pitch)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t limit = max(1u, addr + size) - 1;
++
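++	/* A non-zero pitch activates the region; the flag is bit 0 on
++	 * NV20 and later, bit 31 on earlier chips.
++	 */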
++	if (pitch) {
++		if (dev_priv->card_type >= NV_20)
++			addr |= 1;
++		else
++			addr |= 1 << 31;
++	}
++
++	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
++	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
++	nv_wr32(dev, NV10_PFB_TILE(i), addr);
++}
++
++int
++nv10_fb_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	int i;
++
++	pfb->num_tiles = NV10_PFB_TILE__SIZE;
++
++	/* Turn all the tiling regions off. */
++	for (i = 0; i < pfb->num_tiles; i++)
++		pfb->set_region_tiling(dev, i, 0, 0, 0);
++
++	return 0;
++}
++
++void
++nv10_fb_takedown(struct drm_device *dev)
++{
++}
+diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
+new file mode 100644
+index 0000000..7aeabf2
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
+@@ -0,0 +1,260 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
++#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
++
++int
++nv10_fifo_channel_id(struct drm_device *dev)
++{
++	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
++			NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
++}
++
++int
++nv10_fifo_create_context(struct nouveau_channel *chan)
++{
++	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
++	struct drm_device *dev = chan->dev;
++	uint32_t fc = NV10_RAMFC(chan->id);
++	int ret;
++
++	ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
++				      NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
++				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
++	if (ret)
++		return ret;
++
++	/* Fill in the entries that are seen filled in dumps of the nvidia
++	 * driver just after the channel is put into DMA mode.
++	 */
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	nv_wi32(dev, fc +  0, chan->pushbuf_base);
++	nv_wi32(dev, fc +  4, chan->pushbuf_base);
++	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
++	nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++			      NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++			      0);
++	dev_priv->engine.instmem.finish_access(dev);
++
++	/* enable DMA mode on this channel's fifo */
++	nv_wr32(dev, NV04_PFIFO_MODE,
++		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
++	return 0;
++}
++
++void
++nv10_fifo_destroy_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++
++	nv_wr32(dev, NV04_PFIFO_MODE,
++			nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
++
++	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++static void
++nv10_fifo_do_load_context(struct drm_device *dev, int chid)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t fc = NV10_RAMFC(chid), tmp;
++
++	dev_priv->engine.instmem.prepare_access(dev, false);
++
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
++	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
++
++	tmp = nv_ri32(dev, fc + 12);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
++
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
++
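++	/* Chipsets before NV17 use 32-byte RAMFC entries and lack the
++	 * acquire/semaphore state handled below.
++	 */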
++	if (dev_priv->chipset < 0x17)
++		goto out;
++
++	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
++	tmp = nv_ri32(dev, fc + 36);
++	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
++	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
++	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
++	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
++
++out:
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
++}
++
++int
++nv10_fifo_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	uint32_t tmp;
++
++	nv10_fifo_do_load_context(dev, chan->id);
++
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
++		     NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
++
++	/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
++	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++	return 0;
++}
++
++int
++nv10_fifo_unload_context(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	uint32_t fc, tmp;
++	int chid;
++
++	chid = pfifo->channel_id(dev);
++	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
++		return 0;
++	fc = NV10_RAMFC(chid);
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++
++	nv_wi32(dev, fc +  0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
++	nv_wi32(dev, fc +  4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
++	nv_wi32(dev, fc +  8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
++	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
++	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
++	nv_wi32(dev, fc + 12, tmp);
++	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
++	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
++	nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
++	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
++
++	if (dev_priv->chipset < 0x17)
++		goto out;
++
++	nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
++	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
++	nv_wi32(dev, fc + 36, tmp);
++	nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
++	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
++	nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
++
++out:
++	dev_priv->engine.instmem.finish_access(dev);
++
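++	/* Park CACHE1 on the highest channel id, which the driver appears
++	 * to reserve as a placeholder.
++	 */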
++	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
++	return 0;
++}
++
++static void
++nv10_fifo_init_reset(struct drm_device *dev)
++{
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);
++
++	nv_wr32(dev, 0x003224, 0x000f0078);
++	nv_wr32(dev, 0x002044, 0x0101ffff);
++	nv_wr32(dev, 0x002040, 0x000000ff);
++	nv_wr32(dev, 0x002500, 0x00000000);
++	nv_wr32(dev, 0x003000, 0x00000000);
++	nv_wr32(dev, 0x003050, 0x00000000);
++
++	nv_wr32(dev, 0x003258, 0x00000000);
++	nv_wr32(dev, 0x003210, 0x00000000);
++	nv_wr32(dev, 0x003270, 0x00000000);
++}
++
++static void
++nv10_fifo_init_ramxx(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
++				       ((dev_priv->ramht_bits - 9) << 16) |
++				       (dev_priv->ramht_offset >> 8));
++	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset >> 8);
++
++	if (dev_priv->chipset < 0x17) {
++		nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
++	} else {
++		nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
++					       (1 << 16) /* 64-byte entries */);
++		/* XXX the nvidia blob sets bits 18, 21 and 23 for nv20 & nv30 */
++	}
++}
++
++static void
++nv10_fifo_init_intr(struct drm_device *dev)
++{
++	nv_wr32(dev, 0x002100, 0xffffffff);
++	nv_wr32(dev, 0x002140, 0xffffffff);
++}
++
++int
++nv10_fifo_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	int i;
++
++	nv10_fifo_init_reset(dev);
++	nv10_fifo_init_ramxx(dev);
++
++	nv10_fifo_do_load_context(dev, pfifo->channels - 1);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
++
++	nv10_fifo_init_intr(dev);
++	pfifo->enable(dev);
++	pfifo->reassign(dev, true);
++
++	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++		if (dev_priv->fifos[i]) {
++			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
++			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
++		}
++	}
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
+new file mode 100644
+index 0000000..fcf2cdd
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv10_graph.c
+@@ -0,0 +1,1009 @@
++/*
++ * Copyright 2007 Matthieu CASTET <castet.matthieu at free.fr>
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drm.h"
++#include "nouveau_drv.h"
++
++#define NV10_FIFO_NUMBER 32
++
++struct pipe_state {
++	uint32_t pipe_0x0000[0x040/4];
++	uint32_t pipe_0x0040[0x010/4];
++	uint32_t pipe_0x0200[0x0c0/4];
++	uint32_t pipe_0x4400[0x080/4];
++	uint32_t pipe_0x6400[0x3b0/4];
++	uint32_t pipe_0x6800[0x2f0/4];
++	uint32_t pipe_0x6c00[0x030/4];
++	uint32_t pipe_0x7000[0x130/4];
++	uint32_t pipe_0x7400[0x0c0/4];
++	uint32_t pipe_0x7800[0x0c0/4];
++};
++
++static int nv10_graph_ctx_regs[] = {
++	NV10_PGRAPH_CTX_SWITCH1,
++	NV10_PGRAPH_CTX_SWITCH2,
++	NV10_PGRAPH_CTX_SWITCH3,
++	NV10_PGRAPH_CTX_SWITCH4,
++	NV10_PGRAPH_CTX_SWITCH5,
++	NV10_PGRAPH_CTX_CACHE1,	/* 8 values from 0x400160 to 0x40017c */
++	NV10_PGRAPH_CTX_CACHE2,	/* 8 values from 0x400180 to 0x40019c */
++	NV10_PGRAPH_CTX_CACHE3,	/* 8 values from 0x4001a0 to 0x4001bc */
++	NV10_PGRAPH_CTX_CACHE4,	/* 8 values from 0x4001c0 to 0x4001dc */
++	NV10_PGRAPH_CTX_CACHE5,	/* 8 values from 0x4001e0 to 0x4001fc */
++	0x00400164,
++	0x00400184,
++	0x004001a4,
++	0x004001c4,
++	0x004001e4,
++	0x00400168,
++	0x00400188,
++	0x004001a8,
++	0x004001c8,
++	0x004001e8,
++	0x0040016c,
++	0x0040018c,
++	0x004001ac,
++	0x004001cc,
++	0x004001ec,
++	0x00400170,
++	0x00400190,
++	0x004001b0,
++	0x004001d0,
++	0x004001f0,
++	0x00400174,
++	0x00400194,
++	0x004001b4,
++	0x004001d4,
++	0x004001f4,
++	0x00400178,
++	0x00400198,
++	0x004001b8,
++	0x004001d8,
++	0x004001f8,
++	0x0040017c,
++	0x0040019c,
++	0x004001bc,
++	0x004001dc,
++	0x004001fc,
++	NV10_PGRAPH_CTX_USER,
++	NV04_PGRAPH_DMA_START_0,
++	NV04_PGRAPH_DMA_START_1,
++	NV04_PGRAPH_DMA_LENGTH,
++	NV04_PGRAPH_DMA_MISC,
++	NV10_PGRAPH_DMA_PITCH,
++	NV04_PGRAPH_BOFFSET0,
++	NV04_PGRAPH_BBASE0,
++	NV04_PGRAPH_BLIMIT0,
++	NV04_PGRAPH_BOFFSET1,
++	NV04_PGRAPH_BBASE1,
++	NV04_PGRAPH_BLIMIT1,
++	NV04_PGRAPH_BOFFSET2,
++	NV04_PGRAPH_BBASE2,
++	NV04_PGRAPH_BLIMIT2,
++	NV04_PGRAPH_BOFFSET3,
++	NV04_PGRAPH_BBASE3,
++	NV04_PGRAPH_BLIMIT3,
++	NV04_PGRAPH_BOFFSET4,
++	NV04_PGRAPH_BBASE4,
++	NV04_PGRAPH_BLIMIT4,
++	NV04_PGRAPH_BOFFSET5,
++	NV04_PGRAPH_BBASE5,
++	NV04_PGRAPH_BLIMIT5,
++	NV04_PGRAPH_BPITCH0,
++	NV04_PGRAPH_BPITCH1,
++	NV04_PGRAPH_BPITCH2,
++	NV04_PGRAPH_BPITCH3,
++	NV04_PGRAPH_BPITCH4,
++	NV10_PGRAPH_SURFACE,
++	NV10_PGRAPH_STATE,
++	NV04_PGRAPH_BSWIZZLE2,
++	NV04_PGRAPH_BSWIZZLE5,
++	NV04_PGRAPH_BPIXEL,
++	NV10_PGRAPH_NOTIFY,
++	NV04_PGRAPH_PATT_COLOR0,
++	NV04_PGRAPH_PATT_COLOR1,
++	NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
++	0x00400904,
++	0x00400908,
++	0x0040090c,
++	0x00400910,
++	0x00400914,
++	0x00400918,
++	0x0040091c,
++	0x00400920,
++	0x00400924,
++	0x00400928,
++	0x0040092c,
++	0x00400930,
++	0x00400934,
++	0x00400938,
++	0x0040093c,
++	0x00400940,
++	0x00400944,
++	0x00400948,
++	0x0040094c,
++	0x00400950,
++	0x00400954,
++	0x00400958,
++	0x0040095c,
++	0x00400960,
++	0x00400964,
++	0x00400968,
++	0x0040096c,
++	0x00400970,
++	0x00400974,
++	0x00400978,
++	0x0040097c,
++	0x00400980,
++	0x00400984,
++	0x00400988,
++	0x0040098c,
++	0x00400990,
++	0x00400994,
++	0x00400998,
++	0x0040099c,
++	0x004009a0,
++	0x004009a4,
++	0x004009a8,
++	0x004009ac,
++	0x004009b0,
++	0x004009b4,
++	0x004009b8,
++	0x004009bc,
++	0x004009c0,
++	0x004009c4,
++	0x004009c8,
++	0x004009cc,
++	0x004009d0,
++	0x004009d4,
++	0x004009d8,
++	0x004009dc,
++	0x004009e0,
++	0x004009e4,
++	0x004009e8,
++	0x004009ec,
++	0x004009f0,
++	0x004009f4,
++	0x004009f8,
++	0x004009fc,
++	NV04_PGRAPH_PATTERN,	/* 2 values from 0x400808 to 0x40080c */
++	0x0040080c,
++	NV04_PGRAPH_PATTERN_SHAPE,
++	NV03_PGRAPH_MONO_COLOR0,
++	NV04_PGRAPH_ROP3,
++	NV04_PGRAPH_CHROMA,
++	NV04_PGRAPH_BETA_AND,
++	NV04_PGRAPH_BETA_PREMULT,
++	0x00400e70,
++	0x00400e74,
++	0x00400e78,
++	0x00400e7c,
++	0x00400e80,
++	0x00400e84,
++	0x00400e88,
++	0x00400e8c,
++	0x00400ea0,
++	0x00400ea4,
++	0x00400ea8,
++	0x00400e90,
++	0x00400e94,
++	0x00400e98,
++	0x00400e9c,
++	NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
++	NV10_PGRAPH_WINDOWCLIP_VERTICAL,   /* 8 values from 0x400f20-0x400f3c */
++	0x00400f04,
++	0x00400f24,
++	0x00400f08,
++	0x00400f28,
++	0x00400f0c,
++	0x00400f2c,
++	0x00400f10,
++	0x00400f30,
++	0x00400f14,
++	0x00400f34,
++	0x00400f18,
++	0x00400f38,
++	0x00400f1c,
++	0x00400f3c,
++	NV10_PGRAPH_XFMODE0,
++	NV10_PGRAPH_XFMODE1,
++	NV10_PGRAPH_GLOBALSTATE0,
++	NV10_PGRAPH_GLOBALSTATE1,
++	NV04_PGRAPH_STORED_FMT,
++	NV04_PGRAPH_SOURCE_COLOR,
++	NV03_PGRAPH_ABS_X_RAM,	/* 32 values from 0x400400 to 0x40047c */
++	NV03_PGRAPH_ABS_Y_RAM,	/* 32 values from 0x400480 to 0x4004fc */
++	0x00400404,
++	0x00400484,
++	0x00400408,
++	0x00400488,
++	0x0040040c,
++	0x0040048c,
++	0x00400410,
++	0x00400490,
++	0x00400414,
++	0x00400494,
++	0x00400418,
++	0x00400498,
++	0x0040041c,
++	0x0040049c,
++	0x00400420,
++	0x004004a0,
++	0x00400424,
++	0x004004a4,
++	0x00400428,
++	0x004004a8,
++	0x0040042c,
++	0x004004ac,
++	0x00400430,
++	0x004004b0,
++	0x00400434,
++	0x004004b4,
++	0x00400438,
++	0x004004b8,
++	0x0040043c,
++	0x004004bc,
++	0x00400440,
++	0x004004c0,
++	0x00400444,
++	0x004004c4,
++	0x00400448,
++	0x004004c8,
++	0x0040044c,
++	0x004004cc,
++	0x00400450,
++	0x004004d0,
++	0x00400454,
++	0x004004d4,
++	0x00400458,
++	0x004004d8,
++	0x0040045c,
++	0x004004dc,
++	0x00400460,
++	0x004004e0,
++	0x00400464,
++	0x004004e4,
++	0x00400468,
++	0x004004e8,
++	0x0040046c,
++	0x004004ec,
++	0x00400470,
++	0x004004f0,
++	0x00400474,
++	0x004004f4,
++	0x00400478,
++	0x004004f8,
++	0x0040047c,
++	0x004004fc,
++	NV03_PGRAPH_ABS_UCLIP_XMIN,
++	NV03_PGRAPH_ABS_UCLIP_XMAX,
++	NV03_PGRAPH_ABS_UCLIP_YMIN,
++	NV03_PGRAPH_ABS_UCLIP_YMAX,
++	0x00400550,
++	0x00400558,
++	0x00400554,
++	0x0040055c,
++	NV03_PGRAPH_ABS_UCLIPA_XMIN,
++	NV03_PGRAPH_ABS_UCLIPA_XMAX,
++	NV03_PGRAPH_ABS_UCLIPA_YMIN,
++	NV03_PGRAPH_ABS_UCLIPA_YMAX,
++	NV03_PGRAPH_ABS_ICLIP_XMAX,
++	NV03_PGRAPH_ABS_ICLIP_YMAX,
++	NV03_PGRAPH_XY_LOGIC_MISC0,
++	NV03_PGRAPH_XY_LOGIC_MISC1,
++	NV03_PGRAPH_XY_LOGIC_MISC2,
++	NV03_PGRAPH_XY_LOGIC_MISC3,
++	NV03_PGRAPH_CLIPX_0,
++	NV03_PGRAPH_CLIPX_1,
++	NV03_PGRAPH_CLIPY_0,
++	NV03_PGRAPH_CLIPY_1,
++	NV10_PGRAPH_COMBINER0_IN_ALPHA,
++	NV10_PGRAPH_COMBINER1_IN_ALPHA,
++	NV10_PGRAPH_COMBINER0_IN_RGB,
++	NV10_PGRAPH_COMBINER1_IN_RGB,
++	NV10_PGRAPH_COMBINER_COLOR0,
++	NV10_PGRAPH_COMBINER_COLOR1,
++	NV10_PGRAPH_COMBINER0_OUT_ALPHA,
++	NV10_PGRAPH_COMBINER1_OUT_ALPHA,
++	NV10_PGRAPH_COMBINER0_OUT_RGB,
++	NV10_PGRAPH_COMBINER1_OUT_RGB,
++	NV10_PGRAPH_COMBINER_FINAL0,
++	NV10_PGRAPH_COMBINER_FINAL1,
++	0x00400e00,
++	0x00400e04,
++	0x00400e08,
++	0x00400e0c,
++	0x00400e10,
++	0x00400e14,
++	0x00400e18,
++	0x00400e1c,
++	0x00400e20,
++	0x00400e24,
++	0x00400e28,
++	0x00400e2c,
++	0x00400e30,
++	0x00400e34,
++	0x00400e38,
++	0x00400e3c,
++	NV04_PGRAPH_PASSTHRU_0,
++	NV04_PGRAPH_PASSTHRU_1,
++	NV04_PGRAPH_PASSTHRU_2,
++	NV10_PGRAPH_DIMX_TEXTURE,
++	NV10_PGRAPH_WDIMX_TEXTURE,
++	NV10_PGRAPH_DVD_COLORFMT,
++	NV10_PGRAPH_SCALED_FORMAT,
++	NV04_PGRAPH_MISC24_0,
++	NV04_PGRAPH_MISC24_1,
++	NV04_PGRAPH_MISC24_2,
++	NV03_PGRAPH_X_MISC,
++	NV03_PGRAPH_Y_MISC,
++	NV04_PGRAPH_VALID1,
++	NV04_PGRAPH_VALID2,
++};
++
++static int nv17_graph_ctx_regs[] = {
++	NV10_PGRAPH_DEBUG_4,
++	0x004006b0,
++	0x00400eac,
++	0x00400eb0,
++	0x00400eb4,
++	0x00400eb8,
++	0x00400ebc,
++	0x00400ec0,
++	0x00400ec4,
++	0x00400ec8,
++	0x00400ecc,
++	0x00400ed0,
++	0x00400ed4,
++	0x00400ed8,
++	0x00400edc,
++	0x00400ee0,
++	0x00400a00,
++	0x00400a04,
++};
++
++struct graph_state {
++	int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
++	int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
++	struct pipe_state pipe_state;
++	uint32_t lma_window[4];
++};
++
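++/* Transfer a block of PGRAPH pipe state: program the base address once,
++ * then move one word per access through the PIPE_DATA port, which appears
++ * to auto-increment the address.
++ */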
++#define PIPE_SAVE(dev, state, addr)					\
++	do {								\
++		int __i;						\
++		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
++		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
++			state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
++	} while (0)
++
++#define PIPE_RESTORE(dev, state, addr)					\
++	do {								\
++		int __i;						\
++		nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);		\
++		for (__i = 0; __i < ARRAY_SIZE(state); __i++)		\
++			nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
++	} while (0)
++
++static void nv10_graph_save_pipe(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
++	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
++
++	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
++	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
++	PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
++	PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
++	PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
++	PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
++	PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
++	PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
++	PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
++	PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
++}
++
++static void nv10_graph_load_pipe(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
++	struct pipe_state *pipe = &pgraph_ctx->pipe_state;
++	uint32_t xfmode0, xfmode1;
++	int i;
++
++	nouveau_wait_for_idle(dev);
++	/* XXX check haiku comments */
++	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
++	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
++	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
++	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
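++	/* 0x3f800000 is 1.0f in IEEE single precision. */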
++	for (i = 0; i < 4; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++	for (i = 0; i < 4; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
++	for (i = 0; i < 3; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
++	for (i = 0; i < 3; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
++	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
++
++	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
++	nouveau_wait_for_idle(dev);
++
++	/* restore XFMODE */
++	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
++	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
++	PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
++	PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
++	PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
++	PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
++	PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
++	PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
++	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
++	PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
++	PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
++	nouveau_wait_for_idle(dev);
++}
++
++static void nv10_graph_create_pipe(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
++	struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
++	uint32_t *fifo_pipe_state_addr;
++	int i;
++#define PIPE_INIT(addr) \
++	do { \
++		fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
++	} while (0)
++#define PIPE_INIT_END(addr) \
++	do { \
++		uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
++				ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
++		if (fifo_pipe_state_addr != __end_addr) \
++			NV_ERROR(dev, "incomplete pipe init for 0x%x :  %p/%p\n", \
++				addr, fifo_pipe_state_addr, __end_addr); \
++	} while (0)
++#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
++
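++	/* The macros above fill the driver's shadow copy of each pipe range
++	 * and verify that exactly ARRAY_SIZE() words were written per range.
++	 */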
++	PIPE_INIT(0x0200);
++	for (i = 0; i < 48; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x0200);
++
++	PIPE_INIT(0x6400);
++	for (i = 0; i < 211; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	NV_WRITE_PIPE_INIT(0x40000000);
++	NV_WRITE_PIPE_INIT(0x40000000);
++	NV_WRITE_PIPE_INIT(0x40000000);
++	NV_WRITE_PIPE_INIT(0x40000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x3f000000);
++	NV_WRITE_PIPE_INIT(0x3f000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	PIPE_INIT_END(0x6400);
++
++	PIPE_INIT(0x6800);
++	for (i = 0; i < 162; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x3f800000);
++	for (i = 0; i < 25; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x6800);
++
++	PIPE_INIT(0x6c00);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0xbf800000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x6c00);
++
++	PIPE_INIT(0x7000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x00000000);
++	NV_WRITE_PIPE_INIT(0x7149f2ca);
++	for (i = 0; i < 35; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x7000);
++
++	PIPE_INIT(0x7400);
++	for (i = 0; i < 48; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x7400);
++
++	PIPE_INIT(0x7800);
++	for (i = 0; i < 48; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x7800);
++
++	PIPE_INIT(0x4400);
++	for (i = 0; i < 32; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x4400);
++
++	PIPE_INIT(0x0000);
++	for (i = 0; i < 16; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x0000);
++
++	PIPE_INIT(0x0040);
++	for (i = 0; i < 4; i++)
++		NV_WRITE_PIPE_INIT(0x00000000);
++	PIPE_INIT_END(0x0040);
++
++#undef PIPE_INIT
++#undef PIPE_INIT_END
++#undef NV_WRITE_PIPE_INIT
++}
++
++static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
++{
++	int i;
++	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
++		if (nv10_graph_ctx_regs[i] == reg)
++			return i;
++	}
++	NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
++	return -1;
++}
++
++static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
++{
++	int i;
++	for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
++		if (nv17_graph_ctx_regs[i] == reg)
++			return i;
++	}
++	NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
++	return -1;
++}
++
++int nv10_graph_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
++	uint32_t tmp;
++	int i;
++
++	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
++		nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
++	if (dev_priv->chipset >= 0x17) {
++		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
++			nv_wr32(dev, nv17_graph_ctx_regs[i],
++						pgraph_ctx->nv17[i]);
++	}
++
++	nv10_graph_load_pipe(chan);
++
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++	tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
++	nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
++	tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
++	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
++	return 0;
++}
++
++int
++nv10_graph_unload_context(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	struct nouveau_channel *chan;
++	struct graph_state *ctx;
++	uint32_t tmp;
++	int i;
++
++	chan = pgraph->channel(dev);
++	if (!chan)
++		return 0;
++	ctx = chan->pgraph_ctx;
++
++	for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
++		ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
++
++	if (dev_priv->chipset >= 0x17) {
++		for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
++			ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
++	}
++
++	nv10_graph_save_pipe(chan);
++
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
++	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
++	tmp |= (pfifo->channels - 1) << 24;
++	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
++	return 0;
++}
++
++void
++nv10_graph_context_switch(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_channel *chan = NULL;
++	int chid;
++
++	pgraph->fifo_access(dev, false);
++	nouveau_wait_for_idle(dev);
++
++	/* If previous context is valid, we need to save it */
++	nv10_graph_unload_context(dev);
++
++	/* Load context for next channel */
++	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
++	chan = dev_priv->fifos[chid];
++	if (chan)
++		nv10_graph_load_context(chan);
++
++	pgraph->fifo_access(dev, true);
++}
++
++#define NV_WRITE_CTX(reg, val) do { \
++	int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
++	if (offset >= 0) \
++		pgraph_ctx->nv10[offset] = val; \
++	} while (0)
++
++#define NV17_WRITE_CTX(reg, val) do { \
++	int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
++	if (offset >= 0) \
++		pgraph_ctx->nv17[offset] = val; \
++	} while (0)
++
++struct nouveau_channel *
++nv10_graph_channel(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int chid = dev_priv->engine.fifo.channels;
++
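++	/* Bit 16 of CTX_CONTROL appears to flag a valid channel context;
++	 * the active channel id sits in the top byte of CTX_USER.
++	 */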
++	if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
++		chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
++
++	if (chid >= dev_priv->engine.fifo.channels)
++		return NULL;
++
++	return dev_priv->fifos[chid];
++}
++
++int nv10_graph_create_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct graph_state *pgraph_ctx;
++
++	NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
++
++	chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
++						GFP_KERNEL);
++	if (pgraph_ctx == NULL)
++		return -ENOMEM;
++
++	NV_WRITE_CTX(0x00400e88, 0x08000000);
++	NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
++	NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
++	NV_WRITE_CTX(0x00400e10, 0x00001000);
++	NV_WRITE_CTX(0x00400e14, 0x00001000);
++	NV_WRITE_CTX(0x00400e30, 0x00080008);
++	NV_WRITE_CTX(0x00400e34, 0x00080008);
++	if (dev_priv->chipset >= 0x17) {
++		/* is this really needed? */
++		NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
++					nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
++		NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
++		NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
++		NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
++		NV17_WRITE_CTX(0x00400ec0, 0x00000080);
++		NV17_WRITE_CTX(0x00400ed0, 0x00000080);
++	}
++	NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
++
++	nv10_graph_create_pipe(chan);
++	return 0;
++}
++
++void nv10_graph_destroy_context(struct nouveau_channel *chan)
++{
++	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
++
++	kfree(pgraph_ctx);
++	chan->pgraph_ctx = NULL;
++}
++
++void
++nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
++			     uint32_t size, uint32_t pitch)
++{
++	uint32_t limit = max(1u, addr + size) - 1;
++
++	if (pitch)
++		addr |= 1 << 31;
++
++	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
++	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
++	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
++}
++
++int nv10_graph_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t tmp;
++	int i;
++
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
++			~NV_PMC_ENABLE_PGRAPH);
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
++			 NV_PMC_ENABLE_PGRAPH);
++
++	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
++	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
++	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
++				      (1 << 29) |
++				      (1 << 31));
++	if (dev_priv->chipset >= 0x17) {
++		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
++		nv_wr32(dev, 0x400a10, 0x3ff3fb6);
++		nv_wr32(dev, 0x400838, 0x2f8684);
++		nv_wr32(dev, 0x40083c, 0x115f3f);
++		nv_wr32(dev, 0x004006b0, 0x40000020);
++	} else
++		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
++
++	/* Turn all the tiling regions off. */
++	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
++		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
++
++	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
++	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
++	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
++	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
++	nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
++
++	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
++	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
++	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
++	nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
++
++	return 0;
++}
++
++void nv10_graph_takedown(struct drm_device *dev)
++{
++}
++
++static int
++nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
++			   int mthd, uint32_t data)
++{
++	struct drm_device *dev = chan->dev;
++	struct graph_state *ctx = chan->pgraph_ctx;
++	struct pipe_state *pipe = &ctx->pipe_state;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
++	uint32_t xfmode0, xfmode1;
++	int i;
++
++	ctx->lma_window[(mthd - 0x1638) / 4] = data;
++
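++	/* The LMA window is specified by four methods (0x1638-0x1644);
++	 * buffer them and only touch the hardware once the last one arrives.
++	 */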
++	if (mthd != 0x1644)
++		return 0;
++
++	nouveau_wait_for_idle(dev);
++
++	PIPE_SAVE(dev, pipe_0x0040, 0x0040);
++	PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
++
++	PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
++
++	nouveau_wait_for_idle(dev);
++
++	xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
++	xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
++
++	PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
++	PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
++	PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
++	PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
++
++	nouveau_wait_for_idle(dev);
++
++	nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
++	nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
++	for (i = 0; i < 4; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++	for (i = 0; i < 4; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
++	for (i = 0; i < 3; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
++
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
++	for (i = 0; i < 3; i++)
++		nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
++	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
++
++	PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
++
++	nouveau_wait_for_idle(dev);
++
++	PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
++
++	nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
++	nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
++
++	PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
++	PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
++	PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
++	PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
++
++	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
++	nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
++
++	nouveau_wait_for_idle(dev);
++
++	pgraph->fifo_access(dev, true);
++
++	return 0;
++}
++
++static int
++nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
++			   int mthd, uint32_t data)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++
++	nouveau_wait_for_idle(dev);
++
++	nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
++		nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
++	nv_wr32(dev, 0x004006b0,
++		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
++
++	pgraph->fifo_access(dev, true);
++
++	return 0;
++}
++
++static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
++	{ 0x1638, nv17_graph_mthd_lma_window },
++	{ 0x163c, nv17_graph_mthd_lma_window },
++	{ 0x1640, nv17_graph_mthd_lma_window },
++	{ 0x1644, nv17_graph_mthd_lma_window },
++	{ 0x1658, nv17_graph_mthd_lma_enable },
++	{}
++};
++
++struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
++	{ 0x0030, false, NULL }, /* null */
++	{ 0x0039, false, NULL }, /* m2mf */
++	{ 0x004a, false, NULL }, /* gdirect */
++	{ 0x005f, false, NULL }, /* imageblit */
++	{ 0x009f, false, NULL }, /* imageblit (nv12) */
++	{ 0x008a, false, NULL }, /* ifc */
++	{ 0x0089, false, NULL }, /* sifm */
++	{ 0x0062, false, NULL }, /* surf2d */
++	{ 0x0043, false, NULL }, /* rop */
++	{ 0x0012, false, NULL }, /* beta1 */
++	{ 0x0072, false, NULL }, /* beta4 */
++	{ 0x0019, false, NULL }, /* cliprect */
++	{ 0x0044, false, NULL }, /* pattern */
++	{ 0x0052, false, NULL }, /* swzsurf */
++	{ 0x0093, false, NULL }, /* surf3d */
++	{ 0x0094, false, NULL }, /* tex_tri */
++	{ 0x0095, false, NULL }, /* multitex_tri */
++	{ 0x0056, false, NULL }, /* celsius (nv10) */
++	{ 0x0096, false, NULL }, /* celsius (nv11) */
++	{ 0x0099, false, nv17_graph_celsius_mthds }, /* celsius (nv17) */
++	{}
++};
+diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c
+new file mode 100644
+index 0000000..2e58c33
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv17_gpio.c
+@@ -0,0 +1,92 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_hw.h"
++
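++/* Map a DCB GPIO line to its register, shift and mask: lines 0-1 live in
++ * NV_PCRTC_GPIO, lines 2-9 in NV_PCRTC_GPIO_EXT and lines 10-13 in
++ * NV_PCRTC_850.
++ */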
++static bool
++get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
++		  uint32_t *mask)
++{
++	if (ent->line < 2) {
++		*reg = NV_PCRTC_GPIO;
++		*shift = ent->line * 16;
++		*mask = 0x11;
++
++	} else if (ent->line < 10) {
++		*reg = NV_PCRTC_GPIO_EXT;
++		*shift = (ent->line - 2) * 4;
++		*mask = 0x3;
++
++	} else if (ent->line < 14) {
++		*reg = NV_PCRTC_850;
++		*shift = (ent->line - 10) * 4;
++		*mask = 0x3;
++
++	} else {
++		return false;
++	}
++
++	return true;
++}
++
++int
++nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
++{
++	struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
++	uint32_t reg, shift, mask, value;
++
++	if (!ent)
++		return -ENODEV;
++
++	if (!get_gpio_location(ent, &reg, &shift, &mask))
++		return -ENODEV;
++
++	value = NVReadCRTC(dev, 0, reg) >> shift;
++
++	return (ent->invert ? 1 : 0) ^ (value & 1);
++}
++
++int
++nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
++{
++	struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
++	uint32_t reg, shift, mask, value;
++
++	if (!ent)
++		return -ENODEV;
++
++	if (!get_gpio_location(ent, &reg, &shift, &mask))
++		return -ENODEV;
++
++	value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
++	mask = ~(mask << shift);
++
++	NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
+new file mode 100644
+index 0000000..21ac6e4
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv17_tv.c
+@@ -0,0 +1,778 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++#include "nouveau_drv.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_crtc.h"
++#include "nouveau_hw.h"
++#include "nv17_tv.h"
++
++static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
++	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
++		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
++	uint32_t sample = 0;
++	int head;
++
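++	/* Load detection works by driving a test level out of the DAC and
++	 * reading back which pins see a load via TEST_CONTROL.
++	 */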
++#define RGB_TEST_DATA(r, g, b) ((r) << 0 | (g) << 10 | (b) << 20)
++	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
++	if (dev_priv->vbios->tvdactestval)
++		testval = dev_priv->vbios->tvdactestval;
++
++	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
++	head = (dacclk & 0x100) >> 8;
++
++	/* Save the previous state. */
++	gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
++	gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
++	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
++	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
++	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
++	fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
++	test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
++	ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
++	ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
++	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
++
++	/* Prepare the DAC for load detection. */
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
++
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
++		      NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
++		      NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
++		      NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
++		      NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
++		      NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
++
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
++
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
++		      (dacclk & ~0xff) | 0x22);
++	msleep(1);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
++		      (dacclk & ~0xff) | 0x21);
++
++	NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
++	NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
++
++	/* Sample pin 0x4 (usually S-video luma). */
++	NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
++	msleep(20);
++	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
++		& 0x4 << 28;
++
++	/* Sample the remaining pins. */
++	NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
++	msleep(20);
++	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
++		& 0xa << 28;
++
++	/* Restore the previous state. */
++	NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
++	NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
++	NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
++
++	return sample;
++}
++
++static enum drm_connector_status
++nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_mode_config *conf = &dev->mode_config;
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++	struct dcb_entry *dcb = tv_enc->base.dcb;
++
++	if (dev_priv->chipset == 0x42 ||
++	    dev_priv->chipset == 0x43)
++		tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
++	else
++		tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
++
++	switch (tv_enc->pin_mask) {
++	case 0x2:
++	case 0x4:
++		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
++		break;
++	case 0xc:
++		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
++		break;
++	case 0xe:
++		if (dcb->tvconf.has_component_output)
++			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
++		else
++			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
++		break;
++	default:
++		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
++		break;
++	}
++
++	drm_connector_property_set_value(connector,
++					 conf->tv_subconnector_property,
++					 tv_enc->subconnector);
++
++	if (tv_enc->subconnector) {
++		NV_INFO(dev, "Load detected on output %c\n",
++			'@' + ffs(dcb->or));
++		return connector_status_connected;
++	} else {
++		return connector_status_disconnected;
++	}
++}
++
++static const struct {
++	int hdisplay;
++	int vdisplay;
++} modes[] = {
++	{ 640, 400 },
++	{ 640, 480 },
++	{ 720, 480 },
++	{ 720, 576 },
++	{ 800, 600 },
++	{ 1024, 768 },
++	{ 1280, 720 },
++	{ 1280, 1024 },
++	{ 1920, 1080 }
++};
++
++static int nv17_tv_get_modes(struct drm_encoder *encoder,
++			     struct drm_connector *connector)
++{
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++	struct drm_display_mode *mode;
++	struct drm_display_mode *output_mode;
++	int n = 0;
++	int i;
++
++	if (tv_norm->kind != CTV_ENC_MODE) {
++		struct drm_display_mode *tv_mode;
++
++		for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
++			mode = drm_mode_duplicate(encoder->dev, tv_mode);
++
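++			/* tv_enc_mode.vrefresh is in mHz and mode->clock in
++			 * kHz, hence the two divisions by 1000 below. */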
++			mode->clock = tv_norm->tv_enc_mode.vrefresh *
++						mode->htotal / 1000 *
++						mode->vtotal / 1000;
++
++			if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++				mode->clock *= 2;
++
++			if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
++			    mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
++				mode->type |= DRM_MODE_TYPE_PREFERRED;
++
++			drm_mode_probed_add(connector, mode);
++			n++;
++		}
++		return n;
++	}
++
++	/* tv_norm->kind == CTV_ENC_MODE */
++	output_mode = &tv_norm->ctv_enc_mode.mode;
++	for (i = 0; i < ARRAY_SIZE(modes); i++) {
++		if (modes[i].hdisplay > output_mode->hdisplay ||
++		    modes[i].vdisplay > output_mode->vdisplay)
++			continue;
++
++		if (modes[i].hdisplay == output_mode->hdisplay &&
++		    modes[i].vdisplay == output_mode->vdisplay) {
++			mode = drm_mode_duplicate(encoder->dev, output_mode);
++			mode->type |= DRM_MODE_TYPE_PREFERRED;
++		} else {
++			mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
++				modes[i].vdisplay, 60, false,
++				output_mode->flags & DRM_MODE_FLAG_INTERLACE,
++				false);
++		}
++
++		/* CVT modes are sometimes unsuitable... */
++		if (output_mode->hdisplay <= 720
++		    || output_mode->hdisplay >= 1920) {
++			mode->htotal = output_mode->htotal;
++			mode->hsync_start = (mode->hdisplay + (mode->htotal
++					     - mode->hdisplay) * 9 / 10) & ~7;
++			mode->hsync_end = mode->hsync_start + 8;
++		}
++		if (output_mode->vdisplay >= 1024) {
++			mode->vtotal = output_mode->vtotal;
++			mode->vsync_start = output_mode->vsync_start;
++			mode->vsync_end = output_mode->vsync_end;
++		}
++
++		mode->type |= DRM_MODE_TYPE_DRIVER;
++		drm_mode_probed_add(connector, mode);
++		n++;
++	}
++	return n;
++}
++
++static int nv17_tv_mode_valid(struct drm_encoder *encoder,
++			      struct drm_display_mode *mode)
++{
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++
++	if (tv_norm->kind == CTV_ENC_MODE) {
++		struct drm_display_mode *output_mode =
++						&tv_norm->ctv_enc_mode.mode;
++
++		if (mode->clock > 400000)
++			return MODE_CLOCK_HIGH;
++
++		if (mode->hdisplay > output_mode->hdisplay ||
++		    mode->vdisplay > output_mode->vdisplay)
++			return MODE_BAD;
++
++		if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
++		    (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
++			return MODE_NO_INTERLACE;
++
++		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
++			return MODE_NO_DBLESCAN;
++
++	} else {
++		const int vsync_tolerance = 600;
++
++		if (mode->clock > 70000)
++			return MODE_CLOCK_HIGH;
++
++		if (abs(drm_mode_vrefresh(mode) * 1000 -
++			tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
++			return MODE_VSYNC;
++
++		/* The encoder takes care of the actual interlacing */
++		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
++			return MODE_NO_INTERLACE;
++	}
++
++	return MODE_OK;
++}
++
++static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
++			       struct drm_display_mode *mode,
++			       struct drm_display_mode *adjusted_mode)
++{
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++
++	if (tv_norm->kind == CTV_ENC_MODE)
++		adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
++	else
++		adjusted_mode->clock = 90000;
++
++	return true;
++}
++
++static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++
++	if (nouveau_encoder(encoder)->last_dpms == mode)
++		return;
++	nouveau_encoder(encoder)->last_dpms = mode;
++
++	NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
++		 mode, nouveau_encoder(encoder)->dcb->index);
++
++	regs->ptv_200 &= ~1;
++
++	if (tv_norm->kind == CTV_ENC_MODE) {
++		nv04_dfp_update_fp_control(encoder, mode);
++
++	} else {
++		nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
++
++		if (mode == DRM_MODE_DPMS_ON)
++			regs->ptv_200 |= 1;
++	}
++
++	nv_load_ptv(dev, regs, 200);
++
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
++	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
++
++	nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
++}
++
++static void nv17_tv_prepare(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++	int head = nouveau_crtc(encoder->crtc)->index;
++	uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
++							NV_CIO_CRE_LCD__INDEX];
++	uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
++					nv04_dac_output_offset(encoder);
++	uint32_t dacclk;
++
++	helper->dpms(encoder, DRM_MODE_DPMS_OFF);
++
++	nv04_dfp_disable(dev, head);
++
++	/* Unbind any FP encoders from this head if we need the FP
++	 * stuff enabled. */
++	if (tv_norm->kind == CTV_ENC_MODE) {
++		struct drm_encoder *enc;
++
++		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
++			struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;
++
++			if ((dcb->type == OUTPUT_TMDS ||
++			     dcb->type == OUTPUT_LVDS) &&
++			     !enc->crtc &&
++			     nv04_dfp_get_bound_head(dev, dcb) == head) {
++				nv04_dfp_bind_head(dev, dcb, head ^ 1,
++						dev_priv->VBIOS.fp.dual_link);
++			}
++		}
++
++	}
++
++	/* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
++	 * at LCD__INDEX, which we don't alter.
++	 */
++	if (!(*cr_lcd & 0x44)) {
++		if (tv_norm->kind == CTV_ENC_MODE)
++			*cr_lcd = 0x1 | (head ? 0x0 : 0x8);
++		else
++			*cr_lcd = 0;
++	}
++
++	/* Set the DACCLK register */
++	dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
++
++	if (dev_priv->card_type == NV_40)
++		dacclk |= 0x1a << 16;
++
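++	/* Bits [5:4] select the encoder: 0x20 enables the CTV encoder
++	 * (cf. the comment in nv17_tv_mode_set()), while 0x10 presumably
++	 * selects the regular TV encoder path. */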
++	if (tv_norm->kind == CTV_ENC_MODE) {
++		dacclk |= 0x20;
++
++		if (head)
++			dacclk |= 0x100;
++		else
++			dacclk &= ~0x100;
++
++	} else {
++		dacclk |= 0x10;
++
++	}
++
++	NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
++}
++
++static void nv17_tv_mode_set(struct drm_encoder *encoder,
++			     struct drm_display_mode *drm_mode,
++			     struct drm_display_mode *adjusted_mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int head = nouveau_crtc(encoder->crtc)->index;
++	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
++	struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++	int i;
++
++	regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
++	regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
++	regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
++	regs->tv_setup = 1;
++	regs->ramdac_8c0 = 0x0;
++
++	if (tv_norm->kind == TV_ENC_MODE) {
++		tv_regs->ptv_200 = 0x13111100;
++		if (head)
++			tv_regs->ptv_200 |= 0x10;
++
++		tv_regs->ptv_20c = 0x808010;
++		tv_regs->ptv_304 = 0x2d00000;
++		tv_regs->ptv_600 = 0x0;
++		tv_regs->ptv_60c = 0x0;
++		tv_regs->ptv_610 = 0x1e00000;
++
++		if (tv_norm->tv_enc_mode.vdisplay == 576) {
++			tv_regs->ptv_508 = 0x1200000;
++			tv_regs->ptv_614 = 0x33;
++
++		} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
++			tv_regs->ptv_508 = 0xf00000;
++			tv_regs->ptv_614 = 0x13;
++		}
++
++		if (dev_priv->card_type >= NV_30) {
++			tv_regs->ptv_500 = 0xe8e0;
++			tv_regs->ptv_504 = 0x1710;
++			tv_regs->ptv_604 = 0x0;
++			tv_regs->ptv_608 = 0x0;
++		} else {
++			if (tv_norm->tv_enc_mode.vdisplay == 576) {
++				tv_regs->ptv_604 = 0x20;
++				tv_regs->ptv_608 = 0x10;
++				tv_regs->ptv_500 = 0x19710;
++				tv_regs->ptv_504 = 0x68f0;
++
++			} else if (tv_norm->tv_enc_mode.vdisplay == 480) {
++				tv_regs->ptv_604 = 0x10;
++				tv_regs->ptv_608 = 0x20;
++				tv_regs->ptv_500 = 0x4b90;
++				tv_regs->ptv_504 = 0x1b480;
++			}
++		}
++
++		for (i = 0; i < 0x40; i++)
++			tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];
++
++	} else {
++		struct drm_display_mode *output_mode =
++						&tv_norm->ctv_enc_mode.mode;
++
++		/* The registers in PRAMDAC+0xc00 control some timings and CSC
++		 * parameters for the CTV encoder (it's only used for "HD" TV
++		 * modes; I don't think I have enough of it working to guess
++		 * what they mean exactly...). It's probably connected at the
++		 * output of the FP encoder, but it also needs the analog
++		 * encoder in its OR enabled and routed to the head it's
++		 * using. It's enabled with the DACCLK register, bits [5:4].
++		 */
++		for (i = 0; i < 38; i++)
++			regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];
++
++		regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
++		regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
++		regs->fp_horiz_regs[FP_SYNC_START] =
++						output_mode->hsync_start - 1;
++		regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
++		regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
++			max((output_mode->hdisplay-600)/40 - 1, 1);
++
++		regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
++		regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
++		regs->fp_vert_regs[FP_SYNC_START] =
++						output_mode->vsync_start - 1;
++		regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
++		regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;
++
++		regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
++			NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
++			NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
++
++		if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
++			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
++		if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
++			regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
++
++		regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
++			NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
++			NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
++			NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
++			NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
++			NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
++			NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
++
++		regs->fp_debug_2 = 0;
++
++		regs->fp_margin_color = 0x801080;
++
++	}
++}
++
++static void nv17_tv_commit(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_encoder_helper_funcs *helper = encoder->helper_private;
++
++	if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
++		nv17_tv_update_rescaler(encoder);
++		nv17_tv_update_properties(encoder);
++	} else {
++		nv17_ctv_update_rescaler(encoder);
++	}
++
++	nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
++
++	/* This could use refinement for flat panels, but it should work. */
++	if (dev_priv->chipset < 0x44)
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
++					nv04_dac_output_offset(encoder),
++					0xf0000000);
++	else
++		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
++					nv04_dac_output_offset(encoder),
++					0x00100000);
++
++	helper->dpms(encoder, DRM_MODE_DPMS_ON);
++
++	NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
++		drm_get_connector_name(
++			&nouveau_encoder_connector_get(nv_encoder)->base),
++		nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
++}
++
++static void nv17_tv_save(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++
++	nouveau_encoder(encoder)->restore.output =
++					NVReadRAMDAC(dev, 0,
++					NV_PRAMDAC_DACCLK +
++					nv04_dac_output_offset(encoder));
++
++	nv17_tv_state_save(dev, &tv_enc->saved_state);
++
++	tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
++}
++
++static void nv17_tv_restore(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++
++	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
++				nv04_dac_output_offset(encoder),
++				nouveau_encoder(encoder)->restore.output);
++
++	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
++
++	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
++}
++
++static int nv17_tv_create_resources(struct drm_encoder *encoder,
++				    struct drm_connector *connector)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_mode_config *conf = &dev->mode_config;
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
++	int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
++							NUM_LD_TV_NORMS;
++	int i;
++
++	if (nouveau_tv_norm) {
++		for (i = 0; i < num_tv_norms; i++) {
++			if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
++				tv_enc->tv_norm = i;
++				break;
++			}
++		}
++
++		if (i == num_tv_norms)
++			NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
++				nouveau_tv_norm);
++	}
++
++	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
++
++	drm_connector_attach_property(connector,
++					conf->tv_select_subconnector_property,
++					tv_enc->select_subconnector);
++	drm_connector_attach_property(connector,
++					conf->tv_subconnector_property,
++					tv_enc->subconnector);
++	drm_connector_attach_property(connector,
++					conf->tv_mode_property,
++					tv_enc->tv_norm);
++	drm_connector_attach_property(connector,
++					conf->tv_flicker_reduction_property,
++					tv_enc->flicker);
++	drm_connector_attach_property(connector,
++					conf->tv_saturation_property,
++					tv_enc->saturation);
++	drm_connector_attach_property(connector,
++					conf->tv_hue_property,
++					tv_enc->hue);
++	drm_connector_attach_property(connector,
++					conf->tv_overscan_property,
++					tv_enc->overscan);
++
++	return 0;
++}
++
++static int nv17_tv_set_property(struct drm_encoder *encoder,
++				struct drm_connector *connector,
++				struct drm_property *property,
++				uint64_t val)
++{
++	struct drm_mode_config *conf = &encoder->dev->mode_config;
++	struct drm_crtc *crtc = encoder->crtc;
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++	bool modes_changed = false;
++
++	if (property == conf->tv_overscan_property) {
++		tv_enc->overscan = val;
++		if (encoder->crtc) {
++			if (tv_norm->kind == CTV_ENC_MODE)
++				nv17_ctv_update_rescaler(encoder);
++			else
++				nv17_tv_update_rescaler(encoder);
++		}
++
++	} else if (property == conf->tv_saturation_property) {
++		if (tv_norm->kind != TV_ENC_MODE)
++			return -EINVAL;
++
++		tv_enc->saturation = val;
++		nv17_tv_update_properties(encoder);
++
++	} else if (property == conf->tv_hue_property) {
++		if (tv_norm->kind != TV_ENC_MODE)
++			return -EINVAL;
++
++		tv_enc->hue = val;
++		nv17_tv_update_properties(encoder);
++
++	} else if (property == conf->tv_flicker_reduction_property) {
++		if (tv_norm->kind != TV_ENC_MODE)
++			return -EINVAL;
++
++		tv_enc->flicker = val;
++		if (encoder->crtc)
++			nv17_tv_update_rescaler(encoder);
++
++	} else if (property == conf->tv_mode_property) {
++		if (connector->dpms != DRM_MODE_DPMS_OFF)
++			return -EINVAL;
++
++		tv_enc->tv_norm = val;
++
++		modes_changed = true;
++
++	} else if (property == conf->tv_select_subconnector_property) {
++		if (tv_norm->kind != TV_ENC_MODE)
++			return -EINVAL;
++
++		tv_enc->select_subconnector = val;
++		nv17_tv_update_properties(encoder);
++
++	} else {
++		return -EINVAL;
++	}
++
++	if (modes_changed) {
++		drm_helper_probe_single_connector_modes(connector, 0, 0);
++
++		/* Disable the crtc to ensure a full modeset is
++		 * performed whenever it's turned on again. */
++		if (crtc) {
++			struct drm_mode_set modeset = {
++				.crtc = crtc,
++			};
++
++			crtc->funcs->set_config(&modeset);
++		}
++	}
++
++	return 0;
++}
++
++static void nv17_tv_destroy(struct drm_encoder *encoder)
++{
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++
++	NV_DEBUG_KMS(encoder->dev, "\n");
++
++	drm_encoder_cleanup(encoder);
++	kfree(tv_enc);
++}
++
++static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
++	.dpms = nv17_tv_dpms,
++	.save = nv17_tv_save,
++	.restore = nv17_tv_restore,
++	.mode_fixup = nv17_tv_mode_fixup,
++	.prepare = nv17_tv_prepare,
++	.commit = nv17_tv_commit,
++	.mode_set = nv17_tv_mode_set,
++	.detect = nv17_tv_detect,
++};
++
++static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
++	.get_modes = nv17_tv_get_modes,
++	.mode_valid = nv17_tv_mode_valid,
++	.create_resources = nv17_tv_create_resources,
++	.set_property = nv17_tv_set_property,
++};
++
++static struct drm_encoder_funcs nv17_tv_funcs = {
++	.destroy = nv17_tv_destroy,
++};
++
++int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
++{
++	struct drm_encoder *encoder;
++	struct nv17_tv_encoder *tv_enc = NULL;
++
++	tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
++	if (!tv_enc)
++		return -ENOMEM;
++
++	tv_enc->overscan = 50;
++	tv_enc->flicker = 50;
++	tv_enc->saturation = 50;
++	tv_enc->hue = 0;
++	tv_enc->tv_norm = TV_NORM_PAL;
++	tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
++	tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
++	tv_enc->pin_mask = 0;
++
++	encoder = to_drm_encoder(&tv_enc->base);
++
++	tv_enc->base.dcb = entry;
++	tv_enc->base.or = ffs(entry->or) - 1;
++
++	drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
++	drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
++	to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
++
++	encoder->possible_crtcs = entry->heads;
++	encoder->possible_clones = 0;
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
+new file mode 100644
+index 0000000..c00977c
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv17_tv.h
+@@ -0,0 +1,156 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NV17_TV_H__
++#define __NV17_TV_H__
++
++struct nv17_tv_state {
++	uint8_t tv_enc[0x40];
++
++	uint32_t hfilter[4][7];
++	uint32_t hfilter2[4][7];
++	uint32_t vfilter[4][7];
++
++	uint32_t ptv_200;
++	uint32_t ptv_204;
++	uint32_t ptv_208;
++	uint32_t ptv_20c;
++	uint32_t ptv_304;
++	uint32_t ptv_500;
++	uint32_t ptv_504;
++	uint32_t ptv_508;
++	uint32_t ptv_600;
++	uint32_t ptv_604;
++	uint32_t ptv_608;
++	uint32_t ptv_60c;
++	uint32_t ptv_610;
++	uint32_t ptv_614;
++};
++
++enum nv17_tv_norm {
++	TV_NORM_PAL,
++	TV_NORM_PAL_M,
++	TV_NORM_PAL_N,
++	TV_NORM_PAL_NC,
++	TV_NORM_NTSC_M,
++	TV_NORM_NTSC_J,
++	NUM_LD_TV_NORMS,
++	TV_NORM_HD480I = NUM_LD_TV_NORMS,
++	TV_NORM_HD480P,
++	TV_NORM_HD576I,
++	TV_NORM_HD576P,
++	TV_NORM_HD720P,
++	TV_NORM_HD1080I,
++	NUM_TV_NORMS
++};
++
++struct nv17_tv_encoder {
++	struct nouveau_encoder base;
++
++	struct nv17_tv_state state;
++	struct nv17_tv_state saved_state;
++
++	int overscan;
++	int flicker;
++	int saturation;
++	int hue;
++	enum nv17_tv_norm tv_norm;
++	int subconnector;
++	int select_subconnector;
++	uint32_t pin_mask;
++};
++#define to_tv_enc(x) container_of(nouveau_encoder(x),		\
++				  struct nv17_tv_encoder, base)
++
++extern char *nv17_tv_norm_names[NUM_TV_NORMS];
++
++extern struct nv17_tv_norm_params {
++	enum {
++		TV_ENC_MODE,
++		CTV_ENC_MODE,
++	} kind;
++
++	union {
++		struct {
++			int hdisplay;
++			int vdisplay;
++			int vrefresh; /* mHz */
++
++			uint8_t tv_enc[0x40];
++		} tv_enc_mode;
++
++		struct {
++			struct drm_display_mode mode;
++
++			uint32_t ctv_regs[38];
++		} ctv_enc_mode;
++	};
++
++} nv17_tv_norms[NUM_TV_NORMS];
++#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
++
++extern struct drm_display_mode nv17_tv_modes[];
++
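++/* Piecewise-linear interpolation through (0, y0), (50, y1) and
++ * (100, y2); used to map 0-100 property values (e.g. overscan,
++ * saturation) onto hardware register ranges, so that
++ * interpolate(a, b, c, 50) == b. */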
++static inline int interpolate(int y0, int y1, int y2, int x)
++{
++	return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
++}
++
++void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
++void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
++void nv17_tv_update_properties(struct drm_encoder *encoder);
++void nv17_tv_update_rescaler(struct drm_encoder *encoder);
++void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
++
++/* TV hardware access functions */
++
++static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
++{
++	nv_wr32(dev, reg, val);
++}
++
++static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
++{
++	return nv_rd32(dev, reg);
++}
++
++static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
++{
++	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
++	nv_write_ptv(dev, NV_PTV_TV_DATA, val);
++}
++
++static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
++{
++	nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
++	return nv_read_ptv(dev, NV_PTV_TV_DATA);
++}
++
++#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
++#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
++#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
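++
++/* The token pasting above means, e.g., nv_load_ptv(dev, state, 200)
++ * expands to nv_write_ptv(dev, NV_PTV_OFFSET + 0x200, state->ptv_200). */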
++
++#endif
+diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+new file mode 100644
+index 0000000..d64683d
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+@@ -0,0 +1,583 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++#include "nouveau_drv.h"
++#include "nouveau_encoder.h"
++#include "nouveau_crtc.h"
++#include "nouveau_hw.h"
++#include "nv17_tv.h"
++
++char *nv17_tv_norm_names[NUM_TV_NORMS] = {
++	[TV_NORM_PAL] = "PAL",
++	[TV_NORM_PAL_M] = "PAL-M",
++	[TV_NORM_PAL_N] = "PAL-N",
++	[TV_NORM_PAL_NC] = "PAL-Nc",
++	[TV_NORM_NTSC_M] = "NTSC-M",
++	[TV_NORM_NTSC_J] = "NTSC-J",
++	[TV_NORM_HD480I] = "hd480i",
++	[TV_NORM_HD480P] = "hd480p",
++	[TV_NORM_HD576I] = "hd576i",
++	[TV_NORM_HD576P] = "hd576p",
++	[TV_NORM_HD720P] = "hd720p",
++	[TV_NORM_HD1080I] = "hd1080i"
++};
++
++/* TV standard specific parameters */
++
++struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
++	[TV_NORM_PAL] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 576, 50000, {
++					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
++					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
++					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
++					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_PAL_M] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 480, 59940, {
++					0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
++					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
++					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
++					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_PAL_N] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 576, 50000, {
++					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
++					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
++					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
++					0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_PAL_NC] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 576, 50000, {
++					0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
++					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
++					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
++					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_NTSC_M] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 480, 59940, {
++					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
++					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
++					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
++					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_NTSC_J] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 480, 59940, {
++					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
++					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
++					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
++					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_HD480I] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 480, 59940, {
++					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
++					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
++					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
++					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_HD576I] = { TV_ENC_MODE, {
++			.tv_enc_mode = { 720, 576, 50000, {
++					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
++					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
++					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
++					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
++					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
++					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
++					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
++					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
++				} } } },
++
++
++	[TV_NORM_HD480P] = { CTV_ENC_MODE, {
++			.ctv_enc_mode = {
++				.mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
++						   720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
++						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
++					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
++					      0x10160004, 0x10060005, 0x1006000c, 0x10060020,
++					      0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
++					      0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
++					      0x10000fff, 0x10000fff, 0x10000fff, 0x70,
++					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
++					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
++					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
++				} } } },
++
++	[TV_NORM_HD576P] = { CTV_ENC_MODE, {
++			.ctv_enc_mode = {
++				.mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
++						   720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
++						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
++					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
++					      0x10060001, 0x10060009, 0x10060026, 0x10060027,
++					      0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
++					      0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
++					      0x10000fff, 0x10000fff, 0x10000fff, 0x69,
++					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
++					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
++					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
++				} } } },
++
++	[TV_NORM_HD720P] = { CTV_ENC_MODE, {
++			.ctv_enc_mode = {
++				.mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
++						   1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
++						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++				.ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
++					      0x66b0021, 0x6004a, 0x1210626, 0x8170000,
++					      0x70004, 0x70016, 0x70017, 0x40f0018,
++					      0x702e8, 0x81702ed, 0xfff, 0xfff,
++					      0xfff, 0xfff, 0xfff, 0xfff,
++					      0xfff, 0xfff, 0xfff, 0x0,
++					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
++					      0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
++					      0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
++				} } } },
++
++	[TV_NORM_HD1080I] = { CTV_ENC_MODE, {
++			.ctv_enc_mode = {
++				.mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
++						   1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
++						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
++						   | DRM_MODE_FLAG_INTERLACE) },
++				.ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
++					      0x8940028, 0x60054, 0xe80870, 0xbf70000,
++					      0xbc70004, 0x70005, 0x70012, 0x70013,
++					      0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
++					      0x1c70237, 0x70238, 0x70244, 0x70245,
++					      0x40f0246, 0x70462, 0x1f70464, 0x0,
++					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
++					      0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
++					      0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
++				} } } }
++};
++
++/*
++ * The following is some guesswork on how the TV encoder flicker
++ * filter/rescaler works:
++ *
++ * It seems to use some sort of resampling filter. It is controlled
++ * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, which
++ * control the horizontal and vertical stages respectively. There is
++ * also NV_PTV_HFILTER2, which the blob fills identically to
++ * NV_PTV_HFILTER, but it seems to do nothing. A rough guess might be
++ * that it could be used to control the filtering of each interlaced
++ * field independently, but I don't know how that would be enabled.
++ * The whole filtering process seems to be disabled with bits 26:27 of
++ * PTV_200, but we aren't doing that.
++ *
++ * The layout of both register sets is the same:
++ *
++ * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
++ * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
++ *
++ * Each coefficient is stored in bits [31],[15:9] in two's complement
++ * format. They seem to be some kind of weights used in a low-pass
++ * filter. Both A and B coefficients are applied to the 14 nearest
++ * samples on each side (listed from nearest to furthest; they roughly
++ * cover 2 framebuffer pixels on each side). They are probably
++ * multiplied by some more hardwired weights before being used:
++ * B coefficients are applied the same on both sides, while
++ * A coefficients are inverted before being applied to the opposite
++ * side.
++ *
++ * After all the hassle, I got the following formula by empirical
++ * means...
++ */
++
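++/*
++ * Concretely, tv_setup_filter() below packs each computed coefficient c
++ * as (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9), which matches the
++ * [31],[15:9] two's complement layout described above.
++ */
++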
++#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
++
++#define id1 (1LL << 8)
++#define id2 (1LL << 16)
++#define id3 (1LL << 24)
++#define id4 (1LL << 32)
++#define id5 (1LL << 48)
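++
++/* Fixed-point unity constants. In fparams[] below, each group of four
++ * coefficients (suffixes none/r/f/rf) is pre-scaled by id5, id4, id3
++ * and id1 respectively; tv_setup_filter() shifts the summed result
++ * back down. */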
++
++static struct filter_params {
++	int64_t k1;
++	int64_t ki;
++	int64_t ki2;
++	int64_t ki3;
++	int64_t kr;
++	int64_t kir;
++	int64_t ki2r;
++	int64_t ki3r;
++	int64_t kf;
++	int64_t kif;
++	int64_t ki2f;
++	int64_t ki3f;
++	int64_t krf;
++	int64_t kirf;
++	int64_t ki2rf;
++	int64_t ki3rf;
++} fparams[2][4] = {
++	/* Horizontal filter parameters */
++	{
++		{64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
++		 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
++		 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
++		 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
++		{-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
++		 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
++		 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
++		 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
++		{-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
++		 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
++		 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
++		 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
++		{51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
++		 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
++		 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
++		 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
++	},
++
++	/* Vertical filter parameters */
++	{
++		{67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
++		 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
++		 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
++		 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
++		{6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
++		 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
++		 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
++		 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
++		{-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
++		 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
++		 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
++		 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
++		{-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
++		 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
++		 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
++		 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
++	}
++};
++
++static void tv_setup_filter(struct drm_encoder *encoder)
++{
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++	struct drm_display_mode *mode = &encoder->crtc->mode;
++	uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
++				       &tv_enc->state.vfilter};
++	int i, j, k;
++	int32_t overscan = calc_overscan(tv_enc->overscan);
++	int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
++	uint64_t rs[] = {mode->hdisplay * id3,
++			 mode->vdisplay * id3};
++
++	do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
++	do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
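++
++	/* rs[] now holds the CRTC-to-output resampling ratios in fixed
++	 * point, with the overscan factor folded in; the clamp below keeps
++	 * them at or above id2. */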
++
++	for (k = 0; k < 2; k++) {
++		rs[k] = max((int64_t)rs[k], id2);
++
++		for (j = 0; j < 4; j++) {
++			struct filter_params *p = &fparams[k][j];
++
++			for (i = 0; i < 7; i++) {
++				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
++					+ (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
++					+ (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
++					+ (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
++
++				(*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
++			}
++		}
++	}
++}
++
++/* Hardware state saving/restoring */
++
++static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
++{
++	int i, j;
++	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
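++	/* Four 7-word coefficient banks: the A and B halves of the layout
++	 * described in the comment near the top of this file. */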
++
++	for (i = 0; i < 4; i++) {
++		for (j = 0; j < 7; j++)
++			regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
++	}
++}
++
++static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
++{
++	int i, j;
++	uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
++
++	for (i = 0; i < 4; i++) {
++		for (j = 0; j < 7; j++)
++			nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
++	}
++}
++
++void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
++{
++	int i;
++
++	for (i = 0; i < 0x40; i++)
++		state->tv_enc[i] = nv_read_tv_enc(dev, i);
++
++	tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
++	tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
++	tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
++
++	nv_save_ptv(dev, state, 200);
++	nv_save_ptv(dev, state, 204);
++	nv_save_ptv(dev, state, 208);
++	nv_save_ptv(dev, state, 20c);
++	nv_save_ptv(dev, state, 304);
++	nv_save_ptv(dev, state, 500);
++	nv_save_ptv(dev, state, 504);
++	nv_save_ptv(dev, state, 508);
++	nv_save_ptv(dev, state, 600);
++	nv_save_ptv(dev, state, 604);
++	nv_save_ptv(dev, state, 608);
++	nv_save_ptv(dev, state, 60c);
++	nv_save_ptv(dev, state, 610);
++	nv_save_ptv(dev, state, 614);
++}
++
++void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
++{
++	int i;
++
++	for (i = 0; i < 0x40; i++)
++		nv_write_tv_enc(dev, i, state->tv_enc[i]);
++
++	tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
++	tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
++	tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
++
++	nv_load_ptv(dev, state, 200);
++	nv_load_ptv(dev, state, 204);
++	nv_load_ptv(dev, state, 208);
++	nv_load_ptv(dev, state, 20c);
++	nv_load_ptv(dev, state, 304);
++	nv_load_ptv(dev, state, 500);
++	nv_load_ptv(dev, state, 504);
++	nv_load_ptv(dev, state, 508);
++	nv_load_ptv(dev, state, 600);
++	nv_load_ptv(dev, state, 604);
++	nv_load_ptv(dev, state, 608);
++	nv_load_ptv(dev, state, 60c);
++	nv_load_ptv(dev, state, 610);
++	nv_load_ptv(dev, state, 614);
++
++	/* This is required for some settings to kick in. */
++	nv_write_tv_enc(dev, 0x3e, 1);
++	nv_write_tv_enc(dev, 0x3e, 0);
++}
++
++/* Timings similar to the ones the blob sets */
++
++struct drm_display_mode nv17_tv_modes[] = {
++	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
++		   320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
++		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
++	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
++		   320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
++		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
++	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
++		   400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
++		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
++		   640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
++		   720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
++		   720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
++		   800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
++		   1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	{}
++};
++
++void nv17_tv_update_properties(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++	struct nv17_tv_state *regs = &tv_enc->state;
++	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
++	int subconnector = tv_enc->select_subconnector ?
++						tv_enc->select_subconnector :
++						tv_enc->subconnector;
++
++	switch (subconnector) {
++	case DRM_MODE_SUBCONNECTOR_Composite:
++	{
++		regs->ptv_204 = 0x2;
++
++		/* The composite connector may be found on either pin. */
++		if (tv_enc->pin_mask & 0x4)
++			regs->ptv_204 |= 0x010000;
++		else if (tv_enc->pin_mask & 0x2)
++			regs->ptv_204 |= 0x100000;
++		else
++			regs->ptv_204 |= 0x110000;
++
++		regs->tv_enc[0x7] = 0x10;
++		break;
++	}
++	case DRM_MODE_SUBCONNECTOR_SVIDEO:
++		regs->ptv_204 = 0x11012;
++		regs->tv_enc[0x7] = 0x18;
++		break;
++
++	case DRM_MODE_SUBCONNECTOR_Component:
++		regs->ptv_204 = 0x111333;
++		regs->tv_enc[0x7] = 0x14;
++		break;
++
++	case DRM_MODE_SUBCONNECTOR_SCART:
++		regs->ptv_204 = 0x111012;
++		regs->tv_enc[0x7] = 0x18;
++		break;
++	}
++
++	regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
++					 tv_enc->saturation);
++	regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
++					 tv_enc->saturation);
++	regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
++
++	nv_load_ptv(dev, regs, 204);
++	nv_load_tv_enc(dev, regs, 7);
++	nv_load_tv_enc(dev, regs, 20);
++	nv_load_tv_enc(dev, regs, 22);
++	nv_load_tv_enc(dev, regs, 25);
++}
++
++void nv17_tv_update_rescaler(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++	struct nv17_tv_state *regs = &tv_enc->state;
++
++	regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
++
++	tv_setup_filter(encoder);
++
++	nv_load_ptv(dev, regs, 208);
++	tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
++	tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
++	tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
++}
++
++void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
++	int head = nouveau_crtc(encoder->crtc)->index;
++	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
++	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
++	struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
++	int overscan, hmargin, vmargin, hratio, vratio;
++
++	/* The rescaler doesn't do the right thing for interlaced modes. */
++	if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
++		overscan = 100;
++	else
++		overscan = tv_enc->overscan;
++
++	hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
++	vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
++
++	hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
++			      overscan);
++	vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
++			      overscan);
++
++	hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
++	vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
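++
++	/* hratio/vratio appear to be scale factors with 0x800 as the 1:1
++	 * point; vratio is additionally aligned down to a multiple of 4. */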
++
++	regs->fp_horiz_regs[FP_VALID_START] = hmargin;
++	regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
++	regs->fp_vert_regs[FP_VALID_START] = vmargin;
++	regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
++
++	regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
++		XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
++		NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
++		XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
++
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
++		      regs->fp_horiz_regs[FP_VALID_START]);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
++		      regs->fp_horiz_regs[FP_VALID_END]);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
++		      regs->fp_vert_regs[FP_VALID_START]);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
++		      regs->fp_vert_regs[FP_VALID_END]);
++	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
++}
+diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
+new file mode 100644
+index 0000000..d6fc0a8
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv20_graph.c
+@@ -0,0 +1,775 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++/*
++ * NV20
++ * -----
++ * There are 3 families:
++ * NV20 is 0x10de:0x020*
++ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
++ * NV2A is 0x10de:0x02A0
++ *
++ * NV30
++ * -----
++ * There are 3 families:
++ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
++ * NV34 is 0x10de:0x032*
++ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
++ *
++ * Not seen in the wild, no dumps (probably NV35):
++ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
++ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
++ *
++ */
++
++#define NV20_GRCTX_SIZE (3580*4)
++#define NV25_GRCTX_SIZE (3529*4)
++#define NV2A_GRCTX_SIZE (3500*4)
++
++#define NV30_31_GRCTX_SIZE (24392)
++#define NV34_GRCTX_SIZE    (18140)
++#define NV35_36_GRCTX_SIZE (22396)
++
++static void
++nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++	int i;
++
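++	/* nv_wo32() takes a word index into the context object, hence the
++	 * /4 applied to these byte offsets. */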
++	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
++	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
++	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
++	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
++	for (i = 0x04d4; i <= 0x04e0; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00030303);
++	for (i = 0x04f4; i <= 0x0500; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080000);
++	for (i = 0x050c; i <= 0x0518; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x01012000);
++	for (i = 0x051c; i <= 0x0528; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x000105b8);
++	for (i = 0x052c; i <= 0x0538; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080008);
++	for (i = 0x055c; i <= 0x0598; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x07ff0000);
++	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
++	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
++	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
++	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
++	for (i = 0x1c1c; i <= 0x248c; i += 16) {
++		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
++		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
++		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
++	}
++	nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
++	nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
++	nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
++	nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
++	for (i = 0x355c; i <= 0x3578; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x001c527c);
++}
++
++static void
++nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++	int i;
++
++	nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
++	nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
++	nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
++	nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
++	nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
++	nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
++	for (i = 0x0510; i <= 0x051c; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00030303);
++	for (i = 0x0530; i <= 0x053c; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080000);
++	for (i = 0x0548; i <= 0x0554; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x01012000);
++	for (i = 0x0558; i <= 0x0564; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x000105b8);
++	for (i = 0x0568; i <= 0x0574; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080008);
++	for (i = 0x0598; i <= 0x05d4; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x07ff0000);
++	nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
++	nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
++	nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
++	nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
++	nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
++	nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
++	nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
++	nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
++	for (i = 0x1b04; i <= 0x2374; i += 16) {
++		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
++		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
++		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
++	}
++	nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
++	nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
++	nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
++	nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
++	for (i = 0x3484; i <= 0x34a0; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x001c527c);
++}
++
++static void
++nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++	int i;
++
++	nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
++	nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
++	nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
++	nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
++	for (i = 0x04d4; i <= 0x04e0; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00030303);
++	for (i = 0x04f4; i <= 0x0500; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080000);
++	for (i = 0x050c; i <= 0x0518; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x01012000);
++	for (i = 0x051c; i <= 0x0528; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x000105b8);
++	for (i = 0x052c; i <= 0x0538; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080008);
++	for (i = 0x055c; i <= 0x0598; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x07ff0000);
++	nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
++	nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
++	nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
++	nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
++	for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
++		nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
++		nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
++		nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
++	}
++	nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
++	nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
++	nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
++	nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
++	for (i = 0x341c; i <= 0x3438; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x001c527c);
++}
++
++static void
++nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++	int i;
++
++	nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
++	nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
++	nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
++	nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
++	nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
++	nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
++	for (i = 0x04e0; i < 0x04e8; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
++	for (i = 0x0508; i < 0x0548; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x07ff0000);
++	nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
++	nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
++	nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
++	nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
++	nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
++	nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
++	for (i = 0x0600; i < 0x0640; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00010588);
++	for (i = 0x0640; i < 0x0680; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00030303);
++	for (i = 0x06c0; i < 0x0700; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0008aae4);
++	for (i = 0x0700; i < 0x0740; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x01012000);
++	for (i = 0x0740; i < 0x0780; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080008);
++	nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
++	nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
++	for (i = 0x0864; i < 0x0874; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00040004);
++	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
++		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
++		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
++		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
++	}
++	for (i = 0x30b8; i < 0x30c8; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0000ffff);
++	nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
++	nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
++}
++
++static void
++nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++	int i;
++
++	nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
++	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
++	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
++	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
++	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
++	nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
++	for (i = 0x04d4; i < 0x04dc; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
++	for (i = 0x04fc; i < 0x053c; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x07ff0000);
++	nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
++	nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
++	nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
++	nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
++	nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
++	nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
++	for (i = 0x05f0; i < 0x0630; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00010588);
++	for (i = 0x0630; i < 0x0670; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00030303);
++	for (i = 0x06b0; i < 0x06f0; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0008aae4);
++	for (i = 0x06f0; i < 0x0730; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x01012000);
++	for (i = 0x0730; i < 0x0770; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080008);
++	nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
++	nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
++	for (i = 0x0858; i < 0x0868; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00040004);
++	for (i = 0x15ac; i <= 0x271c ; i += 16) {
++		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
++		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
++		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
++	}
++	for (i = 0x274c; i < 0x275c; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0000ffff);
++	nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
++	nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
++}
++
++static void
++nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
++{
++	int i;
++
++	nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
++	nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
++	nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
++	nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
++	nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
++	nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
++	nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
++	for (i = 0x04dc; i < 0x04e4; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0fff0000);
++	nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
++	for (i = 0x0504; i < 0x0544; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x07ff0000);
++	nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
++	nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
++	nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
++	nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
++	nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
++	nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
++	nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
++	for (i = 0x0604; i < 0x0644; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00010588);
++	for (i = 0x0644; i < 0x0684; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00030303);
++	for (i = 0x06c4; i < 0x0704; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0008aae4);
++	for (i = 0x0704; i < 0x0744; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x01012000);
++	for (i = 0x0744; i < 0x0784; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00080008);
++	nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
++	nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
++	for (i = 0x0868; i < 0x0878; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x00040004);
++	for (i = 0x1f1c; i <= 0x308c ; i += 16) {
++		nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
++		nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
++		nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
++	}
++	for (i = 0x30bc; i < 0x30cc; i += 4)
++		nv_wo32(dev, ctx, i/4, 0x0000ffff);
++	nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
++	nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
++	nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
++	nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
++	nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
++}
++
++int
++nv20_graph_create_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
++	unsigned int ctx_size;
++	unsigned int idoffs = 0x28/4;
++	int ret;
++
++	switch (dev_priv->chipset) {
++	case 0x20:
++		ctx_size = NV20_GRCTX_SIZE;
++		ctx_init = nv20_graph_context_init;
++		idoffs = 0;
++		break;
++	case 0x25:
++	case 0x28:
++		ctx_size = NV25_GRCTX_SIZE;
++		ctx_init = nv25_graph_context_init;
++		break;
++	case 0x2a:
++		ctx_size = NV2A_GRCTX_SIZE;
++		ctx_init = nv2a_graph_context_init;
++		idoffs = 0;
++		break;
++	case 0x30:
++	case 0x31:
++		ctx_size = NV30_31_GRCTX_SIZE;
++		ctx_init = nv30_31_graph_context_init;
++		break;
++	case 0x34:
++		ctx_size = NV34_GRCTX_SIZE;
++		ctx_init = nv34_graph_context_init;
++		break;
++	case 0x35:
++	case 0x36:
++		ctx_size = NV35_36_GRCTX_SIZE;
++		ctx_init = nv35_36_graph_context_init;
++		break;
++	default:
++		NV_ERROR(dev, "Please contact the devs if you want your NV%x"
++			      " card to work\n", dev_priv->chipset);
++		return -ENOSYS;
++	}
++
++	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
++					  NVOBJ_FLAG_ZERO_ALLOC,
++					  &chan->ramin_grctx);
++	if (ret)
++		return ret;
++
++	/* Initialise default context values */
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	ctx_init(dev, chan->ramin_grctx->gpuobj);
++
++	/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
++	nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
++					(chan->id << 24) | 0x1); /* CTX_USER */
++
++	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
++			chan->ramin_grctx->instance >> 4);
++
++	dev_priv->engine.instmem.finish_access(dev);
++	return 0;
++}
++
++void
++nv20_graph_destroy_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (chan->ramin_grctx)
++		nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
++	dev_priv->engine.instmem.finish_access(dev);
++}
++
++int
++nv20_graph_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	uint32_t inst;
++
++	if (!chan->ramin_grctx)
++		return -EINVAL;
++	inst = chan->ramin_grctx->instance >> 4;
++
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
++		     NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++
++	nouveau_wait_for_idle(dev);
++	return 0;
++}
++
++int
++nv20_graph_unload_context(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	struct nouveau_channel *chan;
++	uint32_t inst, tmp;
++
++	chan = pgraph->channel(dev);
++	if (!chan)
++		return 0;
++	inst = chan->ramin_grctx->instance >> 4;
++
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
++		     NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
++
++	nouveau_wait_for_idle(dev);
++
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
++	tmp  = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
++	tmp |= (pfifo->channels - 1) << 24;
++	nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
++	return 0;
++}
++
++static void
++nv20_graph_rdi(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int i, writecount = 32;
++	uint32_t rdi_index = 0x2c80000;
++
++	if (dev_priv->chipset == 0x20) {
++		rdi_index = 0x3d0000;
++		writecount = 15;
++	}
++
++	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
++	for (i = 0; i < writecount; i++)
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
++
++	nouveau_wait_for_idle(dev);
++}
++
++void
++nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
++			     uint32_t size, uint32_t pitch)
++{
++	uint32_t limit = max(1u, addr + size) - 1;
++
++	if (pitch)
++		addr |= 1;
++
++	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
++	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
++	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
++
++	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
++	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
++	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
++	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
++	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
++	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
++}
++
++int
++nv20_graph_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t tmp, vramsz;
++	int ret, i;
++
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);
++
++	if (!dev_priv->ctx_table) {
++		/* Create Context Pointer Table */
++		dev_priv->ctx_table_size = 32 * 4;
++		ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
++						  dev_priv->ctx_table_size, 16,
++						  NVOBJ_FLAG_ZERO_ALLOC,
++						  &dev_priv->ctx_table);
++		if (ret)
++			return ret;
++	}
++
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
++		 dev_priv->ctx_table->instance >> 4);
++
++	nv20_graph_rdi(dev);
++
++	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
++	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
++	nv_wr32(dev, 0x40009C           , 0x00000040);
++
++	if (dev_priv->chipset >= 0x25) {
++		nv_wr32(dev, 0x400890, 0x00080000);
++		nv_wr32(dev, 0x400610, 0x304B1FB6);
++		nv_wr32(dev, 0x400B80, 0x18B82880);
++		nv_wr32(dev, 0x400B84, 0x44000000);
++		nv_wr32(dev, 0x400098, 0x40000080);
++		nv_wr32(dev, 0x400B88, 0x000000ff);
++	} else {
++		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
++		nv_wr32(dev, 0x400094, 0x00000005);
++		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
++		nv_wr32(dev, 0x400B84, 0x24000000);
++		nv_wr32(dev, 0x400098, 0x00000040);
++		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
++		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
++	}
++
++	/* Turn all the tiling regions off. */
++	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
++		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
++
++	for (i = 0; i < 8; i++) {
++		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
++		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
++					nv_rd32(dev, 0x100300 + i * 4));
++	}
++	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
++	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
++	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
++
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
++	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++
++	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
++	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
++	tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
++	nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
++
++	/* begin RAM config */
++	vramsz = drm_get_resource_len(dev, 0) - 1;
++	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
++	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
++	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
++	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
++	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
++	nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
++	nv_wr32(dev, 0x400820, 0);
++	nv_wr32(dev, 0x400824, 0);
++	nv_wr32(dev, 0x400864, vramsz - 1);
++	nv_wr32(dev, 0x400868, vramsz - 1);
++
++	/* interesting.. the below overwrites some of the tile setup above.. */
++	nv_wr32(dev, 0x400B20, 0x00000000);
++	nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
++
++	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
++	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
++	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
++	nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
++
++	return 0;
++}
++
++void
++nv20_graph_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
++}
++
++int
++nv30_graph_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int ret, i;
++
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);
++
++	if (!dev_priv->ctx_table) {
++		/* Create Context Pointer Table */
++		dev_priv->ctx_table_size = 32 * 4;
++		ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
++						  dev_priv->ctx_table_size, 16,
++						  NVOBJ_FLAG_ZERO_ALLOC,
++						  &dev_priv->ctx_table);
++		if (ret)
++			return ret;
++	}
++
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
++			dev_priv->ctx_table->instance >> 4);
++
++	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
++	nv_wr32(dev, 0x400890, 0x01b463ff);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
++	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
++	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
++	nv_wr32(dev, 0x400B80, 0x1003d888);
++	nv_wr32(dev, 0x400B84, 0x0c000000);
++	nv_wr32(dev, 0x400098, 0x00000000);
++	nv_wr32(dev, 0x40009C, 0x0005ad00);
++	nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
++	nv_wr32(dev, 0x4000a0, 0x00000000);
++	nv_wr32(dev, 0x4000a4, 0x00000008);
++	nv_wr32(dev, 0x4008a8, 0xb784a400);
++	nv_wr32(dev, 0x400ba0, 0x002f8685);
++	nv_wr32(dev, 0x400ba4, 0x00231f3f);
++	nv_wr32(dev, 0x4008a4, 0x40000020);
++
++	if (dev_priv->chipset == 0x34) {
++		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
++		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
++		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
++		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
++		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
++	}
++
++	nv_wr32(dev, 0x4000c0, 0x00000016);
++
++	/* Turn all the tiling regions off. */
++	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
++		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
++
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
++	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++	nv_wr32(dev, 0x0040075c             , 0x00000001);
++
++	/* begin RAM config */
++	/* vramsz = drm_get_resource_len(dev, 0) - 1; */
++	nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
++	nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
++	if (dev_priv->chipset != 0x34) {
++		nv_wr32(dev, 0x400750, 0x00EA0000);
++		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
++		nv_wr32(dev, 0x400750, 0x00EA0004);
++		nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
++	}
++
++	return 0;
++}
++
++struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
++	{ 0x0030, false, NULL }, /* null */
++	{ 0x0039, false, NULL }, /* m2mf */
++	{ 0x004a, false, NULL }, /* gdirect */
++	{ 0x009f, false, NULL }, /* imageblit (nv12) */
++	{ 0x008a, false, NULL }, /* ifc */
++	{ 0x0089, false, NULL }, /* sifm */
++	{ 0x0062, false, NULL }, /* surf2d */
++	{ 0x0043, false, NULL }, /* rop */
++	{ 0x0012, false, NULL }, /* beta1 */
++	{ 0x0072, false, NULL }, /* beta4 */
++	{ 0x0019, false, NULL }, /* cliprect */
++	{ 0x0044, false, NULL }, /* pattern */
++	{ 0x009e, false, NULL }, /* swzsurf */
++	{ 0x0096, false, NULL }, /* celsius */
++	{ 0x0097, false, NULL }, /* kelvin (nv20) */
++	{ 0x0597, false, NULL }, /* kelvin (nv25) */
++	{}
++};
++
++struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
++	{ 0x0030, false, NULL }, /* null */
++	{ 0x0039, false, NULL }, /* m2mf */
++	{ 0x004a, false, NULL }, /* gdirect */
++	{ 0x009f, false, NULL }, /* imageblit (nv12) */
++	{ 0x008a, false, NULL }, /* ifc */
++	{ 0x038a, false, NULL }, /* ifc (nv30) */
++	{ 0x0089, false, NULL }, /* sifm */
++	{ 0x0389, false, NULL }, /* sifm (nv30) */
++	{ 0x0062, false, NULL }, /* surf2d */
++	{ 0x0362, false, NULL }, /* surf2d (nv30) */
++	{ 0x0043, false, NULL }, /* rop */
++	{ 0x0012, false, NULL }, /* beta1 */
++	{ 0x0072, false, NULL }, /* beta4 */
++	{ 0x0019, false, NULL }, /* cliprect */
++	{ 0x0044, false, NULL }, /* pattern */
++	{ 0x039e, false, NULL }, /* swzsurf */
++	{ 0x0397, false, NULL }, /* rankine (nv30) */
++	{ 0x0497, false, NULL }, /* rankine (nv35) */
++	{ 0x0697, false, NULL }, /* rankine (nv34) */
++	{}
++};
++
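
For reference, the tiling-region helpers in the file above all use the same
encoding: TLIMIT holds the address of the last byte of the region, TILE holds
the base address with bit 0 set whenever the region is pitched (pitch != 0),
and the max(1u, addr + size) - 1 dance only exists so that an all-zero
(disabled) region encodes to limit 0.  A minimal standalone sketch of that
arithmetic; the region values in main() are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Same encoding as nv20_graph_set_region_tiling() above. */
static void
encode_tile_region(uint32_t addr, uint32_t size, uint32_t pitch,
		   uint32_t *tile, uint32_t *tlimit, uint32_t *tsize)
{
	uint32_t end = addr + size;
	uint32_t limit = (end > 1 ? end : 1) - 1;	/* max(1u, end) - 1 */

	if (pitch)
		addr |= 1;	/* bit 0 set for a pitched region */

	*tile = addr;
	*tlimit = limit;
	*tsize = pitch;
}

int main(void)
{
	uint32_t tile, tlimit, tsize;

	/* hypothetical 512 KiB region at 1 MiB with a 0x8000-byte pitch */
	encode_tile_region(0x00100000, 0x00080000, 0x00008000,
			   &tile, &tlimit, &tsize);
	printf("TILE=0x%08x TLIMIT=0x%08x TSIZE=0x%08x\n",
	       tile, tlimit, tsize);
	/* prints TILE=0x00100001 TLIMIT=0x0017ffff TSIZE=0x00008000 */
	return 0;
}

Passing all zeroes, as the "turn all the tiling regions off" loops do, yields
TILE=0, TLIMIT=0, TSIZE=0.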
+diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
+new file mode 100644
+index 0000000..3cd07d8
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv40_fb.c
+@@ -0,0 +1,75 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++void
++nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
++			  uint32_t size, uint32_t pitch)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t limit = max(1u, addr + size) - 1;
++
++	if (pitch)
++		addr |= 1;
++
++	switch (dev_priv->chipset) {
++	case 0x40:
++		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
++		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
++		nv_wr32(dev, NV10_PFB_TILE(i), addr);
++		break;
++
++	default:
++		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
++		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
++		nv_wr32(dev, NV40_PFB_TILE(i), addr);
++		break;
++	}
++}
++
++int
++nv40_fb_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	uint32_t tmp;
++	int i;
++
++	/* This is strictly an NV4x register (don't know about NV5x).
++	 * The blob sets these to all kinds of values, and they mess up
++	 * our setup; I got the value 0x52802 instead, and for some cards
++	 * the blob even sets it back to 0x1.  The blob doesn't read this
++	 * value back, so I'm pretty sure this is safe for all cards.
++	 * Any idea what this is? */
++	nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
++
++	switch (dev_priv->chipset) {
++	case 0x40:
++	case 0x45:
++		tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
++		nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
++		pfb->num_tiles = NV10_PFB_TILE__SIZE;
++		break;
++	case 0x46: /* G72 */
++	case 0x47: /* G70 */
++	case 0x49: /* G71 */
++	case 0x4b: /* G73 */
++	case 0x4c: /* C51 (G7X version) */
++		pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
++		break;
++	default:
++		pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
++		break;
++	}
++
++	/* Turn all the tiling regions off. */
++	for (i = 0; i < pfb->num_tiles; i++)
++		pfb->set_region_tiling(dev, i, 0, 0, 0);
++
++	return 0;
++}
++
++void
++nv40_fb_takedown(struct drm_device *dev)
++{
++}
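
The two chipset switches in nv40_fb.c above amount to a small decision table:
only chipset 0x40 is written through the NV10-style PFB tile registers, 0x40
and 0x45 keep the NV10 region count, the G7x-era parts get
NV40_PFB_TILE__SIZE_1 regions, and everything else gets NV40_PFB_TILE__SIZE_0.
A sketch of that table as standalone helpers (the helper names are mine, not
the driver's; the __SIZE constants are the ones already used above):

/* Illustrative restatement of the switches in nv40_fb.c, not driver code. */
enum pfb_bank { PFB_BANK_NV10, PFB_BANK_NV40 };

static enum pfb_bank
nv4x_pfb_bank(int chipset)
{
	/* nv40_fb_set_region_tiling() special-cases 0x40 only; note that
	 * 0x45 therefore uses the NV40 bank despite its NV10 region count */
	return chipset == 0x40 ? PFB_BANK_NV10 : PFB_BANK_NV40;
}

static int
nv4x_pfb_num_tiles(int chipset)
{
	switch (chipset) {
	case 0x40:
	case 0x45:
		return NV10_PFB_TILE__SIZE;
	case 0x46: case 0x47: case 0x49: case 0x4b: case 0x4c:
		return NV40_PFB_TILE__SIZE_1;	/* G7x family */
	default:
		return NV40_PFB_TILE__SIZE_0;
	}
}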
+diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
+new file mode 100644
+index 0000000..b4f19cc
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
+@@ -0,0 +1,314 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
++#define NV40_RAMFC__SIZE 128
++
++int
++nv40_fifo_create_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t fc = NV40_RAMFC(chan->id);
++	int ret;
++
++	ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
++				      NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
++				      NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
++	if (ret)
++		return ret;
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	nv_wi32(dev, fc +  0, chan->pushbuf_base);
++	nv_wi32(dev, fc +  4, chan->pushbuf_base);
++	nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
++	nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
++			      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
++			      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
++#ifdef __BIG_ENDIAN
++			      NV_PFIFO_CACHE1_BIG_ENDIAN |
++#endif
++			      0x30000000 /* no idea.. */);
++	nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
++	nv_wi32(dev, fc + 60, 0x0001FFFF);
++	dev_priv->engine.instmem.finish_access(dev);
++
++	/* enable the fifo dma operation */
++	nv_wr32(dev, NV04_PFIFO_MODE,
++		nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
++	return 0;
++}
++
++void
++nv40_fifo_destroy_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++
++	nv_wr32(dev, NV04_PFIFO_MODE,
++		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
++
++	if (chan->ramfc)
++		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++}
++
++static void
++nv40_fifo_do_load_context(struct drm_device *dev, int chid)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
++
++	dev_priv->engine.instmem.prepare_access(dev, false);
++
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
++	nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
++
++	/* No idea what 0x2058 is.. */
++	tmp   = nv_ri32(dev, fc + 24);
++	tmp2  = nv_rd32(dev, 0x2058) & 0xFFF;
++	tmp2 |= (tmp & 0x30000000);
++	nv_wr32(dev, 0x2058, tmp2);
++	tmp  &= ~0x30000000;
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
++
++	nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
++	nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
++	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
++	tmp = nv_ri32(dev, fc + 40);
++	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
++	nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
++	nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
++	nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
++	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
++
++	/* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
++	tmp  = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
++	tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
++	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
++
++	nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
++	/* NVIDIA does this next line twice... */
++	nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
++	nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
++	nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
++
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
++}
++
++int
++nv40_fifo_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	uint32_t tmp;
++
++	nv40_fifo_do_load_context(dev, chan->id);
++
++	/* Set channel active, and in DMA mode */
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
++		     NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
++
++	/* Reset DMA_CTL_AT_INFO to INVALID */
++	tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
++	nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
++
++	return 0;
++}
++
++int
++nv40_fifo_unload_context(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	uint32_t fc, tmp;
++	int chid;
++
++	chid = pfifo->channel_id(dev);
++	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
++		return 0;
++	fc = NV40_RAMFC(chid);
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
++	nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
++	nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
++	nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
++	nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
++	nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
++	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
++	tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
++	nv_wi32(dev, fc + 24, tmp);
++	nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
++	nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
++	nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
++	tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
++	nv_wi32(dev, fc + 40, tmp);
++	nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
++	nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
++	/* NVIDIA reads 0x3228 first, then writes DMA_GET here.. maybe
++	 * something more involved happens depending on the value of 0x3228?
++	 */
++	nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
++	nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
++	nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
++	/* No idea what the below is for exactly; ripped from an mmio-trace */
++	nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
++	/* NVIDIA does this next line twice.. bug? */
++	nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
++	nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
++	nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
++#if 0 /* no real idea which is PUT/GET in UNK_48.. */
++	tmp  = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
++	tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
++	nv_wi32(dev, fc + 72, tmp);
++#endif
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
++		     NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
++	return 0;
++}
++
++static void
++nv40_fifo_init_reset(struct drm_device *dev)
++{
++	int i;
++
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
++	nv_wr32(dev, NV03_PMC_ENABLE,
++		nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);
++
++	nv_wr32(dev, 0x003224, 0x000f0078);
++	nv_wr32(dev, 0x003210, 0x00000000);
++	nv_wr32(dev, 0x003270, 0x00000000);
++	nv_wr32(dev, 0x003240, 0x00000000);
++	nv_wr32(dev, 0x003244, 0x00000000);
++	nv_wr32(dev, 0x003258, 0x00000000);
++	nv_wr32(dev, 0x002504, 0x00000000);
++	for (i = 0; i < 16; i++)
++		nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
++	nv_wr32(dev, 0x00250c, 0x0000ffff);
++	nv_wr32(dev, 0x002048, 0x00000000);
++	nv_wr32(dev, 0x003228, 0x00000000);
++	nv_wr32(dev, 0x0032e8, 0x00000000);
++	nv_wr32(dev, 0x002410, 0x00000000);
++	nv_wr32(dev, 0x002420, 0x00000000);
++	nv_wr32(dev, 0x002058, 0x00000001);
++	nv_wr32(dev, 0x00221c, 0x00000000);
++	/* something with 0x2084, read/modify/write, no change */
++	nv_wr32(dev, 0x002040, 0x000000ff);
++	nv_wr32(dev, 0x002500, 0x00000000);
++	nv_wr32(dev, 0x003200, 0x00000000);
++
++	nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
++}
++
++static void
++nv40_fifo_init_ramxx(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
++				       ((dev_priv->ramht_bits - 9) << 16) |
++				       (dev_priv->ramht_offset >> 8));
++	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
++
++	switch (dev_priv->chipset) {
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		nv_wr32(dev, 0x2230, 1);
++		break;
++	default:
++		break;
++	}
++
++	switch (dev_priv->chipset) {
++	case 0x40:
++	case 0x41:
++	case 0x42:
++	case 0x43:
++	case 0x45:
++	case 0x47:
++	case 0x48:
++	case 0x49:
++	case 0x4b:
++		nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
++		break;
++	default:
++		nv_wr32(dev, 0x2230, 0);
++		nv_wr32(dev, NV40_PFIFO_RAMFC,
++			((nouveau_mem_fb_amount(dev) - 512 * 1024 +
++			  dev_priv->ramfc_offset) >> 16) | (3 << 16));
++		break;
++	}
++}
++
++static void
++nv40_fifo_init_intr(struct drm_device *dev)
++{
++	nv_wr32(dev, 0x002100, 0xffffffff);
++	nv_wr32(dev, 0x002140, 0xffffffff);
++}
++
++int
++nv40_fifo_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	int i;
++
++	nv40_fifo_init_reset(dev);
++	nv40_fifo_init_ramxx(dev);
++
++	nv40_fifo_do_load_context(dev, pfifo->channels - 1);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
++
++	nv40_fifo_init_intr(dev);
++	pfifo->enable(dev);
++	pfifo->reassign(dev, true);
++
++	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++		if (dev_priv->fifos[i]) {
++			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
++			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
++		}
++	}
++
++	return 0;
++}
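
RAMFC addressing in the file above is plain fixed-slot arithmetic: channel c's
FIFO context lives at ramfc_offset + c * 128, and the load/unload paths read
and write fields at fixed byte offsets within that slot (DMA_PUT at +0,
DMA_GET at +4, the PGRAPH context instance at +56, and so on).  A quick
standalone check of the macro, using a made-up ramfc_offset:

#include <assert.h>
#include <stdint.h>

#define NV40_RAMFC__SIZE 128
/* same shape as NV40_RAMFC() above, with the base passed explicitly */
#define NV40_RAMFC(base, c) ((base) + ((c) * NV40_RAMFC__SIZE))

int main(void)
{
	uint32_t ramfc_offset = 0x20000;	/* hypothetical instmem offset */

	assert(NV40_RAMFC(ramfc_offset, 0) == 0x20000);
	assert(NV40_RAMFC(ramfc_offset, 1) == 0x20080);
	/* grctx instance field of channel 3: slot base + 56 */
	assert(NV40_RAMFC(ramfc_offset, 3) + 56 == 0x201b8);
	return 0;
}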
+diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
+new file mode 100644
+index 0000000..53e8afe
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv40_graph.c
+@@ -0,0 +1,406 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_grctx.h"
++
++struct nouveau_channel *
++nv40_graph_channel(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t inst;
++	int i;
++
++	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
++	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
++		return NULL;
++	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
++
++	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++		struct nouveau_channel *chan = dev_priv->fifos[i];
++
++		if (chan && chan->ramin_grctx &&
++		    chan->ramin_grctx->instance == inst)
++			return chan;
++	}
++
++	return NULL;
++}
++
++int
++nv40_graph_create_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
++	int ret;
++
++	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
++				     16, NVOBJ_FLAG_ZERO_ALLOC,
++				     &chan->ramin_grctx);
++	if (ret)
++		return ret;
++
++	/* Initialise default context values */
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	if (!pgraph->ctxprog) {
++		struct nouveau_grctx ctx = {};
++
++		ctx.dev = chan->dev;
++		ctx.mode = NOUVEAU_GRCTX_VALS;
++		ctx.data = chan->ramin_grctx->gpuobj;
++		nv40_grctx_init(&ctx);
++	} else {
++		nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
++	}
++	nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
++		     chan->ramin_grctx->gpuobj->im_pramin->start);
++	dev_priv->engine.instmem.finish_access(dev);
++	return 0;
++}
++
++void
++nv40_graph_destroy_context(struct nouveau_channel *chan)
++{
++	nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
++}
++
++static int
++nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
++{
++	uint32_t old_cp, tv = 1000, tmp;
++	int i;
++
++	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++
++	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
++	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
++		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
++	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
++
++	tmp  = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
++	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
++	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
++
++	nouveau_wait_for_idle(dev);
++
++	for (i = 0; i < tv; i++) {
++		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
++			break;
++	}
++
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
++
++	if (i == tv) {
++		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
++		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
++		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
++			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
++			 ucstat  & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
++		NV_ERROR(dev, "0x40030C = 0x%08x\n",
++			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
++		return -EBUSY;
++	}
++
++	return 0;
++}
++
++/* Restore the context for a specific channel into PGRAPH */
++int
++nv40_graph_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	uint32_t inst;
++	int ret;
++
++	if (!chan->ramin_grctx)
++		return -EINVAL;
++	inst = chan->ramin_grctx->instance >> 4;
++
++	ret = nv40_graph_transfer_context(dev, inst, 0);
++	if (ret)
++		return ret;
++
++	/* 0x40032C, no idea of its exact function.  Could simply be a
++	 * record of the currently active PGRAPH context.  It's currently
++	 * unknown what bit 24 does.  The nv ddx has it set, so we will
++	 * set it here too.
++	 */
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
++	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
++		 (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
++		  NV40_PGRAPH_CTXCTL_CUR_LOADED);
++	/* 0x32E0 records the instance address of the active FIFO's PGRAPH
++	 * context.  If at any time this doesn't match 0x40032C, you will
++	 * receive a PGRAPH_INTR_CONTEXT_SWITCH interrupt.
++	 */
++	nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
++	return 0;
++}
++
++int
++nv40_graph_unload_context(struct drm_device *dev)
++{
++	uint32_t inst;
++	int ret;
++
++	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
++	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
++		return 0;
++	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
++
++	ret = nv40_graph_transfer_context(dev, inst, 1);
++
++	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
++	return ret;
++}
++
++void
++nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
++			     uint32_t size, uint32_t pitch)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t limit = max(1u, addr + size) - 1;
++
++	if (pitch)
++		addr |= 1;
++
++	switch (dev_priv->chipset) {
++	case 0x44:
++	case 0x4a:
++	case 0x4e:
++		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
++		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
++		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
++		break;
++
++	case 0x46:
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
++		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
++		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
++		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
++		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
++		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
++		break;
++
++	default:
++		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
++		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
++		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
++		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
++		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
++		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
++		break;
++	}
++}
++
++/*
++ * G70		0x47
++ * G71		0x49
++ * NV45		0x48
++ * G72[M]	0x46
++ * G73		0x4b
++ * C51_G7X	0x4c
++ * C51		0x4e
++ */
++int
++nv40_graph_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
++	uint32_t vramsz;
++	int i, j;
++
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
++			~NV_PMC_ENABLE_PGRAPH);
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
++			 NV_PMC_ENABLE_PGRAPH);
++
++	if (nouveau_ctxfw) {
++		nouveau_grctx_prog_load(dev);
++		dev_priv->engine.graph.grctx_size = 175 * 1024;
++	}
++
++	if (!dev_priv->engine.graph.ctxprog) {
++		struct nouveau_grctx ctx = {};
++		uint32_t cp[256];
++
++		ctx.dev = dev;
++		ctx.mode = NOUVEAU_GRCTX_PROG;
++		ctx.data = cp;
++		ctx.ctxprog_max = 256;
++		nv40_grctx_init(&ctx);
++		dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
++
++		nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
++		for (i = 0; i < ctx.ctxprog_len; i++)
++			nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
++	}
++
++	/* No context present currently */
++	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
++
++	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
++	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
++
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
++	nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
++	nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
++
++	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
++	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
++
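++	/* 0x1540 appears to hold a unit enable mask; the loop below finds
++	 * the index of its lowest set bit and writes that to 0x405000. */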
++	j = nv_rd32(dev, 0x1540) & 0xff;
++	if (j) {
++		for (i = 0; !(j & 1); j >>= 1, i++)
++			;
++		nv_wr32(dev, 0x405000, i);
++	}
++
++	if (dev_priv->chipset == 0x40) {
++		nv_wr32(dev, 0x4009b0, 0x83280fff);
++		nv_wr32(dev, 0x4009b4, 0x000000a0);
++	} else {
++		nv_wr32(dev, 0x400820, 0x83280eff);
++		nv_wr32(dev, 0x400824, 0x000000a0);
++	}
++
++	switch (dev_priv->chipset) {
++	case 0x40:
++	case 0x45:
++		nv_wr32(dev, 0x4009b8, 0x0078e366);
++		nv_wr32(dev, 0x4009bc, 0x0000014c);
++		break;
++	case 0x41:
++	case 0x42: /* pciid also 0x00Cx */
++	/* case 0x0120: XXX (pciid) */
++		nv_wr32(dev, 0x400828, 0x007596ff);
++		nv_wr32(dev, 0x40082c, 0x00000108);
++		break;
++	case 0x43:
++		nv_wr32(dev, 0x400828, 0x0072cb77);
++		nv_wr32(dev, 0x40082c, 0x00000108);
++		break;
++	case 0x44:
++	case 0x46: /* G72 */
++	case 0x4a:
++	case 0x4c: /* G7x-based C51 */
++	case 0x4e:
++		nv_wr32(dev, 0x400860, 0);
++		nv_wr32(dev, 0x400864, 0);
++		break;
++	case 0x47: /* G70 */
++	case 0x49: /* G71 */
++	case 0x4b: /* G73 */
++		nv_wr32(dev, 0x400828, 0x07830610);
++		nv_wr32(dev, 0x40082c, 0x0000016A);
++		break;
++	default:
++		break;
++	}
++
++	nv_wr32(dev, 0x400b38, 0x2ffff800);
++	nv_wr32(dev, 0x400b3c, 0x00006000);
++
++	/* Turn all the tiling regions off. */
++	for (i = 0; i < pfb->num_tiles; i++)
++		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
++
++	/* begin RAM config */
++	vramsz = drm_get_resource_len(dev, 0) - 1;
++	switch (dev_priv->chipset) {
++	case 0x40:
++		nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
++		nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
++		nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
++		nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
++		nv_wr32(dev, 0x400820, 0);
++		nv_wr32(dev, 0x400824, 0);
++		nv_wr32(dev, 0x400864, vramsz);
++		nv_wr32(dev, 0x400868, vramsz);
++		break;
++	default:
++		switch (dev_priv->chipset) {
++		case 0x46:
++		case 0x47:
++		case 0x49:
++		case 0x4b:
++			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
++			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
++			break;
++		default:
++			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
++			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
++			break;
++		}
++		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
++		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
++		nv_wr32(dev, 0x400840, 0);
++		nv_wr32(dev, 0x400844, 0);
++		nv_wr32(dev, 0x4008A0, vramsz);
++		nv_wr32(dev, 0x4008A4, vramsz);
++		break;
++	}
++
++	return 0;
++}
++
++void nv40_graph_takedown(struct drm_device *dev)
++{
++	nouveau_grctx_fini(dev);
++}
++
++struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
++	{ 0x0030, false, NULL }, /* null */
++	{ 0x0039, false, NULL }, /* m2mf */
++	{ 0x004a, false, NULL }, /* gdirect */
++	{ 0x009f, false, NULL }, /* imageblit (nv12) */
++	{ 0x008a, false, NULL }, /* ifc */
++	{ 0x0089, false, NULL }, /* sifm */
++	{ 0x3089, false, NULL }, /* sifm (nv40) */
++	{ 0x0062, false, NULL }, /* surf2d */
++	{ 0x3062, false, NULL }, /* surf2d (nv40) */
++	{ 0x0043, false, NULL }, /* rop */
++	{ 0x0012, false, NULL }, /* beta1 */
++	{ 0x0072, false, NULL }, /* beta4 */
++	{ 0x0019, false, NULL }, /* cliprect */
++	{ 0x0044, false, NULL }, /* pattern */
++	{ 0x309e, false, NULL }, /* swzsurf */
++	{ 0x4097, false, NULL }, /* curie (nv40) */
++	{ 0x4497, false, NULL }, /* curie (nv44) */
++	{}
++};
++
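
The open-coded bit scan in nv40_graph_init() above (the loop over the value
read from 0x1540) computes the index of the lowest set bit.  A self-contained
check that it matches the C library's one-based ffs():

#include <assert.h>
#include <strings.h>	/* ffs() */

/* the same scan as the 0x1540 loop in nv40_graph_init() */
static int
lowest_set_bit(unsigned int j)
{
	int i;

	for (i = 0; !(j & 1); j >>= 1, i++)
		;
	return i;
}

int main(void)
{
	unsigned int j;

	for (j = 1; j <= 0xff; j++)
		assert(lowest_set_bit(j) == ffs(j) - 1);
	return 0;
}

Note that, like ffs(0), the scan has no answer for j == 0 (it would spin
forever), which is why the driver guards the loop with if (j).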
+diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c
+new file mode 100644
+index 0000000..11b11c3
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv40_grctx.c
+@@ -0,0 +1,678 @@
++/*
++ * Copyright 2009 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Ben Skeggs
++ */
++
++/* NVIDIA context programs handle a number of other conditions which are
++ * not implemented in our versions.  It's not clear why NVIDIA context
++ * programs have this code, nor whether it's strictly necessary for
++ * correct operation.  We'll implement additional handling if/when we
++ * discover it's necessary.
++ *
++ * - On context save, NVIDIA sets 0x400314 bit 0 to 1 if the "3D state"
++ *   flag is set; this gets saved into the context.
++ * - On context save, the context program for all cards loads nsource
++ *   into a flag register and checks for ILLEGAL_MTHD.  If it's set,
++ *   opcode 0x60000d is called before resuming normal operation.
++ * - Some context programs check more conditions than the above.  NV44
++ *   checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
++ *   and calls 0x60000d before resuming normal operation.
++ * - At the very beginning of NVIDIA's context programs, flag 9 is checked;
++ *   if it's set, 0x800001 is called with count=0, pos=0, the flag is
++ *   cleared, and then the ctxprog is aborted.  It looks like a complicated
++ *   NOP; its purpose is unknown.
++ * - In the section of code that loads the per-vs state, NVIDIA checks
++ *   flag 10.  If it's set, only the small 0x300-byte block of state plus
++ *   the state for a single vs unit is transferred, as opposed to the state
++ *   for all vs units.  It doesn't seem likely that this occurs in normal
++ *   operation, especially seeing as it appears NVIDIA may have screwed
++ *   up the ctxprogs for some cards and left an invalid instruction
++ *   rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
++ * - There are a number of places where context offset 0 (where we place
++ *   the PRAMIN offset of the context) is loaded into either 0x408000,
++ *   0x408004 or 0x408008.  Not sure what's up there either.
++ * - The ctxprogs for some cards save 0x400a00 again during the cleanup
++ *   path for auto-loadctx.
++ */
++
++#define CP_FLAG_CLEAR                 0
++#define CP_FLAG_SET                   1
++#define CP_FLAG_SWAP_DIRECTION        ((0 * 32) + 0)
++#define CP_FLAG_SWAP_DIRECTION_LOAD   0
++#define CP_FLAG_SWAP_DIRECTION_SAVE   1
++#define CP_FLAG_USER_SAVE             ((0 * 32) + 5)
++#define CP_FLAG_USER_SAVE_NOT_PENDING 0
++#define CP_FLAG_USER_SAVE_PENDING     1
++#define CP_FLAG_USER_LOAD             ((0 * 32) + 6)
++#define CP_FLAG_USER_LOAD_NOT_PENDING 0
++#define CP_FLAG_USER_LOAD_PENDING     1
++#define CP_FLAG_STATUS                ((3 * 32) + 0)
++#define CP_FLAG_STATUS_IDLE           0
++#define CP_FLAG_STATUS_BUSY           1
++#define CP_FLAG_AUTO_SAVE             ((3 * 32) + 4)
++#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
++#define CP_FLAG_AUTO_SAVE_PENDING     1
++#define CP_FLAG_AUTO_LOAD             ((3 * 32) + 5)
++#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
++#define CP_FLAG_AUTO_LOAD_PENDING     1
++#define CP_FLAG_UNK54                 ((3 * 32) + 6)
++#define CP_FLAG_UNK54_CLEAR           0
++#define CP_FLAG_UNK54_SET             1
++#define CP_FLAG_ALWAYS                ((3 * 32) + 8)
++#define CP_FLAG_ALWAYS_FALSE          0
++#define CP_FLAG_ALWAYS_TRUE           1
++#define CP_FLAG_UNK57                 ((3 * 32) + 9)
++#define CP_FLAG_UNK57_CLEAR           0
++#define CP_FLAG_UNK57_SET             1
++
++#define CP_CTX                   0x00100000
++#define CP_CTX_COUNT             0x000fc000
++#define CP_CTX_COUNT_SHIFT               14
++#define CP_CTX_REG               0x00003fff
++#define CP_LOAD_SR               0x00200000
++#define CP_LOAD_SR_VALUE         0x000fffff
++#define CP_BRA                   0x00400000
++#define CP_BRA_IP                0x0000ff00
++#define CP_BRA_IP_SHIFT                   8
++#define CP_BRA_IF_CLEAR          0x00000080
++#define CP_BRA_FLAG              0x0000007f
++#define CP_WAIT                  0x00500000
++#define CP_WAIT_SET              0x00000080
++#define CP_WAIT_FLAG             0x0000007f
++#define CP_SET                   0x00700000
++#define CP_SET_1                 0x00000080
++#define CP_SET_FLAG              0x0000007f
++#define CP_NEXT_TO_SWAP          0x00600007
++#define CP_NEXT_TO_CURRENT       0x00600009
++#define CP_SET_CONTEXT_POINTER   0x0060000a
++#define CP_END                   0x0060000e
++#define CP_LOAD_MAGIC_UNK01      0x00800001 /* unknown */
++#define CP_LOAD_MAGIC_NV44TCL    0x00800029 /* per-vs state (0x4497) */
++#define CP_LOAD_MAGIC_NV40TCL    0x00800041 /* per-vs state (0x4097) */
++
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_grctx.h"
++
++/* TODO:
++ *  - get vs count from 0x1540
++ *  - document unimplemented bits compared to nvidia
++ *    - nsource handling
++ *    - R0 & 0x0200 handling
++ *    - single-vs handling
++ *    - 400314 bit 0
++ */
++
++static int
++nv40_graph_4097(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if ((dev_priv->chipset & 0xf0) == 0x60)
++		return 0;
++
++	/* chipsets here are 0x40-0x4f; mask the shift count so it stays
++	 * within a 32-bit word instead of relying on undefined behaviour */
++	return !!(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
++}
++
++static int
++nv40_graph_vs_count(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	switch (dev_priv->chipset) {
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		return 8;
++	case 0x40:
++		return 6;
++	case 0x41:
++	case 0x42:
++		return 5;
++	case 0x43:
++	case 0x44:
++	case 0x46:
++	case 0x4a:
++		return 3;
++	case 0x4c:
++	case 0x4e:
++	case 0x67:
++	default:
++		return 1;
++	}
++}
++
++
++enum cp_label {
++	cp_check_load = 1,
++	cp_setup_auto_load,
++	cp_setup_load,
++	cp_setup_save,
++	cp_swap_state,
++	cp_swap_state3d_3_is_save,
++	cp_prepare_exit,
++	cp_exit,
++};
++
++static void
++nv40_graph_construct_general(struct nouveau_grctx *ctx)
++{
++	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
++	int i;
++
++	cp_ctx(ctx, 0x4000a4, 1);
++	gr_def(ctx, 0x4000a4, 0x00000008);
++	cp_ctx(ctx, 0x400144, 58);
++	gr_def(ctx, 0x400144, 0x00000001);
++	cp_ctx(ctx, 0x400314, 1);
++	gr_def(ctx, 0x400314, 0x00000000);
++	cp_ctx(ctx, 0x400400, 10);
++	cp_ctx(ctx, 0x400480, 10);
++	cp_ctx(ctx, 0x400500, 19);
++	gr_def(ctx, 0x400514, 0x00040000);
++	gr_def(ctx, 0x400524, 0x55555555);
++	gr_def(ctx, 0x400528, 0x55555555);
++	gr_def(ctx, 0x40052c, 0x55555555);
++	gr_def(ctx, 0x400530, 0x55555555);
++	cp_ctx(ctx, 0x400560, 6);
++	gr_def(ctx, 0x400568, 0x0000ffff);
++	gr_def(ctx, 0x40056c, 0x0000ffff);
++	cp_ctx(ctx, 0x40057c, 5);
++	cp_ctx(ctx, 0x400710, 3);
++	gr_def(ctx, 0x400710, 0x20010001);
++	gr_def(ctx, 0x400714, 0x0f73ef00);
++	cp_ctx(ctx, 0x400724, 1);
++	gr_def(ctx, 0x400724, 0x02008821);
++	cp_ctx(ctx, 0x400770, 3);
++	if (dev_priv->chipset == 0x40) {
++		cp_ctx(ctx, 0x400814, 4);
++		cp_ctx(ctx, 0x400828, 5);
++		cp_ctx(ctx, 0x400840, 5);
++		gr_def(ctx, 0x400850, 0x00000040);
++		cp_ctx(ctx, 0x400858, 4);
++		gr_def(ctx, 0x400858, 0x00000040);
++		gr_def(ctx, 0x40085c, 0x00000040);
++		gr_def(ctx, 0x400864, 0x80000000);
++		cp_ctx(ctx, 0x40086c, 9);
++		gr_def(ctx, 0x40086c, 0x80000000);
++		gr_def(ctx, 0x400870, 0x80000000);
++		gr_def(ctx, 0x400874, 0x80000000);
++		gr_def(ctx, 0x400878, 0x80000000);
++		gr_def(ctx, 0x400888, 0x00000040);
++		gr_def(ctx, 0x40088c, 0x80000000);
++		cp_ctx(ctx, 0x4009c0, 8);
++		gr_def(ctx, 0x4009cc, 0x80000000);
++		gr_def(ctx, 0x4009dc, 0x80000000);
++	} else {
++		cp_ctx(ctx, 0x400840, 20);
++		if (!nv40_graph_4097(ctx->dev)) {
++			for (i = 0; i < 8; i++)
++				gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
++		}
++		gr_def(ctx, 0x400880, 0x00000040);
++		gr_def(ctx, 0x400884, 0x00000040);
++		gr_def(ctx, 0x400888, 0x00000040);
++		cp_ctx(ctx, 0x400894, 11);
++		gr_def(ctx, 0x400894, 0x00000040);
++		if (nv40_graph_4097(ctx->dev)) {
++			for (i = 0; i < 8; i++)
++				gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
++		}
++		cp_ctx(ctx, 0x4008e0, 2);
++		cp_ctx(ctx, 0x4008f8, 2);
++		if (dev_priv->chipset == 0x4c ||
++		    (dev_priv->chipset & 0xf0) == 0x60)
++			cp_ctx(ctx, 0x4009f8, 1);
++	}
++	cp_ctx(ctx, 0x400a00, 73);
++	gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
++	cp_ctx(ctx, 0x401000, 4);
++	cp_ctx(ctx, 0x405004, 1);
++	switch (dev_priv->chipset) {
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		cp_ctx(ctx, 0x403448, 1);
++		gr_def(ctx, 0x403448, 0x00001010);
++		break;
++	default:
++		cp_ctx(ctx, 0x403440, 1);
++		switch (dev_priv->chipset) {
++		case 0x40:
++			gr_def(ctx, 0x403440, 0x00000010);
++			break;
++		case 0x44:
++		case 0x46:
++		case 0x4a:
++			gr_def(ctx, 0x403440, 0x00003010);
++			break;
++		case 0x41:
++		case 0x42:
++		case 0x43:
++		case 0x4c:
++		case 0x4e:
++		case 0x67:
++		default:
++			gr_def(ctx, 0x403440, 0x00001010);
++			break;
++		}
++		break;
++	}
++}
++
++static void
++nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
++{
++	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
++	int i;
++
++	if (dev_priv->chipset == 0x40) {
++		cp_ctx(ctx, 0x401880, 51);
++		gr_def(ctx, 0x401940, 0x00000100);
++	} else
++	if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
++	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
++		cp_ctx(ctx, 0x401880, 32);
++		for (i = 0; i < 16; i++)
++			gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
++		if (dev_priv->chipset == 0x46)
++			cp_ctx(ctx, 0x401900, 16);
++		cp_ctx(ctx, 0x401940, 3);
++	}
++	cp_ctx(ctx, 0x40194c, 18);
++	gr_def(ctx, 0x401954, 0x00000111);
++	gr_def(ctx, 0x401958, 0x00080060);
++	gr_def(ctx, 0x401974, 0x00000080);
++	gr_def(ctx, 0x401978, 0xffff0000);
++	gr_def(ctx, 0x40197c, 0x00000001);
++	gr_def(ctx, 0x401990, 0x46400000);
++	if (dev_priv->chipset == 0x40) {
++		cp_ctx(ctx, 0x4019a0, 2);
++		cp_ctx(ctx, 0x4019ac, 5);
++	} else {
++		cp_ctx(ctx, 0x4019a0, 1);
++		cp_ctx(ctx, 0x4019b4, 3);
++	}
++	gr_def(ctx, 0x4019bc, 0xffff0000);
++	switch (dev_priv->chipset) {
++	case 0x46:
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		cp_ctx(ctx, 0x4019c0, 18);
++		for (i = 0; i < 16; i++)
++			gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
++		break;
++	}
++	cp_ctx(ctx, 0x401a08, 8);
++	gr_def(ctx, 0x401a10, 0x0fff0000);
++	gr_def(ctx, 0x401a14, 0x0fff0000);
++	gr_def(ctx, 0x401a1c, 0x00011100);
++	cp_ctx(ctx, 0x401a2c, 4);
++	cp_ctx(ctx, 0x401a44, 26);
++	for (i = 0; i < 16; i++)
++		gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
++	gr_def(ctx, 0x401a8c, 0x4b7fffff);
++	if (dev_priv->chipset == 0x40) {
++		cp_ctx(ctx, 0x401ab8, 3);
++	} else {
++		cp_ctx(ctx, 0x401ab8, 1);
++		cp_ctx(ctx, 0x401ac0, 1);
++	}
++	cp_ctx(ctx, 0x401ad0, 8);
++	gr_def(ctx, 0x401ad0, 0x30201000);
++	gr_def(ctx, 0x401ad4, 0x70605040);
++	gr_def(ctx, 0x401ad8, 0xb8a89888);
++	gr_def(ctx, 0x401adc, 0xf8e8d8c8);
++	cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
++	gr_def(ctx, 0x401b10, 0x40100000);
++	cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
++	gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
++			      0x00000004 : 0x00000000);
++	cp_ctx(ctx, 0x401b30, 25);
++	gr_def(ctx, 0x401b34, 0x0000ffff);
++	gr_def(ctx, 0x401b68, 0x435185d6);
++	gr_def(ctx, 0x401b6c, 0x2155b699);
++	gr_def(ctx, 0x401b70, 0xfedcba98);
++	gr_def(ctx, 0x401b74, 0x00000098);
++	gr_def(ctx, 0x401b84, 0xffffffff);
++	gr_def(ctx, 0x401b88, 0x00ff7000);
++	gr_def(ctx, 0x401b8c, 0x0000ffff);
++	if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
++	    dev_priv->chipset != 0x4e)
++		cp_ctx(ctx, 0x401b94, 1);
++	cp_ctx(ctx, 0x401b98, 8);
++	gr_def(ctx, 0x401b9c, 0x00ff0000);
++	cp_ctx(ctx, 0x401bc0, 9);
++	gr_def(ctx, 0x401be0, 0x00ffff00);
++	cp_ctx(ctx, 0x401c00, 192);
++	for (i = 0; i < 16; i++) { /* fragment texture units */
++		gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
++		gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
++		gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
++		gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
++		gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
++		gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
++	}
++	for (i = 0; i < 4; i++) { /* vertex texture units */
++		gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
++		gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
++		gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
++		gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
++	}
++	cp_ctx(ctx, 0x400f5c, 3);
++	gr_def(ctx, 0x400f5c, 0x00000002);
++	cp_ctx(ctx, 0x400f84, 1);
++}
++
++static void
++nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
++{
++	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
++	int i;
++
++	cp_ctx(ctx, 0x402000, 1);
++	cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
++	switch (dev_priv->chipset) {
++	case 0x40:
++		gr_def(ctx, 0x402404, 0x00000001);
++		break;
++	case 0x4c:
++	case 0x4e:
++	case 0x67:
++		gr_def(ctx, 0x402404, 0x00000020);
++		break;
++	case 0x46:
++	case 0x49:
++	case 0x4b:
++		gr_def(ctx, 0x402404, 0x00000421);
++		break;
++	default:
++		gr_def(ctx, 0x402404, 0x00000021);
++	}
++	if (dev_priv->chipset != 0x40)
++		gr_def(ctx, 0x402408, 0x030c30c3);
++	switch (dev_priv->chipset) {
++	case 0x44:
++	case 0x46:
++	case 0x4a:
++	case 0x4c:
++	case 0x4e:
++	case 0x67:
++		cp_ctx(ctx, 0x402440, 1);
++		gr_def(ctx, 0x402440, 0x00011001);
++		break;
++	default:
++		break;
++	}
++	cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
++	gr_def(ctx, 0x402488, 0x3e020200);
++	gr_def(ctx, 0x40248c, 0x00ffffff);
++	switch (dev_priv->chipset) {
++	case 0x40:
++		gr_def(ctx, 0x402490, 0x60103f00);
++		break;
++	case 0x47:
++		gr_def(ctx, 0x402490, 0x40103f00);
++		break;
++	case 0x41:
++	case 0x42:
++	case 0x49:
++	case 0x4b:
++		gr_def(ctx, 0x402490, 0x20103f00);
++		break;
++	default:
++		gr_def(ctx, 0x402490, 0x0c103f00);
++		break;
++	}
++	gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
++			      0x00020000 : 0x00040000);
++	cp_ctx(ctx, 0x402500, 31);
++	gr_def(ctx, 0x402530, 0x00008100);
++	if (dev_priv->chipset == 0x40)
++		cp_ctx(ctx, 0x40257c, 6);
++	cp_ctx(ctx, 0x402594, 16);
++	cp_ctx(ctx, 0x402800, 17);
++	gr_def(ctx, 0x402800, 0x00000001);
++	switch (dev_priv->chipset) {
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		cp_ctx(ctx, 0x402864, 1);
++		gr_def(ctx, 0x402864, 0x00001001);
++		cp_ctx(ctx, 0x402870, 3);
++		gr_def(ctx, 0x402878, 0x00000003);
++		if (dev_priv->chipset != 0x47) { /* these belong at the end!! */
++			cp_ctx(ctx, 0x402900, 1);
++			cp_ctx(ctx, 0x402940, 1);
++			cp_ctx(ctx, 0x402980, 1);
++			cp_ctx(ctx, 0x4029c0, 1);
++			cp_ctx(ctx, 0x402a00, 1);
++			cp_ctx(ctx, 0x402a40, 1);
++			cp_ctx(ctx, 0x402a80, 1);
++			cp_ctx(ctx, 0x402ac0, 1);
++		}
++		break;
++	case 0x40:
++		cp_ctx(ctx, 0x402844, 1);
++		gr_def(ctx, 0x402844, 0x00000001);
++		cp_ctx(ctx, 0x402850, 1);
++		break;
++	default:
++		cp_ctx(ctx, 0x402844, 1);
++		gr_def(ctx, 0x402844, 0x00001001);
++		cp_ctx(ctx, 0x402850, 2);
++		gr_def(ctx, 0x402854, 0x00000003);
++		break;
++	}
++
++	cp_ctx(ctx, 0x402c00, 4);
++	gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
++			      0x80800001 : 0x00888001);
++	switch (dev_priv->chipset) {
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		cp_ctx(ctx, 0x402c20, 40);
++		for (i = 0; i < 32; i++)
++			gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
++		cp_ctx(ctx, 0x4030b8, 13);
++		gr_def(ctx, 0x4030dc, 0x00000005);
++		gr_def(ctx, 0x4030e8, 0x0000ffff);
++		break;
++	default:
++		cp_ctx(ctx, 0x402c10, 4);
++		if (dev_priv->chipset == 0x40)
++			cp_ctx(ctx, 0x402c20, 36);
++		else
++		if (dev_priv->chipset <= 0x42)
++			cp_ctx(ctx, 0x402c20, 24);
++		else
++		if (dev_priv->chipset <= 0x4a)
++			cp_ctx(ctx, 0x402c20, 16);
++		else
++			cp_ctx(ctx, 0x402c20, 8);
++		cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
++		gr_def(ctx, 0x402cd4, 0x00000005);
++		if (dev_priv->chipset != 0x40)
++			gr_def(ctx, 0x402ce0, 0x0000ffff);
++		break;
++	}
++
++	cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
++	cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
++	cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
++	for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
++		gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
++
++	if (dev_priv->chipset != 0x40) {
++		cp_ctx(ctx, 0x403600, 1);
++		gr_def(ctx, 0x403600, 0x00000001);
++	}
++	cp_ctx(ctx, 0x403800, 1);
++
++	cp_ctx(ctx, 0x403c18, 1);
++	gr_def(ctx, 0x403c18, 0x00000001);
++	switch (dev_priv->chipset) {
++	case 0x46:
++	case 0x47:
++	case 0x49:
++	case 0x4b:
++		cp_ctx(ctx, 0x405018, 1);
++		gr_def(ctx, 0x405018, 0x08e00001);
++		cp_ctx(ctx, 0x405c24, 1);
++		gr_def(ctx, 0x405c24, 0x000e3000);
++		break;
++	}
++	if (dev_priv->chipset != 0x4e)
++		cp_ctx(ctx, 0x405800, 11);
++	cp_ctx(ctx, 0x407000, 1);
++}
++
++static void
++nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
++{
++	int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
++
++	cp_out (ctx, 0x300000);
++	cp_lsr (ctx, len - 4);
++	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
++	cp_lsr (ctx, len);
++	cp_name(ctx, cp_swap_state3d_3_is_save);
++	cp_out (ctx, 0x800001);
++
++	ctx->ctxvals_pos += len;
++}
++
++static void
++nv40_graph_construct_shader(struct nouveau_grctx *ctx)
++{
++	struct drm_device *dev = ctx->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *obj = ctx->data;
++	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
++	int offset, i;
++
++	vs_nr    = nv40_graph_vs_count(ctx->dev);
++	vs_nr_b0 = 363;
++	vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
++	if (dev_priv->chipset == 0x40) {
++		b0_offset = 0x2200/4; /* 33a0 */
++		b1_offset = 0x55a0/4; /* 1500 */
++		vs_len = 0x6aa0/4;
++	} else
++	if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
++		b0_offset = 0x2200/4; /* 2200 */
++		b1_offset = 0x4400/4; /* 0b00 */
++		vs_len = 0x4f00/4;
++	} else {
++		b0_offset = 0x1d40/4; /* 2200 */
++		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
++		vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
++	}
++
++	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
++	cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
++
++	offset = ctx->ctxvals_pos;
++	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
++
++	if (ctx->mode != NOUVEAU_GRCTX_VALS)
++		return;
++
++	offset += 0x0280/4;
++	for (i = 0; i < 16; i++, offset += 2)
++		nv_wo32(dev, obj, offset, 0x3f800000);
++
++	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
++		for (i = 0; i < vs_nr_b0 * 6; i += 6)
++			nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
++		for (i = 0; i < vs_nr_b1 * 4; i += 4)
++			nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
++	}
++}
++
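
Aside on the magic numbers in nv40_graph_construct_shader() above:
0x3f800000 is the IEEE-754 encoding of 1.0f, so the loops seed the
vertex units' constant banks with floating-point ones (the b0 bank,
filled with 0x00000001, apparently holds integer ones). A two-line
check of the constant:

    union { uint32_t u; float f; } v = { .u = 0x3f800000 };
    /* v.f == 1.0f */
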
++void
++nv40_grctx_init(struct nouveau_grctx *ctx)
++{
++	/* decide whether we're loading/unloading the context */
++	cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
++	cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
++
++	cp_name(ctx, cp_check_load);
++	cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
++	cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
++	cp_bra (ctx, ALWAYS, TRUE, cp_exit);
++
++	/* setup for context load */
++	cp_name(ctx, cp_setup_auto_load);
++	cp_wait(ctx, STATUS, IDLE);
++	cp_out (ctx, CP_NEXT_TO_SWAP);
++	cp_name(ctx, cp_setup_load);
++	cp_wait(ctx, STATUS, IDLE);
++	cp_set (ctx, SWAP_DIRECTION, LOAD);
++	cp_out (ctx, 0x00910880); /* ?? */
++	cp_out (ctx, 0x00901ffe); /* ?? */
++	cp_out (ctx, 0x01940000); /* ?? */
++	cp_lsr (ctx, 0x20);
++	cp_out (ctx, 0x0060000b); /* ?? */
++	cp_wait(ctx, UNK57, CLEAR);
++	cp_out (ctx, 0x0060000c); /* ?? */
++	cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
++
++	/* setup for context save */
++	cp_name(ctx, cp_setup_save);
++	cp_set (ctx, SWAP_DIRECTION, SAVE);
++
++	/* general PGRAPH state */
++	cp_name(ctx, cp_swap_state);
++	cp_pos (ctx, 0x00020/4);
++	nv40_graph_construct_general(ctx);
++	cp_wait(ctx, STATUS, IDLE);
++
++	/* 3D state, block 1 */
++	cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
++	nv40_graph_construct_state3d(ctx);
++	cp_wait(ctx, STATUS, IDLE);
++
++	/* 3D state, block 2 */
++	nv40_graph_construct_state3d_2(ctx);
++
++	/* Some other block of "random" state */
++	nv40_graph_construct_state3d_3(ctx);
++
++	/* Per-vertex shader state */
++	cp_pos (ctx, ctx->ctxvals_pos);
++	nv40_graph_construct_shader(ctx);
++
++	/* pre-exit state updates */
++	cp_name(ctx, cp_prepare_exit);
++	cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
++	cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
++	cp_out (ctx, CP_NEXT_TO_CURRENT);
++
++	cp_name(ctx, cp_exit);
++	cp_set (ctx, USER_SAVE, NOT_PENDING);
++	cp_set (ctx, USER_LOAD, NOT_PENDING);
++	cp_out (ctx, CP_END);
++}
++
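
Note on the builder above: nv40_grctx_init() is driven in two passes,
once to emit the PGRAPH ctxprog microcode and once to pre-fill a new
channel's context image with the gr_def() defaults. A sketch of the
caller, simplified from the nv40_graph.c code this patch also adds
(struct and field names as in nouveau_grctx.h):

    static void nv40_grctx_example(struct drm_device *dev,
                                   struct nouveau_channel *chan)
    {
            struct nouveau_grctx ctx = {};
            uint32_t cp[256];

            /* Pass 1: emit ctxprog microcode into a scratch buffer. */
            ctx.dev = dev;
            ctx.mode = NOUVEAU_GRCTX_PROG;
            ctx.data = cp;
            ctx.ctxprog_max = 256;
            nv40_grctx_init(&ctx);

            /* Pass 2: fill the context image; only the gr_def()
             * default values take effect in this mode. */
            ctx.mode = NOUVEAU_GRCTX_VALS;
            ctx.data = chan->ramin_grctx->gpuobj;
            nv40_grctx_init(&ctx);
    }
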
+diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
+new file mode 100644
+index 0000000..2a3495e
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv40_mc.c
+@@ -0,0 +1,38 @@
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_drm.h"
++
++int
++nv40_mc_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t tmp;
++
++	/* Power up everything, resetting each individual unit will
++	 * be done later if needed.
++	 */
++	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
++
++	switch (dev_priv->chipset) {
++	case 0x44:
++	case 0x46: /* G72 */
++	case 0x4e:
++	case 0x4c: /* C51_G7X */
++		tmp = nv_rd32(dev, NV40_PFB_020C);
++		nv_wr32(dev, NV40_PMC_1700, tmp);
++		nv_wr32(dev, NV40_PMC_1704, 0);
++		nv_wr32(dev, NV40_PMC_1708, 0);
++		nv_wr32(dev, NV40_PMC_170C, tmp);
++		break;
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++void
++nv40_mc_takedown(struct drm_device *dev)
++{
++}
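
The "power up everything" comment above is the key to nv40_mc_init():
all engines are enabled at once here, and an individual engine is
reset later by briefly clearing its NV03_PMC_ENABLE bit. A sketch of
that idiom (the helper name is hypothetical; NV_PMC_ENABLE_PGRAPH is
the PGRAPH bit defined in nouveau_reg.h):

    static void example_reset_pgraph(struct drm_device *dev)
    {
            /* pulse the engine's enable bit low to reset it */
            nv_wr32(dev, NV03_PMC_ENABLE,
                    nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
            nv_wr32(dev, NV03_PMC_ENABLE,
                    nv_rd32(dev, NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PGRAPH);
    }
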
+diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
+new file mode 100644
+index 0000000..d1a651e
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
+@@ -0,0 +1,792 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_mode.h"
++#include "drm_crtc_helper.h"
++
++#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
++#include "nouveau_reg.h"
++#include "nouveau_drv.h"
++#include "nouveau_hw.h"
++#include "nouveau_encoder.h"
++#include "nouveau_crtc.h"
++#include "nouveau_fb.h"
++#include "nouveau_connector.h"
++#include "nv50_display.h"
++
++static void
++nv50_crtc_lut_load(struct drm_crtc *crtc)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
++	int i;
++
++	NV_DEBUG_KMS(crtc->dev, "\n");
++
++	for (i = 0; i < 256; i++) {
++		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
++		writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
++		writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
++	}
++
++	if (nv_crtc->lut.depth == 30) {
++		writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
++		writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
++		writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
++	}
++}
++
++int
++nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
++{
++	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	int index = nv_crtc->index, ret;
++
++	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
++	NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
++
++	if (blanked) {
++		nv_crtc->cursor.hide(nv_crtc, false);
++
++		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
++		if (ret) {
++			NV_ERROR(dev, "no space while blanking crtc\n");
++			return ret;
++		}
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
++		OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
++		OUT_RING(evo, 0);
++		if (dev_priv->chipset != 0x50) {
++			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
++			OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
++		}
++
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
++		OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
++	} else {
++		if (nv_crtc->cursor.visible)
++			nv_crtc->cursor.show(nv_crtc, false);
++		else
++			nv_crtc->cursor.hide(nv_crtc, false);
++
++		ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
++		if (ret) {
++			NV_ERROR(dev, "no space while unblanking crtc\n");
++			return ret;
++		}
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
++		OUT_RING(evo, nv_crtc->lut.depth == 8 ?
++				NV50_EVO_CRTC_CLUT_MODE_OFF :
++				NV50_EVO_CRTC_CLUT_MODE_ON);
++		OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
++				 PAGE_SHIFT) >> 8);
++		if (dev_priv->chipset != 0x50) {
++			BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
++			OUT_RING(evo, NvEvoVRAM);
++		}
++
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
++		OUT_RING(evo, nv_crtc->fb.offset >> 8);
++		OUT_RING(evo, 0);
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
++		if (dev_priv->chipset != 0x50)
++			if (nv_crtc->fb.tile_flags == 0x7a00)
++				OUT_RING(evo, NvEvoFB32);
++			else
++			if (nv_crtc->fb.tile_flags == 0x7000)
++				OUT_RING(evo, NvEvoFB16);
++			else
++				OUT_RING(evo, NvEvoVRAM);
++		else
++			OUT_RING(evo, NvEvoVRAM);
++	}
++
++	nv_crtc->fb.blanked = blanked;
++	return 0;
++}
++
++static int
++nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
++{
++	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
++	if (ret) {
++		NV_ERROR(dev, "no space while setting dither\n");
++		return ret;
++	}
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
++	if (on)
++		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
++	else
++		OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
++
++	if (update) {
++		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
++		OUT_RING(evo, 0);
++		FIRE_RING(evo);
++	}
++
++	return 0;
++}
++
++struct nouveau_connector *
++nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
++{
++	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_connector *connector;
++	struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
++
++	/* The safest approach is to find an encoder with the right crtc
++	 * that is also linked to a connector. */
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		if (connector->encoder)
++			if (connector->encoder->crtc == crtc)
++				return nouveau_connector(connector);
++	}
++
++	return NULL;
++}
++
++static int
++nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
++{
++	struct nouveau_connector *nv_connector =
++		nouveau_crtc_connector_get(nv_crtc);
++	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct drm_display_mode *native_mode = NULL;
++	struct drm_display_mode *mode = &nv_crtc->base.mode;
++	uint32_t outX, outY, horiz, vert;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	switch (scaling_mode) {
++	case DRM_MODE_SCALE_NONE:
++		break;
++	default:
++		if (!nv_connector || !nv_connector->native_mode) {
++			NV_ERROR(dev, "No native mode, forcing panel scaling\n");
++			scaling_mode = DRM_MODE_SCALE_NONE;
++		} else {
++			native_mode = nv_connector->native_mode;
++		}
++		break;
++	}
++
++	switch (scaling_mode) {
++	case DRM_MODE_SCALE_ASPECT:
++		horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
++		vert = (native_mode->vdisplay << 19) / mode->vdisplay;
++
++		if (vert > horiz) {
++			outX = (mode->hdisplay * horiz) >> 19;
++			outY = (mode->vdisplay * horiz) >> 19;
++		} else {
++			outX = (mode->hdisplay * vert) >> 19;
++			outY = (mode->vdisplay * vert) >> 19;
++		}
++		break;
++	case DRM_MODE_SCALE_FULLSCREEN:
++		outX = native_mode->hdisplay;
++		outY = native_mode->vdisplay;
++		break;
++	case DRM_MODE_SCALE_CENTER:
++	case DRM_MODE_SCALE_NONE:
++	default:
++		outX = mode->hdisplay;
++		outY = mode->vdisplay;
++		break;
++	}
++
++	ret = RING_SPACE(evo, update ? 7 : 5);
++	if (ret)
++		return ret;
++
++	/* Got a better name for SCALER_ACTIVE? */
++	/* One day I've got to really figure out why this is needed. */
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
++	if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
++	    (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
++	    mode->hdisplay != outX || mode->vdisplay != outY) {
++		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
++	} else {
++		OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
++	}
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
++	OUT_RING(evo, outY << 16 | outX);
++	OUT_RING(evo, outY << 16 | outX);
++
++	if (update) {
++		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
++		OUT_RING(evo, 0);
++		FIRE_RING(evo);
++	}
++
++	return 0;
++}
++
++int
++nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
++{
++	uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
++	struct nouveau_pll_vals pll;
++	struct pll_lims limits;
++	uint32_t reg1, reg2;
++	int ret;
++
++	ret = get_pll_limits(dev, pll_reg, &limits);
++	if (ret)
++		return ret;
++
++	ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
++	if (ret <= 0)
++		return ret;
++
++	if (limits.vco2.maxfreq) {
++		reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
++		reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
++		nv_wr32(dev, pll_reg, 0x10000611);
++		nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
++		nv_wr32(dev, pll_reg + 8,
++			reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
++	} else {
++		reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
++		nv_wr32(dev, pll_reg, 0x50000610);
++		nv_wr32(dev, pll_reg + 4, reg1 |
++			(pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
++	}
++
++	return 0;
++}
++
++static void
++nv50_crtc_destroy(struct drm_crtc *crtc)
++{
++	struct drm_device *dev;
++	struct nouveau_crtc *nv_crtc;
++
++	if (!crtc)
++		return;
++
++	dev = crtc->dev;
++	nv_crtc = nouveau_crtc(crtc);
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	drm_crtc_cleanup(&nv_crtc->base);
++
++	nv50_cursor_fini(nv_crtc);
++
++	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
++	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
++	kfree(nv_crtc->mode);
++	kfree(nv_crtc);
++}
++
++int
++nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
++		     uint32_t buffer_handle, uint32_t width, uint32_t height)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct nouveau_bo *cursor = NULL;
++	struct drm_gem_object *gem;
++	int ret = 0, i;
++
++	if (width != 64 || height != 64)
++		return -EINVAL;
++
++	if (!buffer_handle) {
++		nv_crtc->cursor.hide(nv_crtc, true);
++		return 0;
++	}
++
++	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
++	if (!gem)
++		return -EINVAL;
++	cursor = nouveau_gem_object(gem);
++
++	ret = nouveau_bo_map(cursor);
++	if (ret)
++		goto out;
++
++	/* The simple approach will do for now. */
++	for (i = 0; i < 64 * 64; i++)
++		nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
++
++	nouveau_bo_unmap(cursor);
++
++	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
++					    dev_priv->vm_vram_base);
++	nv_crtc->cursor.show(nv_crtc, true);
++
++out:
++	mutex_lock(&dev->struct_mutex);
++	drm_gem_object_unreference(gem);
++	mutex_unlock(&dev->struct_mutex);
++	return ret;
++}
++
++int
++nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++
++	nv_crtc->cursor.set_pos(nv_crtc, x, y);
++	return 0;
++}
++
++static void
++nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
++		    uint32_t size)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	int i;
++
++	if (size != 256)
++		return;
++
++	for (i = 0; i < 256; i++) {
++		nv_crtc->lut.r[i] = r[i];
++		nv_crtc->lut.g[i] = g[i];
++		nv_crtc->lut.b[i] = b[i];
++	}
++
++	/* We need to know the depth before we upload, but it's possible to
++	 * get called before a framebuffer is bound.  If this is the case,
++	 * mark the lut values as dirty by setting depth==0, and it'll be
++	 * uploaded on the first mode_set_base()
++	 */
++	if (!nv_crtc->base.fb) {
++		nv_crtc->lut.depth = 0;
++		return;
++	}
++
++	nv50_crtc_lut_load(crtc);
++}
++
++static void
++nv50_crtc_save(struct drm_crtc *crtc)
++{
++	NV_ERROR(crtc->dev, "!!\n");
++}
++
++static void
++nv50_crtc_restore(struct drm_crtc *crtc)
++{
++	NV_ERROR(crtc->dev, "!!\n");
++}
++
++static const struct drm_crtc_funcs nv50_crtc_funcs = {
++	.save = nv50_crtc_save,
++	.restore = nv50_crtc_restore,
++	.cursor_set = nv50_crtc_cursor_set,
++	.cursor_move = nv50_crtc_cursor_move,
++	.gamma_set = nv50_crtc_gamma_set,
++	.set_config = drm_crtc_helper_set_config,
++	.destroy = nv50_crtc_destroy,
++};
++
++static void
++nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
++{
++}
++
++static void
++nv50_crtc_prepare(struct drm_crtc *crtc)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_device *dev = crtc->dev;
++	struct drm_encoder *encoder;
++	uint32_t dac = 0, sor = 0;
++
++	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
++
++	/* Disconnect all unused encoders. */
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++		if (!drm_helper_encoder_in_use(encoder))
++			continue;
++
++		if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
++		    nv_encoder->dcb->type == OUTPUT_TV)
++			dac |= (1 << nv_encoder->or);
++		else
++			sor |= (1 << nv_encoder->or);
++	}
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++		if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
++		    nv_encoder->dcb->type == OUTPUT_TV) {
++			if (dac & (1 << nv_encoder->or))
++				continue;
++		} else {
++			if (sor & (1 << nv_encoder->or))
++				continue;
++		}
++
++		nv_encoder->disconnect(nv_encoder);
++	}
++
++	nv50_crtc_blank(nv_crtc, true);
++}
++
++static void
++nv50_crtc_commit(struct drm_crtc *crtc)
++{
++	struct drm_crtc *crtc2;
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	int ret;
++
++	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
++
++	nv50_crtc_blank(nv_crtc, false);
++
++	/* Explicitly blank all unused crtc's. */
++	list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
++		if (!drm_helper_crtc_in_use(crtc2))
++			nv50_crtc_blank(nouveau_crtc(crtc2), true);
++	}
++
++	ret = RING_SPACE(evo, 2);
++	if (ret) {
++		NV_ERROR(dev, "no space while committing crtc\n");
++		return;
++	}
++	BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
++	OUT_RING(evo, 0);
++	FIRE_RING(evo);
++}
++
++static bool
++nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
++		     struct drm_display_mode *adjusted_mode)
++{
++	return true;
++}
++
++static int
++nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
++			   struct drm_framebuffer *old_fb, bool update)
++{
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct drm_device *dev = nv_crtc->base.dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
++	struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
++	int ret, format;
++
++	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
++
++	switch (drm_fb->depth) {
++	case  8:
++		format = NV50_EVO_CRTC_FB_DEPTH_8;
++		break;
++	case 15:
++		format = NV50_EVO_CRTC_FB_DEPTH_15;
++		break;
++	case 16:
++		format = NV50_EVO_CRTC_FB_DEPTH_16;
++		break;
++	case 24:
++	case 32:
++		format = NV50_EVO_CRTC_FB_DEPTH_24;
++		break;
++	case 30:
++		format = NV50_EVO_CRTC_FB_DEPTH_30;
++		break;
++	default:
++		 NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
++		 return -EINVAL;
++	}
++
++	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
++	if (ret)
++		return ret;
++
++	if (old_fb) {
++		struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
++		nouveau_bo_unpin(ofb->nvbo);
++	}
++
++	nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
++	nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
++	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
++	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
++		ret = RING_SPACE(evo, 2);
++		if (ret)
++			return ret;
++
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
++		if (nv_crtc->fb.tile_flags == 0x7a00)
++			OUT_RING(evo, NvEvoFB32);
++		else
++		if (nv_crtc->fb.tile_flags == 0x7000)
++			OUT_RING(evo, NvEvoFB16);
++		else
++			OUT_RING(evo, NvEvoVRAM);
++	}
++
++	ret = RING_SPACE(evo, 12);
++	if (ret)
++		return ret;
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
++	OUT_RING(evo, nv_crtc->fb.offset >> 8);
++	OUT_RING(evo, 0);
++	OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
++	if (!nv_crtc->fb.tile_flags) {
++		OUT_RING(evo, drm_fb->pitch | (1 << 20));
++	} else {
++		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
++				  fb->nvbo->tile_mode);
++	}
++	if (dev_priv->chipset == 0x50)
++		OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
++	else
++		OUT_RING(evo, format);
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
++	OUT_RING(evo, fb->base.depth == 8 ?
++		 NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
++	OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
++	OUT_RING(evo, (y << 16) | x);
++
++	if (nv_crtc->lut.depth != fb->base.depth) {
++		nv_crtc->lut.depth = fb->base.depth;
++		nv50_crtc_lut_load(crtc);
++	}
++
++	if (update) {
++		ret = RING_SPACE(evo, 2);
++		if (ret)
++			return ret;
++		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
++		OUT_RING(evo, 0);
++		FIRE_RING(evo);
++	}
++
++	return 0;
++}
++
++static int
++nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
++		   struct drm_display_mode *adjusted_mode, int x, int y,
++		   struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
++	struct nouveau_connector *nv_connector = NULL;
++	uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
++	uint32_t hunk1, vunk1, vunk2a, vunk2b;
++	int ret;
++
++	/* Find the connector attached to this CRTC */
++	nv_connector = nouveau_crtc_connector_get(nv_crtc);
++
++	*nv_crtc->mode = *adjusted_mode;
++
++	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
++
++	hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
++	vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
++	hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
++	vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
++	/* I can't give this a proper name; can anyone else? */
++	hunk1 = adjusted_mode->htotal -
++		adjusted_mode->hsync_start + adjusted_mode->hdisplay;
++	vunk1 = adjusted_mode->vtotal -
++		adjusted_mode->vsync_start + adjusted_mode->vdisplay;
++	/* Another strange value, this time only for interlaced adjusted_modes. */
++	vunk2a = 2 * adjusted_mode->vtotal -
++		 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
++	vunk2b = adjusted_mode->vtotal -
++		 adjusted_mode->vsync_start + adjusted_mode->vtotal;
++
++	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
++		vsync_dur /= 2;
++		vsync_start_to_end  /= 2;
++		vunk1 /= 2;
++		vunk2a /= 2;
++		vunk2b /= 2;
++		/* magic */
++		if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
++			vsync_start_to_end -= 1;
++			vunk1 -= 1;
++			vunk2a -= 1;
++			vunk2b -= 1;
++		}
++	}
++
++	ret = RING_SPACE(evo, 17);
++	if (ret)
++		return ret;
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
++	OUT_RING(evo, adjusted_mode->clock | 0x800000);
++	OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
++	OUT_RING(evo, 0);
++	OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
++	OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
++	OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
++			(hsync_start_to_end - 1));
++	OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
++
++	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
++		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
++		OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
++	} else {
++		OUT_RING(evo, 0);
++		OUT_RING(evo, 0);
++	}
++
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
++	OUT_RING(evo, 0);
++
++	/* This is the actual resolution of the mode. */
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
++	OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
++	OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
++
++	nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
++	nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
++
++	return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
++}
++
++static int
++nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
++			struct drm_framebuffer *old_fb)
++{
++	return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
++}
++
++static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
++	.dpms = nv50_crtc_dpms,
++	.prepare = nv50_crtc_prepare,
++	.commit = nv50_crtc_commit,
++	.mode_fixup = nv50_crtc_mode_fixup,
++	.mode_set = nv50_crtc_mode_set,
++	.mode_set_base = nv50_crtc_mode_set_base,
++	.load_lut = nv50_crtc_lut_load,
++};
++
++int
++nv50_crtc_create(struct drm_device *dev, int index)
++{
++	struct nouveau_crtc *nv_crtc = NULL;
++	int ret, i;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
++	if (!nv_crtc)
++		return -ENOMEM;
++
++	nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
++	if (!nv_crtc->mode) {
++		kfree(nv_crtc);
++		return -ENOMEM;
++	}
++
++	/* Default CLUT parameters, will be activated on the hw upon
++	 * first mode set.
++	 */
++	for (i = 0; i < 256; i++) {
++		nv_crtc->lut.r[i] = i << 8;
++		nv_crtc->lut.g[i] = i << 8;
++		nv_crtc->lut.b[i] = i << 8;
++	}
++	nv_crtc->lut.depth = 0;
++
++	ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
++			     0, 0x0000, false, true, &nv_crtc->lut.nvbo);
++	if (!ret) {
++		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
++		if (!ret)
++			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
++		if (ret)
++			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
++	}
++
++	if (ret) {
++		kfree(nv_crtc->mode);
++		kfree(nv_crtc);
++		return ret;
++	}
++
++	nv_crtc->index = index;
++
++	/* set function pointers */
++	nv_crtc->set_dither = nv50_crtc_set_dither;
++	nv_crtc->set_scale = nv50_crtc_set_scale;
++
++	drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
++	drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
++	drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
++
++	ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
++			     0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
++	if (!ret) {
++		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
++		if (!ret)
++			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
++		if (ret)
++			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
++	}
++
++	nv50_cursor_init(nv_crtc);
++	return 0;
++}
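
Almost every function in nv50_crtc.c above follows the same EVO
display-channel idiom: reserve pushbuffer space, emit (method, data)
pairs, then latch all pending state at once with NV50_EVO_UPDATE. The
bool "update" argument on set_dither()/set_scale() only controls
whether that final latch is emitted now or left to the caller. A
minimal sketch using the DITHER_CTRL method from above:

    static int evo_idiom_example(struct nouveau_channel *evo, int index)
    {
            int ret = RING_SPACE(evo, 4);   /* reserve 4 dwords; may fail */
            if (ret)
                    return ret;
            BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, DITHER_CTRL), 1);
            OUT_RING (evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
            BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); /* latch pending state */
            OUT_RING (evo, 0);
            FIRE_RING(evo);                 /* kick the pushbuffer */
            return 0;
    }
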
+diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
+new file mode 100644
+index 0000000..753e723
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
+@@ -0,0 +1,156 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_mode.h"
++
++#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
++#include "nouveau_reg.h"
++#include "nouveau_drv.h"
++#include "nouveau_crtc.h"
++#include "nv50_display.h"
++
++static void
++nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
++{
++	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct drm_device *dev = nv_crtc->base.dev;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	if (update && nv_crtc->cursor.visible)
++		return;
++
++	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
++	if (ret) {
++		NV_ERROR(dev, "no space while unhiding cursor\n");
++		return;
++	}
++
++	if (dev_priv->chipset != 0x50) {
++		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
++		OUT_RING(evo, NvEvoVRAM);
++	}
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
++	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
++	OUT_RING(evo, nv_crtc->cursor.offset >> 8);
++
++	if (update) {
++		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
++		OUT_RING(evo, 0);
++		FIRE_RING(evo);
++		nv_crtc->cursor.visible = true;
++	}
++}
++
++static void
++nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
++{
++	struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct drm_device *dev = nv_crtc->base.dev;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	if (update && !nv_crtc->cursor.visible)
++		return;
++
++	ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
++	if (ret) {
++		NV_ERROR(dev, "no space while hiding cursor\n");
++		return;
++	}
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
++	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
++	OUT_RING(evo, 0);
++	if (dev_priv->chipset != 0x50) {
++		BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
++		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
++	}
++
++	if (update) {
++		BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
++		OUT_RING(evo, 0);
++		FIRE_RING(evo);
++		nv_crtc->cursor.visible = false;
++	}
++}
++
++static void
++nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
++{
++	struct drm_device *dev = nv_crtc->base.dev;
++
++	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
++		((y & 0xFFFF) << 16) | (x & 0xFFFF));
++	/* Needed to make the cursor move. */
++	nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
++}
++
++static void
++nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
++{
++	NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
++	if (offset == nv_crtc->cursor.offset)
++		return;
++
++	nv_crtc->cursor.offset = offset;
++	if (nv_crtc->cursor.visible) {
++		nv_crtc->cursor.visible = false;
++		nv_crtc->cursor.show(nv_crtc, true);
++	}
++}
++
++int
++nv50_cursor_init(struct nouveau_crtc *nv_crtc)
++{
++	nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
++	nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
++	nv_crtc->cursor.hide = nv50_cursor_hide;
++	nv_crtc->cursor.show = nv50_cursor_show;
++	return 0;
++}
++
++void
++nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
++{
++	struct drm_device *dev = nv_crtc->base.dev;
++	int idx = nv_crtc->index;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
++	if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
++		     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
++		NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
++		NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
++			 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
++	}
++}
++
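
The "update" flag on the cursor show()/hide() hooks above follows the
same convention: false batches the change for a caller-driven UPDATE,
true latches it immediately. Both uses appear in nv50_crtc.c earlier
in this patch:

    /* batched: nv50_crtc_blank() fires its own UPDATE afterwards */
    nv_crtc->cursor.hide(nv_crtc, false);

    /* immediate: nv50_crtc_cursor_set() wants the cursor gone now */
    nv_crtc->cursor.hide(nv_crtc, true);
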
+diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
+new file mode 100644
+index 0000000..f08f042
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_dac.c
+@@ -0,0 +1,304 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++
++#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
++#include "nouveau_reg.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_crtc.h"
++#include "nv50_display.h"
++
++static void
++nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
++{
++	struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
++
++	ret = RING_SPACE(evo, 2);
++	if (ret) {
++		NV_ERROR(dev, "no space while disconnecting DAC\n");
++		return;
++	}
++	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
++	OUT_RING(evo, 0);
++}
++
++static enum drm_connector_status
++nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	enum drm_connector_status status = connector_status_disconnected;
++	uint32_t dpms_state, load_pattern, load_state;
++	int or = nv_encoder->or;
++
++	nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
++	dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
++
++	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
++		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
++	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
++		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
++		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
++		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
++			  nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
++		return status;
++	}
++
++	/* Use bios provided value if possible. */
++	if (dev_priv->vbios->dactestval) {
++		load_pattern = dev_priv->vbios->dactestval;
++		NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
++			  load_pattern);
++	} else {
++		load_pattern = 340;
++		NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
++			 load_pattern);
++	}
++
++	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
++		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
++	mdelay(45); /* give it some time to process */
++	load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
++
++	nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
++	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
++		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
++
++	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
++			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
++		status = connector_status_connected;
++
++	if (status == connector_status_connected)
++		NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
++	else
++		NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);
++
++	return status;
++}
++
++static void
++nv50_dac_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	uint32_t val;
++	int or = nv_encoder->or;
++
++	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
++
++	/* wait for it to be done */
++	if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
++		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
++		NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
++		NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
++			 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
++		return;
++	}
++
++	val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
++
++	if (mode != DRM_MODE_DPMS_ON)
++		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
++
++	switch (mode) {
++	case DRM_MODE_DPMS_STANDBY:
++		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
++		break;
++	case DRM_MODE_DPMS_SUSPEND:
++		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
++		break;
++	case DRM_MODE_DPMS_OFF:
++		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
++		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
++		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
++		break;
++	default:
++		break;
++	}
++
++	nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
++		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
++}
++
++static void
++nv50_dac_save(struct drm_encoder *encoder)
++{
++	NV_ERROR(encoder->dev, "!!\n");
++}
++
++static void
++nv50_dac_restore(struct drm_encoder *encoder)
++{
++	NV_ERROR(encoder->dev, "!!\n");
++}
++
++static bool
++nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
++		    struct drm_display_mode *adjusted_mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_connector *connector;
++
++	NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
++
++	connector = nouveau_encoder_connector_get(nv_encoder);
++	if (!connector) {
++		NV_ERROR(encoder->dev, "Encoder has no connector\n");
++		return false;
++	}
++
++	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
++	     connector->native_mode) {
++		int id = adjusted_mode->base.id;
++		*adjusted_mode = *connector->native_mode;
++		adjusted_mode->base.id = id;
++	}
++
++	return true;
++}
++
++static void
++nv50_dac_prepare(struct drm_encoder *encoder)
++{
++}
++
++static void
++nv50_dac_commit(struct drm_encoder *encoder)
++{
++}
++
++static void
++nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
++		  struct drm_display_mode *adjusted_mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
++	uint32_t mode_ctl = 0, mode_ctl2 = 0;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
++
++	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
++
++	if (crtc->index == 1)
++		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
++	else
++		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
++
++	/* Lacking a working tv-out, this is not 100% certain. */
++	if (nv_encoder->dcb->type == OUTPUT_ANALOG)
++		mode_ctl |= 0x40;
++	else
++	if (nv_encoder->dcb->type == OUTPUT_TV)
++		mode_ctl |= 0x100;
++
++	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
++		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
++
++	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
++		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
++
++	ret = RING_SPACE(evo, 3);
++	if (ret) {
++		NV_ERROR(dev, "no space while connecting DAC\n");
++		return;
++	}
++	BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
++	OUT_RING(evo, mode_ctl);
++	OUT_RING(evo, mode_ctl2);
++}
++
++static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
++	.dpms = nv50_dac_dpms,
++	.save = nv50_dac_save,
++	.restore = nv50_dac_restore,
++	.mode_fixup = nv50_dac_mode_fixup,
++	.prepare = nv50_dac_prepare,
++	.commit = nv50_dac_commit,
++	.mode_set = nv50_dac_mode_set,
++	.detect = nv50_dac_detect
++};
++
++static void
++nv50_dac_destroy(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++	if (!encoder)
++		return;
++
++	NV_DEBUG_KMS(encoder->dev, "\n");
++
++	drm_encoder_cleanup(encoder);
++	kfree(nv_encoder);
++}
++
++static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
++	.destroy = nv50_dac_destroy,
++};
++
++int
++nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
++{
++	struct nouveau_encoder *nv_encoder;
++	struct drm_encoder *encoder;
++
++	NV_DEBUG_KMS(dev, "\n");
++	NV_INFO(dev, "Detected a DAC output\n");
++
++	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
++	if (!nv_encoder)
++		return -ENOMEM;
++	encoder = to_drm_encoder(nv_encoder);
++
++	nv_encoder->dcb = entry;
++	nv_encoder->or = ffs(entry->or) - 1;
++
++	nv_encoder->disconnect = nv50_dac_disconnect;
++
++	drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
++			 DRM_MODE_ENCODER_DAC);
++	drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
++
++	encoder->possible_crtcs = entry->heads;
++	encoder->possible_clones = 0;
++	return 0;
++}
++
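
One detail of nv50_dac_create() that is easy to misread: dcb_entry.or
is a bitmask of output resources, and ffs(entry->or) - 1 converts it
to the index used to address the per-OR register blocks. Worked
examples (the DAC numbering is an inference from the register layout):

    nv_encoder->or = ffs(entry->or) - 1;
    /*  entry->or == 0x1  ->  ffs() == 1  ->  or index 0 (DAC0)
     *  entry->or == 0x4  ->  ffs() == 3  ->  or index 2 (DAC2)
     */
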
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+new file mode 100644
+index 0000000..90f0bf5
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -0,0 +1,1032 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "nv50_display.h"
++#include "nouveau_crtc.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_fb.h"
++#include "drm_crtc_helper.h"
++
++static void
++nv50_evo_channel_del(struct nouveau_channel **pchan)
++{
++	struct nouveau_channel *chan = *pchan;
++
++	if (!chan)
++		return;
++	*pchan = NULL;
++
++	nouveau_gpuobj_channel_takedown(chan);
++	nouveau_bo_ref(NULL, &chan->pushbuf_bo);
++
++	if (chan->user)
++		iounmap(chan->user);
++
++	kfree(chan);
++}
++
++static int
++nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
++		    uint32_t tile_flags, uint32_t magic_flags,
++		    uint32_t offset, uint32_t limit)
++{
++	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
++	struct drm_device *dev = evo->dev;
++	struct nouveau_gpuobj *obj = NULL;
++	int ret;
++
++	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
++	if (ret)
++		return ret;
++	obj->engine = NVOBJ_ENGINE_DISPLAY;
++
++	ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
++	if (ret) {
++		nouveau_gpuobj_del(dev, &obj);
++		return ret;
++	}
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
++	nv_wo32(dev, obj, 1, limit);
++	nv_wo32(dev, obj, 2, offset);
++	nv_wo32(dev, obj, 3, 0x00000000);
++	nv_wo32(dev, obj, 4, 0x00000000);
++	nv_wo32(dev, obj, 5, 0x00010000);
++	dev_priv->engine.instmem.finish_access(dev);
++
++	return 0;
++}
++
++static int
++nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan;
++	int ret;
++
++	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
++	if (!chan)
++		return -ENOMEM;
++	*pchan = chan;
++
++	chan->id = -1;
++	chan->dev = dev;
++	chan->user_get = 4;
++	chan->user_put = 0;
++
++	INIT_LIST_HEAD(&chan->ramht_refs);
++
++	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
++				     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
++	if (ret) {
++		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
++		nv50_evo_channel_del(pchan);
++		return ret;
++	}
++
++	ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
++				    im_pramin->start, 32768);
++	if (ret) {
++		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
++		nv50_evo_channel_del(pchan);
++		return ret;
++	}
++
++	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
++				     0, &chan->ramht);
++	if (ret) {
++		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
++		nv50_evo_channel_del(pchan);
++		return ret;
++	}
++
++	if (dev_priv->chipset != 0x50) {
++		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
++					  0, 0xffffffff);
++		if (ret) {
++			nv50_evo_channel_del(pchan);
++			return ret;
++		}
++
++
++					  0, 0xffffffff);
++		if (ret) {
++			nv50_evo_channel_del(pchan);
++			return ret;
++		}
++	}
++
++	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
++				  0, nouveau_mem_fb_amount(dev));
++	if (ret) {
++		nv50_evo_channel_del(pchan);
++		return ret;
++	}
++
++	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
++			     false, true, &chan->pushbuf_bo);
++	if (ret == 0)
++		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
++	if (ret) {
++		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
++		nv50_evo_channel_del(pchan);
++		return ret;
++	}
++
++	ret = nouveau_bo_map(chan->pushbuf_bo);
++	if (ret) {
++		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
++		nv50_evo_channel_del(pchan);
++		return ret;
++	}
++
++	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
++					NV50_PDISPLAY_USER(0), PAGE_SIZE);
++	if (!chan->user) {
++		NV_ERROR(dev, "Error mapping EVO control regs.\n");
++		nv50_evo_channel_del(pchan);
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
++int
++nv50_display_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct drm_connector *connector;
++	uint32_t val, ram_amount, hpd_en[2];
++	uint64_t start;
++	int ret, i;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
++	/*
++	 * I think the 0x006101XX range is some kind of main control area
++	 * that enables things.
++	 */
++	/* CRTC? */
++	for (i = 0; i < 2; i++) {
++		val = nv_rd32(dev, 0x00616100 + (i * 0x800));
++		nv_wr32(dev, 0x00610190 + (i * 0x10), val);
++		val = nv_rd32(dev, 0x00616104 + (i * 0x800));
++		nv_wr32(dev, 0x00610194 + (i * 0x10), val);
++		val = nv_rd32(dev, 0x00616108 + (i * 0x800));
++		nv_wr32(dev, 0x00610198 + (i * 0x10), val);
++		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
++		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
++	}
++	/* DAC */
++	for (i = 0; i < 3; i++) {
++		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
++		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
++	}
++	/* SOR */
++	for (i = 0; i < 4; i++) {
++		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
++		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
++	}
++	/* Something not yet in use, tv-out maybe. */
++	for (i = 0; i < 3; i++) {
++		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
++		nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
++	}
++
++	for (i = 0; i < 3; i++) {
++		nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
++			NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
++		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
++	}
++
++	/* This used to be in crtc unblank, but seems out of place there. */
++	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
++	/* RAM is clamped to 256 MiB. */
++	ram_amount = nouveau_mem_fb_amount(dev);
++	NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
++	if (ram_amount > 256*1024*1024)
++		ram_amount = 256*1024*1024;
++	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
++	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
++	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
++
++	/* The precise purpose is unknown; I suspect it has something to do
++	 * with text mode.
++	 */
++	if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
++		nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
++		nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
++		if (!nv_wait(0x006194e8, 2, 0)) {
++			NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
++			NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
++						nv_rd32(dev, 0x6194e8));
++			return -EBUSY;
++		}
++	}
++
++	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
++	 * stuck in some unspecified state
++	 */
++	start = ptimer->read(dev);
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
++	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
++		if ((val & 0x9f0000) == 0x20000)
++			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
++							val | 0x800000);
++
++		if ((val & 0x3f0000) == 0x30000)
++			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
++							val | 0x200000);
++
++		if (ptimer->read(dev) - start > 1000000000ULL) {
++			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
++			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
++			return -EBUSY;
++		}
++	}
++
++	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
++	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
++		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
++		NV_ERROR(dev, "0x610200 = 0x%08x\n",
++			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
++		return -EBUSY;
++	}
++
++	for (i = 0; i < 2; i++) {
++		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
++		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
++			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
++			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
++			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
++				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
++			return -EBUSY;
++		}
++
++		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
++			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
++		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
++			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
++			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
++			NV_ERROR(dev, "timeout: "
++				      "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
++			NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
++				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
++			return -EBUSY;
++		}
++	}
++
++	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
++
++	/* initialise fifo */
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
++		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
++		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
++		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
++	if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
++		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
++		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
++		return -EBUSY;
++	}
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
++		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
++		 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
++	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
++		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
++	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
++
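++	/* Software ring bookkeeping: the 4096-byte push buffer holds 1024
++	 * dwords, two of which are held back, presumably for the
++	 * wrap-around jump.
++	 */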
++	evo->dma.max = (4096/4) - 2;
++	evo->dma.put = 0;
++	evo->dma.cur = evo->dma.put;
++	evo->dma.free = evo->dma.max - evo->dma.cur;
++
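++	/* Pad the start of the push buffer with zero dwords; these slots
++	 * are simply skipped, reportedly to avoid problems when GET sits
++	 * at the very start of the buffer.
++	 */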
++	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
++		OUT_RING(evo, 0);
++
++	ret = RING_SPACE(evo, 11);
++	if (ret)
++		return ret;
++	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
++	OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
++	OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
++	OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
++	OUT_RING(evo, 0);
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
++	OUT_RING(evo, 0);
++	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
++	OUT_RING(evo, 0);
++	FIRE_RING(evo);
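++	/* 0x640004 appears to mirror the channel's GET pointer; once it
++	 * matches PUT, the methods above have been consumed.
++	 */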
++	if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
++		NV_ERROR(dev, "evo pushbuf stalled\n");
++
++	/* enable clock change interrupts. */
++	nv_wr32(dev, 0x610028, 0x00010001);
++	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
++					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
++					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));
++
++	/* enable hotplug interrupts */
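++	/* Each GPIO line contributes a pair of bits per register: the low
++	 * 16 bits appear to arm "plugged" events and the high 16 bits
++	 * "unplugged" events, hence the 0x00010001 pattern.
++	 */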
++	hpd_en[0] = hpd_en[1] = 0;
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct nouveau_connector *conn = nouveau_connector(connector);
++		struct dcb_gpio_entry *gpio;
++
++		if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
++		    connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
++		    connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
++			continue;
++
++		gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
++		if (!gpio)
++			continue;
++
++		hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
++	}
++
++	nv_wr32(dev, 0xe054, 0xffffffff);
++	nv_wr32(dev, 0xe050, hpd_en[0]);
++	if (dev_priv->chipset >= 0x90) {
++		nv_wr32(dev, 0xe074, 0xffffffff);
++		nv_wr32(dev, 0xe070, hpd_en[1]);
++	}
++
++	return 0;
++}
++
++static int nv50_display_disable(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_crtc *drm_crtc;
++	int ret, i;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
++		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
++
++		nv50_crtc_blank(crtc, true);
++	}
++
++	ret = RING_SPACE(dev_priv->evo, 2);
++	if (ret == 0) {
++		BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
++		OUT_RING(dev_priv->evo, 0);
++	}
++	FIRE_RING(dev_priv->evo);
++
++	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
++	 * cleaning up?
++	 */
++	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
++		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
++		uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
++
++		if (!crtc->base.enabled)
++			continue;
++
++		nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
++		if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
++			NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
++				      "0x%08x\n", mask, mask);
++			NV_ERROR(dev, "0x610024 = 0x%08x\n",
++				 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
++		}
++	}
++
++	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
++	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
++	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
++		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
++		NV_ERROR(dev, "0x610200 = 0x%08x\n",
++			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
++	}
++
++	for (i = 0; i < 3; i++) {
++		if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
++			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
++			NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
++			NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
++				  nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
++		}
++	}
++
++	/* disable interrupts. */
++	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
++
++	/* disable hotplug interrupts */
++	nv_wr32(dev, 0xe054, 0xffffffff);
++	nv_wr32(dev, 0xe050, 0x00000000);
++	if (dev_priv->chipset >= 0x90) {
++		nv_wr32(dev, 0xe074, 0xffffffff);
++		nv_wr32(dev, 0xe070, 0x00000000);
++	}
++	return 0;
++}
++
++int nv50_display_create(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct parsed_dcb *dcb = dev_priv->vbios->dcb;
++	uint32_t connector[16] = {};
++	int ret, i;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	/* init basic kernel modesetting */
++	drm_mode_config_init(dev);
++
++	/* Initialise some optional connector properties. */
++	drm_mode_create_scaling_mode_property(dev);
++	drm_mode_create_dithering_property(dev);
++
++	dev->mode_config.min_width = 0;
++	dev->mode_config.min_height = 0;
++
++	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
++
++	dev->mode_config.max_width = 8192;
++	dev->mode_config.max_height = 8192;
++
++	dev->mode_config.fb_base = dev_priv->fb_phys;
++
++	/* Create EVO channel */
++	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
++	if (ret) {
++		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
++		return ret;
++	}
++
++	/* Create CRTC objects */
++	for (i = 0; i < 2; i++)
++		nv50_crtc_create(dev, i);
++
++	/* We set up the encoders from the BIOS table */
++	for (i = 0 ; i < dcb->entries; i++) {
++		struct dcb_entry *entry = &dcb->entry[i];
++
++		if (entry->location != DCB_LOC_ON_CHIP) {
++			NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
++				entry->type, ffs(entry->or) - 1);
++			continue;
++		}
++
++		switch (entry->type) {
++		case OUTPUT_TMDS:
++		case OUTPUT_LVDS:
++		case OUTPUT_DP:
++			nv50_sor_create(dev, entry);
++			break;
++		case OUTPUT_ANALOG:
++			nv50_dac_create(dev, entry);
++			break;
++		default:
++			NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
++			continue;
++		}
++
++		connector[entry->connector] |= (1 << entry->type);
++	}
++
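++	/* connector[] now maps each connector index to a bitmask of the
++	 * OUTPUT_* encoder types attached to it.
++	 */
++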
++	/* It appears that DCB 3.0+ VBIOS has a connector table; however,
++	 * I'm not 100% certain how to decode it correctly yet, so just
++	 * look at what encoders are present on each connector index and
++	 * attempt to derive the connector type from that.
++	 */
++	for (i = 0 ; i < dcb->entries; i++) {
++		struct dcb_entry *entry = &dcb->entry[i];
++		uint16_t encoders;
++		int type;
++
++		encoders = connector[entry->connector];
++		if (!(encoders & (1 << entry->type)))
++			continue;
++		connector[entry->connector] = 0;
++
++		if (encoders & (1 << OUTPUT_DP)) {
++			type = DRM_MODE_CONNECTOR_DisplayPort;
++		} else if (encoders & (1 << OUTPUT_TMDS)) {
++			if (encoders & (1 << OUTPUT_ANALOG))
++				type = DRM_MODE_CONNECTOR_DVII;
++			else
++				type = DRM_MODE_CONNECTOR_DVID;
++		} else if (encoders & (1 << OUTPUT_ANALOG)) {
++			type = DRM_MODE_CONNECTOR_VGA;
++		} else if (encoders & (1 << OUTPUT_LVDS)) {
++			type = DRM_MODE_CONNECTOR_LVDS;
++		} else {
++			type = DRM_MODE_CONNECTOR_Unknown;
++		}
++
++		if (type == DRM_MODE_CONNECTOR_Unknown)
++			continue;
++
++		nouveau_connector_create(dev, entry->connector, type);
++	}
++
++	ret = nv50_display_init(dev);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++int nv50_display_destroy(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	drm_mode_config_cleanup(dev);
++
++	nv50_display_disable(dev);
++	nv50_evo_channel_del(&dev_priv->evo);
++
++	return 0;
++}
++
++static inline uint32_t
++nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t mc;
++
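++	/* The SOR mode-control registers moved on later chipsets; G92
++	 * (0x92) and GT200 (0xa0) apparently kept the original layout.
++	 */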
++	if (sor) {
++		if (dev_priv->chipset < 0x90 ||
++		    dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
++			mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
++		else
++			mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
++	} else {
++		mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
++	}
++
++	return mc;
++}
++
++static int
++nv50_display_irq_head(struct drm_device *dev, int *phead,
++		      struct dcb_entry **pdcbent)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
++	int head, i, or = 0, type = OUTPUT_ANY;
++
++	/* We're assuming that head 0 *or* head 1 will be active here,
++	 * and not both.  I'm not sure if the hw will ever even signal
++	 * both, but it definitely shouldn't for us, as we commit each
++	 * CRTC separately and submission is blocked by the GPU until
++	 * we handle each in turn.
++	 */
++	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
++	head = ffs((unk30 >> 9) & 3) - 1;
++	if (head < 0)
++		return -EINVAL;
++
++	/* This assumes CRTCs are never bound to multiple encoders, which
++	 * should be the case.
++	 */
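++	/* Bit 'head' of each mode_ctrl value flags the CRTC the OR is
++	 * bound to; bits 8-11 select the output protocol in use.
++	 */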
++	for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
++		uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
++		if (!(mc & (1 << head)))
++			continue;
++
++		switch ((mc >> 8) & 0xf) {
++		case 0: type = OUTPUT_ANALOG; break;
++		case 1: type = OUTPUT_TV; break;
++		default:
++			NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
++			return -1;
++		}
++
++		or = i;
++	}
++
++	for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
++		uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
++		if (!(mc & (1 << head)))
++			continue;
++
++		switch ((mc >> 8) & 0xf) {
++		case 0: type = OUTPUT_LVDS; break;
++		case 1: type = OUTPUT_TMDS; break;
++		case 2: type = OUTPUT_TMDS; break;
++		case 5: type = OUTPUT_TMDS; break;
++		case 8: type = OUTPUT_DP; break;
++		case 9: type = OUTPUT_DP; break;
++		default:
++			NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
++			return -1;
++		}
++
++		or = i;
++	}
++
++	NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
++	if (type == OUTPUT_ANY) {
++		NV_ERROR(dev, "unknown encoder!!\n");
++		return -1;
++	}
++
++	for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
++		struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
++
++		if (dcbent->type != type)
++			continue;
++
++		if (!(dcbent->or & (1 << or)))
++			continue;
++
++		*phead = head;
++		*pdcbent = dcbent;
++		return 0;
++	}
++
++	NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
++	return 0;
++}
++
++static uint32_t
++nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
++			   int pxclk)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_connector *nv_connector = NULL;
++	struct drm_encoder *encoder;
++	struct nvbios *bios = &dev_priv->VBIOS;
++	uint32_t mc, script = 0, or;
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++		if (nv_encoder->dcb != dcbent)
++			continue;
++
++		nv_connector = nouveau_encoder_connector_get(nv_encoder);
++		break;
++	}
++
++	or = ffs(dcbent->or) - 1;
++	mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
++	switch (dcbent->type) {
++	case OUTPUT_LVDS:
++		script = (mc >> 8) & 0xf;
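++		/* Script bits: 0x0100 appears to select dual-link, 0x0200
++		 * a 24-bit panel interface.
++		 */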
++		if (bios->pub.fp_no_ddc) {
++			if (bios->fp.dual_link)
++				script |= 0x0100;
++			if (bios->fp.if_is_24bit)
++				script |= 0x0200;
++		} else {
++			if (pxclk >= bios->fp.duallink_transition_clk) {
++				script |= 0x0100;
++				if (bios->fp.strapless_is_24bit & 2)
++					script |= 0x0200;
++			} else
++			if (bios->fp.strapless_is_24bit & 1)
++				script |= 0x0200;
++
++			if (nv_connector && nv_connector->edid &&
++			    (nv_connector->edid->revision >= 4) &&
++			    (nv_connector->edid->input & 0x70) >= 0x20)
++				script |= 0x0200;
++		}
++
++		if (nouveau_uscript_lvds >= 0) {
++			NV_INFO(dev, "override script 0x%04x with 0x%04x "
++				     "for output LVDS-%d\n", script,
++				     nouveau_uscript_lvds, or);
++			script = nouveau_uscript_lvds;
++		}
++		break;
++	case OUTPUT_TMDS:
++		script = (mc >> 8) & 0xf;
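++		/* Single-link TMDS tops out at 165 MHz; above that the
++		 * dual-link variant of the script is needed.
++		 */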
++		if (pxclk >= 165000)
++			script |= 0x0100;
++
++		if (nouveau_uscript_tmds >= 0) {
++			NV_INFO(dev, "override script 0x%04x with 0x%04x "
++				     "for output TMDS-%d\n", script,
++				     nouveau_uscript_tmds, or);
++			script = nouveau_uscript_tmds;
++		}
++		break;
++	case OUTPUT_DP:
++		script = (mc >> 8) & 0xf;
++		break;
++	case OUTPUT_ANALOG:
++		script = 0xff;
++		break;
++	default:
++		NV_ERROR(dev, "modeset on unsupported output type!\n");
++		break;
++	}
++
++	return script;
++}
++
++static void
++nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan;
++	struct list_head *entry, *tmp;
++
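++	/* Release every channel waiting on a vblank semaphore by writing
++	 * the value it expects into its notifier buffer.
++	 */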
++	list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
++		chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
++
++		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
++						chan->nvsw.vblsem_rval);
++		list_del(&chan->nvsw.vbl_wait);
++	}
++}
++
++static void
++nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
++{
++	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
++
++	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
++		nv50_display_vblank_crtc_handler(dev, 0);
++
++	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
++		nv50_display_vblank_crtc_handler(dev, 1);
++
++	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
++		     NV50_PDISPLAY_INTR_EN) & ~intr);
++	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
++}
++
++static void
++nv50_display_unk10_handler(struct drm_device *dev)
++{
++	struct dcb_entry *dcbent;
++	int head, ret;
++
++	ret = nv50_display_irq_head(dev, &head, &dcbent);
++	if (ret)
++		goto ack;
++
++	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
++
++	nouveau_bios_run_display_table(dev, dcbent, 0, -1);
++
++ack:
++	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
++	nv_wr32(dev, 0x610030, 0x80000000);
++}
++
++static void
++nv50_display_unk20_handler(struct drm_device *dev)
++{
++	struct dcb_entry *dcbent;
++	uint32_t tmp, pclk, script;
++	int head, or, ret;
++
++	ret = nv50_display_irq_head(dev, &head, &dcbent);
++	if (ret)
++		goto ack;
++	or = ffs(dcbent->or) - 1;
++	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
++	script = nv50_display_script_select(dev, dcbent, pclk);
++
++	NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
++
++	if (dcbent->type != OUTPUT_DP)
++		nouveau_bios_run_display_table(dev, dcbent, 0, -2);
++
++	nv50_crtc_set_clock(dev, head, pclk);
++
++	nouveau_bios_run_display_table(dev, dcbent, script, pclk);
++
++	tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
++	tmp &= ~0x000000f;
++	nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
++
++	if (dcbent->type != OUTPUT_ANALOG) {
++		tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
++		tmp &= ~0x00000f0f;
++		if (script & 0x0100)
++			tmp |= 0x00000101;
++		nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
++	} else {
++		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
++	}
++
++ack:
++	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
++	nv_wr32(dev, 0x610030, 0x80000000);
++}
++
++static void
++nv50_display_unk40_handler(struct drm_device *dev)
++{
++	struct dcb_entry *dcbent;
++	int head, pclk, script, ret;
++
++	ret = nv50_display_irq_head(dev, &head, &dcbent);
++	if (ret)
++		goto ack;
++	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
++	script = nv50_display_script_select(dev, dcbent, pclk);
++
++	nouveau_bios_run_display_table(dev, dcbent, script, -pclk);
++
++ack:
++	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
++	nv_wr32(dev, 0x610030, 0x80000000);
++	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
++}
++
++void
++nv50_display_irq_handler_bh(struct work_struct *work)
++{
++	struct drm_nouveau_private *dev_priv =
++		container_of(work, struct drm_nouveau_private, irq_work);
++	struct drm_device *dev = dev_priv->dev;
++
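++	/* The top half masked PMC interrupts before scheduling this work;
++	 * drain all pending clock-change states, then unmask below.
++	 */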
++	for (;;) {
++		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
++		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
++
++		NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
++
++		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
++			nv50_display_unk10_handler(dev);
++		else
++		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
++			nv50_display_unk20_handler(dev);
++		else
++		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
++			nv50_display_unk40_handler(dev);
++		else
++			break;
++	}
++
++	nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
++}
++
++static void
++nv50_display_error_handler(struct drm_device *dev)
++{
++	uint32_t addr, data;
++
++	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
++	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
++	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
++
++	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
++		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
++
++	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
++}
++
++static void
++nv50_display_irq_hotplug(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct drm_connector *connector;
++	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
++	uint32_t unplug_mask, plug_mask, change_mask;
++	uint32_t hpd0, hpd1 = 0;
++
++	hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
++	if (dev_priv->chipset >= 0x90)
++		hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
++
++	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
++	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
++	change_mask = plug_mask | unplug_mask;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct drm_encoder_helper_funcs *helper;
++		struct nouveau_connector *nv_connector =
++			nouveau_connector(connector);
++		struct nouveau_encoder *nv_encoder;
++		struct dcb_gpio_entry *gpio;
++		uint32_t reg;
++		bool plugged;
++
++		if (!nv_connector->dcb)
++			continue;
++
++		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
++		if (!gpio || !(change_mask & (1 << gpio->line)))
++			continue;
++
++		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
++		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
++		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
++			drm_get_connector_name(connector));
++
++		if (!connector->encoder || !connector->encoder->crtc ||
++		    !connector->encoder->crtc->enabled)
++			continue;
++		nv_encoder = nouveau_encoder(connector->encoder);
++		helper = connector->encoder->helper_private;
++
++		if (nv_encoder->dcb->type != OUTPUT_DP)
++			continue;
++
++		if (plugged)
++			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
++		else
++			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
++	}
++
++	nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
++	if (dev_priv->chipset >= 0x90)
++		nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
++}
++
++void
++nv50_display_irq_handler(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t delayed = 0;
++
++	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
++		nv50_display_irq_hotplug(dev);
++
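++	/* Clock-change bits are handed off to the bottom half and left
++	 * unacked here; 'delayed' remembers them so this loop doesn't
++	 * spin on interrupts we are deliberately deferring.
++	 */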
++	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
++		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
++		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
++		uint32_t clock;
++
++		NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
++
++		if (!intr0 && !(intr1 & ~delayed))
++			break;
++
++		if (intr0 & 0x00010000) {
++			nv50_display_error_handler(dev);
++			intr0 &= ~0x00010000;
++		}
++
++		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
++			nv50_display_vblank_handler(dev, intr1);
++			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
++		}
++
++		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
++				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
++				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
++		if (clock) {
++			nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
++			if (!work_pending(&dev_priv->irq_work))
++				queue_work(dev_priv->wq, &dev_priv->irq_work);
++			delayed |= clock;
++			intr1 &= ~clock;
++		}
++
++		if (intr0) {
++			NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
++			nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
++		}
++
++		if (intr1) {
++			NV_ERROR(dev,
++				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
++			nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
++		}
++	}
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
+new file mode 100644
+index 0000000..3ae8d07
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_display.h
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __NV50_DISPLAY_H__
++#define __NV50_DISPLAY_H__
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++#include "nouveau_reg.h"
++#include "nouveau_crtc.h"
++#include "nv50_evo.h"
++
++void nv50_display_irq_handler(struct drm_device *dev);
++void nv50_display_irq_handler_bh(struct work_struct *work);
++int nv50_display_init(struct drm_device *dev);
++int nv50_display_create(struct drm_device *dev);
++int nv50_display_destroy(struct drm_device *dev);
++int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
++int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
++
++#endif /* __NV50_DISPLAY_H__ */
+diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
+new file mode 100644
+index 0000000..aae1334
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_evo.h
+@@ -0,0 +1,113 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#define NV50_EVO_UPDATE                                              0x00000080
++#define NV50_EVO_UNK84                                               0x00000084
++#define NV50_EVO_UNK84_NOTIFY                                        0x40000000
++#define NV50_EVO_UNK84_NOTIFY_DISABLED                               0x00000000
++#define NV50_EVO_UNK84_NOTIFY_ENABLED                                0x40000000
++#define NV50_EVO_DMA_NOTIFY                                          0x00000088
++#define NV50_EVO_DMA_NOTIFY_HANDLE                                   0xffffffff
++#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE                              0x00000000
++#define NV50_EVO_UNK8C                                               0x0000008C
++
++#define NV50_EVO_DAC(n, r)                       ((n) * 0x80 + NV50_EVO_DAC_##r)
++#define NV50_EVO_DAC_MODE_CTRL                                       0x00000400
++#define NV50_EVO_DAC_MODE_CTRL_CRTC0                                 0x00000001
++#define NV50_EVO_DAC_MODE_CTRL_CRTC1                                 0x00000002
++#define NV50_EVO_DAC_MODE_CTRL2                                      0x00000404
++#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC                               0x00000001
++#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC                               0x00000002
++
++#define NV50_EVO_SOR(n, r)                       ((n) * 0x40 + NV50_EVO_SOR_##r)
++#define NV50_EVO_SOR_MODE_CTRL                                       0x00000600
++#define NV50_EVO_SOR_MODE_CTRL_CRTC0                                 0x00000001
++#define NV50_EVO_SOR_MODE_CTRL_CRTC1                                 0x00000002
++#define NV50_EVO_SOR_MODE_CTRL_TMDS                                  0x00000100
++#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK                        0x00000400
++#define NV50_EVO_SOR_MODE_CTRL_NHSYNC                                0x00001000
++#define NV50_EVO_SOR_MODE_CTRL_NVSYNC                                0x00002000
++
++#define NV50_EVO_CRTC(n, r)                    ((n) * 0x400 + NV50_EVO_CRTC_##r)
++#define NV84_EVO_CRTC(n, r)                    ((n) * 0x400 + NV84_EVO_CRTC_##r)
++#define NV50_EVO_CRTC_UNK0800                                        0x00000800
++#define NV50_EVO_CRTC_CLOCK                                          0x00000804
++#define NV50_EVO_CRTC_INTERLACE                                      0x00000808
++#define NV50_EVO_CRTC_DISPLAY_START                                  0x00000810
++#define NV50_EVO_CRTC_DISPLAY_TOTAL                                  0x00000814
++#define NV50_EVO_CRTC_SYNC_DURATION                                  0x00000818
++#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END                        0x0000081c
++#define NV50_EVO_CRTC_UNK0820                                        0x00000820
++#define NV50_EVO_CRTC_UNK0824                                        0x00000824
++#define NV50_EVO_CRTC_UNK082C                                        0x0000082c
++#define NV50_EVO_CRTC_CLUT_MODE                                      0x00000840
++/* You can't have a palette in 8-bit mode (=OFF) */
++#define NV50_EVO_CRTC_CLUT_MODE_BLANK                                0x00000000
++#define NV50_EVO_CRTC_CLUT_MODE_OFF                                  0x80000000
++#define NV50_EVO_CRTC_CLUT_MODE_ON                                   0xC0000000
++#define NV50_EVO_CRTC_CLUT_OFFSET                                    0x00000844
++#define NV84_EVO_CRTC_CLUT_DMA                                       0x0000085C
++#define NV84_EVO_CRTC_CLUT_DMA_HANDLE                                0xffffffff
++#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE                           0x00000000
++#define NV50_EVO_CRTC_FB_OFFSET                                      0x00000860
++#define NV50_EVO_CRTC_FB_SIZE                                        0x00000868
++#define NV50_EVO_CRTC_FB_CONFIG                                      0x0000086c
++#define NV50_EVO_CRTC_FB_CONFIG_MODE                                 0x00100000
++#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE                            0x00000000
++#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH                           0x00100000
++#define NV50_EVO_CRTC_FB_DEPTH                                       0x00000870
++#define NV50_EVO_CRTC_FB_DEPTH_8                                     0x00001e00
++#define NV50_EVO_CRTC_FB_DEPTH_15                                    0x0000e900
++#define NV50_EVO_CRTC_FB_DEPTH_16                                    0x0000e800
++#define NV50_EVO_CRTC_FB_DEPTH_24                                    0x0000cf00
++#define NV50_EVO_CRTC_FB_DEPTH_30                                    0x0000d100
++#define NV50_EVO_CRTC_FB_DMA                                         0x00000874
++#define NV50_EVO_CRTC_FB_DMA_HANDLE                                  0xffffffff
++#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE                             0x00000000
++#define NV50_EVO_CRTC_CURSOR_CTRL                                    0x00000880
++#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE                               0x05000000
++#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW                               0x85000000
++#define NV50_EVO_CRTC_CURSOR_OFFSET                                  0x00000884
++#define NV84_EVO_CRTC_CURSOR_DMA                                     0x0000089c
++#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE                              0xffffffff
++#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE                         0x00000000
++#define NV50_EVO_CRTC_DITHER_CTRL                                    0x000008a0
++#define NV50_EVO_CRTC_DITHER_CTRL_OFF                                0x00000000
++#define NV50_EVO_CRTC_DITHER_CTRL_ON                                 0x00000011
++#define NV50_EVO_CRTC_SCALE_CTRL                                     0x000008a4
++#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE                            0x00000000
++#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE                              0x00000009
++#define NV50_EVO_CRTC_COLOR_CTRL                                     0x000008a8
++#define NV50_EVO_CRTC_COLOR_CTRL_COLOR                               0x00040000
++#define NV50_EVO_CRTC_FB_POS                                         0x000008c0
++#define NV50_EVO_CRTC_REAL_RES                                       0x000008c8
++#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET                            0x000008d4
++#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
++	((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
++/* Both of these are needed, otherwise nothing happens. */
++#define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
++#define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
++
+diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+new file mode 100644
+index 0000000..0f57cdf
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+@@ -0,0 +1,267 @@
++#include "drmP.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++#include "nouveau_fbcon.h"
++
++void
++nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++
++	if (info->state != FBINFO_STATE_RUNNING)
++		return;
++
++	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
++	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
++		nouveau_fbcon_gpu_lockup(info);
++	}
++
++	if (info->flags & FBINFO_HWACCEL_DISABLED) {
++		cfb_fillrect(info, rect);
++		return;
++	}
++
++	if (rect->rop != ROP_COPY) {
++		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
++		OUT_RING(chan, 1);
++	}
++	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
++	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
++	else
++		OUT_RING(chan, rect->color);
++	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
++	OUT_RING(chan, rect->dx);
++	OUT_RING(chan, rect->dy);
++	OUT_RING(chan, rect->dx + rect->width);
++	OUT_RING(chan, rect->dy + rect->height);
++	if (rect->rop != ROP_COPY) {
++		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
++		OUT_RING(chan, 3);
++	}
++	FIRE_RING(chan);
++}
++
++void
++nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++
++	if (info->state != FBINFO_STATE_RUNNING)
++		return;
++
++	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
++		nouveau_fbcon_gpu_lockup(info);
++	}
++
++	if (info->flags & FBINFO_HWACCEL_DISABLED) {
++		cfb_copyarea(info, region);
++		return;
++	}
++
++	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
++	OUT_RING(chan, 0);
++	BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
++	OUT_RING(chan, region->dx);
++	OUT_RING(chan, region->dy);
++	OUT_RING(chan, region->width);
++	OUT_RING(chan, region->height);
++	BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, region->sx);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, region->sy);
++	FIRE_RING(chan);
++}
++
++void
++nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++	uint32_t width, dwords, *data = (uint32_t *)image->data;
++	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
++	uint32_t *palette = info->pseudo_palette;
++
++	if (info->state != FBINFO_STATE_RUNNING)
++		return;
++
++	if (image->depth != 1) {
++		cfb_imageblit(info, image);
++		return;
++	}
++
++	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
++		nouveau_fbcon_gpu_lockup(info);
++	}
++
++	if (info->flags & FBINFO_HWACCEL_DISABLED) {
++		cfb_imageblit(info, image);
++		return;
++	}
++
++	width = (image->width + 31) & ~31;
++	dwords = (width * image->height) >> 5;
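++	/* Each row of the 1bpp source is padded to a 32-pixel boundary,
++	 * so every 32 pixels feed the engine one dword.
++	 */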
++
++	BEGIN_RING(chan, NvSub2D, 0x0814, 2);
++	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		OUT_RING(chan, palette[image->bg_color] | mask);
++		OUT_RING(chan, palette[image->fg_color] | mask);
++	} else {
++		OUT_RING(chan, image->bg_color);
++		OUT_RING(chan, image->fg_color);
++	}
++	BEGIN_RING(chan, NvSub2D, 0x0838, 2);
++	OUT_RING(chan, image->width);
++	OUT_RING(chan, image->height);
++	BEGIN_RING(chan, NvSub2D, 0x0850, 4);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, image->dx);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, image->dy);
++
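++	/* A method header carries at most 2047 data dwords, so the bitmap
++	 * is uploaded in chunks of that size.
++	 */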
++	while (dwords) {
++		int push = dwords > 2047 ? 2047 : dwords;
++
++		if (RING_SPACE(chan, push + 1)) {
++			nouveau_fbcon_gpu_lockup(info);
++			cfb_imageblit(info, image);
++			return;
++		}
++
++		dwords -= push;
++
++		BEGIN_RING(chan, NvSub2D, 0x40000860, push);
++		OUT_RINGp(chan, data, push);
++		data += push;
++	}
++
++	FIRE_RING(chan);
++}
++
++int
++nv50_fbcon_accel_init(struct fb_info *info)
++{
++	struct nouveau_fbcon_par *par = info->par;
++	struct drm_device *dev = par->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->channel;
++	struct nouveau_gpuobj *eng2d = NULL;
++	int ret, format;
++
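++	/* Pick the 2D engine surface format matching the framebuffer
++	 * depth; the values are raw eng2d format codes.
++	 */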
++	switch (info->var.bits_per_pixel) {
++	case 8:
++		format = 0xf3;
++		break;
++	case 15:
++		format = 0xf8;
++		break;
++	case 16:
++		format = 0xe8;
++		break;
++	case 32:
++		switch (info->var.transp.length) {
++		case 0: /* depth 24 */
++		case 8: /* depth 32, just use 24.. */
++			format = 0xe6;
++			break;
++		case 2: /* depth 30 */
++			format = 0xd1;
++			break;
++		default:
++			return -EINVAL;
++		}
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
++	if (ret)
++		return ret;
++
++	ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
++	if (ret)
++		return ret;
++
++	ret = RING_SPACE(chan, 59);
++	if (ret) {
++		nouveau_fbcon_gpu_lockup(info);
++		return ret;
++	}
++
++	BEGIN_RING(chan, NvSub2D, 0x0000, 1);
++	OUT_RING(chan, Nv2D);
++	BEGIN_RING(chan, NvSub2D, 0x0180, 4);
++	OUT_RING(chan, NvNotify0);
++	OUT_RING(chan, chan->vram_handle);
++	OUT_RING(chan, chan->vram_handle);
++	OUT_RING(chan, chan->vram_handle);
++	BEGIN_RING(chan, NvSub2D, 0x0290, 1);
++	OUT_RING(chan, 0);
++	BEGIN_RING(chan, NvSub2D, 0x0888, 1);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
++	OUT_RING(chan, 3);
++	BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
++	OUT_RING(chan, 0x55);
++	BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, 1);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x0580, 2);
++	OUT_RING(chan, 4);
++	OUT_RING(chan, format);
++	BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
++	OUT_RING(chan, 2);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x0804, 1);
++	OUT_RING(chan, format);
++	BEGIN_RING(chan, NvSub2D, 0x0800, 1);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x0808, 3);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, 0);
++	BEGIN_RING(chan, NvSub2D, 0x081c, 1);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x0840, 4);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, 1);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x0200, 2);
++	OUT_RING(chan, format);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x0214, 5);
++	OUT_RING(chan, info->fix.line_length);
++	OUT_RING(chan, info->var.xres_virtual);
++	OUT_RING(chan, info->var.yres_virtual);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
++			 dev_priv->vm_vram_base);
++	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
++	OUT_RING(chan, format);
++	OUT_RING(chan, 1);
++	BEGIN_RING(chan, NvSub2D, 0x0244, 5);
++	OUT_RING(chan, info->fix.line_length);
++	OUT_RING(chan, info->var.xres_virtual);
++	OUT_RING(chan, info->var.yres_virtual);
++	OUT_RING(chan, 0);
++	OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
++			 dev_priv->vm_vram_base);
++
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
+new file mode 100644
+index 0000000..204a79f
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
+@@ -0,0 +1,495 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
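++/* The two "thingo" objects appear to be a double-buffered channel
++ * runlist: each pass rebuilds the inactive copy with the live channel
++ * IDs, then points PFIFO at it via 0x32f4/0x32ec.
++ */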
++struct nv50_fifo_priv {
++	struct nouveau_gpuobj_ref *thingo[2];
++	int cur_thingo;
++};
++
++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
++
++static void
++nv50_fifo_init_thingo(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
++	struct nouveau_gpuobj_ref *cur;
++	int i, nr;
++
++	NV_DEBUG(dev, "\n");
++
++	cur = priv->thingo[priv->cur_thingo];
++	priv->cur_thingo = !priv->cur_thingo;
++
++	/* We never schedule channel 0 or 127 */
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	for (i = 1, nr = 0; i < 127; i++) {
++		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
++			nv_wo32(dev, cur->gpuobj, nr++, i);
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, 0x32f4, cur->instance >> 12);
++	nv_wr32(dev, 0x32ec, nr);
++	nv_wr32(dev, 0x2500, 0x101);
++}
++
++static int
++nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->fifos[channel];
++	uint32_t inst;
++
++	NV_DEBUG(dev, "ch%d\n", channel);
++
++	if (!chan->ramfc)
++		return -EINVAL;
++
++	if (IS_G80)
++		inst = chan->ramfc->instance >> 12;
++	else
++		inst = chan->ramfc->instance >> 8;
++	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
++		 inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
++
++	if (!nt)
++		nv50_fifo_init_thingo(dev);
++	return 0;
++}
++
++static void
++nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t inst;
++
++	NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);
++
++	if (IS_G80)
++		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
++	else
++		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
++	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
++
++	if (!nt)
++		nv50_fifo_init_thingo(dev);
++}
++
++static void
++nv50_fifo_init_reset(struct drm_device *dev)
++{
++	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
++
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
++}
++
++static void
++nv50_fifo_init_intr(struct drm_device *dev)
++{
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
++	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
++}
++
++static void
++nv50_fifo_init_context_table(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int i;
++
++	NV_DEBUG(dev, "\n");
++
++	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
++		if (dev_priv->fifos[i])
++			nv50_fifo_channel_enable(dev, i, true);
++		else
++			nv50_fifo_channel_disable(dev, i, true);
++	}
++
++	nv50_fifo_init_thingo(dev);
++}
++
++static void
++nv50_fifo_init_regs__nv(struct drm_device *dev)
++{
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, 0x250c, 0x6f3cfc34);
++}
++
++static void
++nv50_fifo_init_regs(struct drm_device *dev)
++{
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, 0x2500, 0);
++	nv_wr32(dev, 0x3250, 0);
++	nv_wr32(dev, 0x3220, 0);
++	nv_wr32(dev, 0x3204, 0);
++	nv_wr32(dev, 0x3210, 0);
++	nv_wr32(dev, 0x3270, 0);
++
++	/* Enable the dummy channels set up by nv50_instmem.c */
++	nv50_fifo_channel_enable(dev, 0, true);
++	nv50_fifo_channel_enable(dev, 127, true);
++}
++
++int
++nv50_fifo_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_fifo_priv *priv;
++	int ret;
++
++	NV_DEBUG(dev, "\n");
++
++	priv = dev_priv->engine.fifo.priv;
++	if (priv) {
++		priv->cur_thingo = !priv->cur_thingo;
++		goto just_reset;
++	}
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++	dev_priv->engine.fifo.priv = priv;
++
++	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
++				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
++	if (ret) {
++		NV_ERROR(dev, "error creating thingo0: %d\n", ret);
++		return ret;
++	}
++
++	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
++				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
++	if (ret) {
++		NV_ERROR(dev, "error creating thingo1: %d\n", ret);
++		return ret;
++	}
++
++just_reset:
++	nv50_fifo_init_reset(dev);
++	nv50_fifo_init_intr(dev);
++	nv50_fifo_init_context_table(dev);
++	nv50_fifo_init_regs__nv(dev);
++	nv50_fifo_init_regs(dev);
++	dev_priv->engine.fifo.enable(dev);
++	dev_priv->engine.fifo.reassign(dev, true);
++
++	return 0;
++}
++
++void
++nv50_fifo_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
++
++	NV_DEBUG(dev, "\n");
++
++	if (!priv)
++		return;
++
++	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
++	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
++
++	dev_priv->engine.fifo.priv = NULL;
++	kfree(priv);
++}
++
++int
++nv50_fifo_channel_id(struct drm_device *dev)
++{
++	return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
++			NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
++}
++
++int
++nv50_fifo_create_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *ramfc = NULL;
++	int ret;
++
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++
++	if (IS_G80) {
++		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
++		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
++
++		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
++					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
++					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
++					      &chan->ramfc);
++		if (ret)
++			return ret;
++
++		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
++					      ramin_voffset + 0x0400, 4096,
++					      0, NULL, &chan->cache);
++		if (ret)
++			return ret;
++	} else {
++		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
++					     NVOBJ_FLAG_ZERO_ALLOC |
++					     NVOBJ_FLAG_ZERO_FREE,
++					     &chan->ramfc);
++		if (ret)
++			return ret;
++		ramfc = chan->ramfc->gpuobj;
++
++		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
++					     0, &chan->cache);
++		if (ret)
++			return ret;
++	}
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++
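++	/* Seed the fresh RAMFC: 0x08/0x10 are presumably the initial PUT
++	 * and GET (both at pushbuf_base), 0x48 points at the push buffer
++	 * DMA object and 0x80 at RAMHT; the rest look like fixed magic
++	 * defaults.
++	 */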
++	nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
++	nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
++	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
++	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
++	nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
++	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
++	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
++	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
++	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
++	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
++	nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
++
++	if (!IS_G80) {
++		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
++		nv_wo32(dev, chan->ramin->gpuobj, 1,
++						chan->ramfc->instance >> 8);
++
++		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
++		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
++	}
++
++	dev_priv->engine.instmem.finish_access(dev);
++
++	ret = nv50_fifo_channel_enable(dev, chan->id, false);
++	if (ret) {
++		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
++		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++		return ret;
++	}
++
++	return 0;
++}
++
++void
++nv50_fifo_destroy_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
++
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++
++	/* This will ensure the channel is seen as disabled. */
++	chan->ramfc = NULL;
++	nv50_fifo_channel_disable(dev, chan->id, false);
++
++	/* Channel 0's dummy state is also used by channel 127; disable
++	 * that one too.
++	 */
++	if (chan->id == 0)
++		nv50_fifo_channel_disable(dev, 127, false);
++
++	nouveau_gpuobj_ref_del(dev, &ramfc);
++	nouveau_gpuobj_ref_del(dev, &chan->cache);
++}
++
++int
++nv50_fifo_load_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
++	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
++	int ptr, cnt;
++
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++
++	dev_priv->engine.instmem.prepare_access(dev, false);
++
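++	/* Replay the saved RAMFC image into the PFIFO context registers;
++	 * the offsets mirror the save sequence in
++	 * nv50_fifo_unload_context().
++	 */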
++	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
++	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
++	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
++	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
++	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
++	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
++	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
++	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
++	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
++	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
++	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
++	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
++	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
++	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
++	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
++	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
++	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
++	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
++	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
++	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
++	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
++	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
++	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
++	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
++	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
++	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
++	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
++	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
++	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
++	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
++	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
++	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
++	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
++
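++	/* Refill CACHE1 with the method/data pairs drained at unload
++	 * time; word 0x84 of the RAMFC holds how many were saved.
++	 */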
++	cnt = nv_ro32(dev, ramfc, 0x84/4);
++	for (ptr = 0; ptr < cnt; ptr++) {
++		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
++			nv_ro32(dev, cache, (ptr * 2) + 0));
++		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
++			nv_ro32(dev, cache, (ptr * 2) + 1));
++	}
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
++	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
++
++	/* guessing that all the 0x34xx regs aren't on NV50 */
++	if (!IS_G80) {
++		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
++		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
++		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
++		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
++		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
++	}
++
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
++	return 0;
++}
++
++int
++nv50_fifo_unload_context(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
++	struct nouveau_gpuobj *ramfc, *cache;
++	struct nouveau_channel *chan = NULL;
++	int chid, get, put, ptr;
++
++	NV_DEBUG(dev, "\n");
++
++	chid = pfifo->channel_id(dev);
++	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
++		return 0;
++
++	chan = dev_priv->fifos[chid];
++	if (!chan) {
++		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
++		return -EINVAL;
++	}
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++	ramfc = chan->ramfc->gpuobj;
++	cache = chan->cache->gpuobj;
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++
++	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
++	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
++	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
++	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
++	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
++	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
++	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
++	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
++	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
++	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
++	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
++	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
++	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
++	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
++	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
++	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
++	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
++	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
++	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
++	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
++	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
++	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
++	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
++	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
++	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
++	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
++	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
++	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
++	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
++	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
++	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
++	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
++	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
++
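++	/* Drain any methods still queued in CACHE1 into the context's
++	 * cache object so they can be replayed on the next load.
++	 */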
++	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
++	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
++	ptr = 0;
++	while (put != get) {
++		nv_wo32(dev, cache, ptr++,
++			    nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
++		nv_wo32(dev, cache, ptr++,
++			    nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
++		get = (get + 1) & 0x1ff;
++	}
++
++	/* guessing that all the 0x34xx regs aren't on NV50 */
++	if (!IS_G80) {
++		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
++		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
++		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
++		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
++		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
++		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
++	}
++
++	dev_priv->engine.instmem.finish_access(dev);
++
++	/* XXX: probably reload ch127 (NULL) state back too */
++	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
++	return 0;
++}
++
+diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
+new file mode 100644
+index 0000000..6d50480
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_graph.c
+@@ -0,0 +1,394 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++MODULE_FIRMWARE("nouveau/nv50.ctxprog");
++MODULE_FIRMWARE("nouveau/nv50.ctxvals");
++MODULE_FIRMWARE("nouveau/nv84.ctxprog");
++MODULE_FIRMWARE("nouveau/nv84.ctxvals");
++MODULE_FIRMWARE("nouveau/nv86.ctxprog");
++MODULE_FIRMWARE("nouveau/nv86.ctxvals");
++MODULE_FIRMWARE("nouveau/nv92.ctxprog");
++MODULE_FIRMWARE("nouveau/nv92.ctxvals");
++MODULE_FIRMWARE("nouveau/nv94.ctxprog");
++MODULE_FIRMWARE("nouveau/nv94.ctxvals");
++MODULE_FIRMWARE("nouveau/nv96.ctxprog");
++MODULE_FIRMWARE("nouveau/nv96.ctxvals");
++MODULE_FIRMWARE("nouveau/nv98.ctxprog");
++MODULE_FIRMWARE("nouveau/nv98.ctxvals");
++MODULE_FIRMWARE("nouveau/nva0.ctxprog");
++MODULE_FIRMWARE("nouveau/nva0.ctxvals");
++MODULE_FIRMWARE("nouveau/nva5.ctxprog");
++MODULE_FIRMWARE("nouveau/nva5.ctxvals");
++MODULE_FIRMWARE("nouveau/nva8.ctxprog");
++MODULE_FIRMWARE("nouveau/nva8.ctxvals");
++MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
++MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
++MODULE_FIRMWARE("nouveau/nvac.ctxprog");
++MODULE_FIRMWARE("nouveau/nvac.ctxvals");
++
++#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
++
++static void
++nv50_graph_init_reset(struct drm_device *dev)
++{
++	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
++
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
++	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
++}
++
++static void
++nv50_graph_init_intr(struct drm_device *dev)
++{
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
++	nv_wr32(dev, 0x400138, 0xffffffff);
++	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
++}
++
++static void
++nv50_graph_init_regs__nv(struct drm_device *dev)
++{
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, 0x400804, 0xc0000000);
++	nv_wr32(dev, 0x406800, 0xc0000000);
++	nv_wr32(dev, 0x400c04, 0xc0000000);
++	nv_wr32(dev, 0x401800, 0xc0000000);
++	nv_wr32(dev, 0x405018, 0xc0000000);
++	nv_wr32(dev, 0x402000, 0xc0000000);
++
++	nv_wr32(dev, 0x400108, 0xffffffff);
++
++	nv_wr32(dev, 0x400824, 0x00004000);
++	nv_wr32(dev, 0x400500, 0x00010001);
++}
++
++static void
++nv50_graph_init_regs(struct drm_device *dev)
++{
++	NV_DEBUG(dev, "\n");
++
++	nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
++				(1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
++	nv_wr32(dev, 0x402ca8, 0x800);
++}
++
++static int
++nv50_graph_init_ctxctl(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	NV_DEBUG(dev, "\n");
++
++	nouveau_grctx_prog_load(dev);
++	if (!dev_priv->engine.graph.ctxprog)
++		dev_priv->engine.graph.accel_blocked = true;
++
++	nv_wr32(dev, 0x400320, 4);
++	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
++	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
++	return 0;
++}
++
++int
++nv50_graph_init(struct drm_device *dev)
++{
++	int ret;
++
++	NV_DEBUG(dev, "\n");
++
++	nv50_graph_init_reset(dev);
++	nv50_graph_init_regs__nv(dev);
++	nv50_graph_init_regs(dev);
++	nv50_graph_init_intr(dev);
++
++	ret = nv50_graph_init_ctxctl(dev);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
++void
++nv50_graph_takedown(struct drm_device *dev)
++{
++	NV_DEBUG(dev, "\n");
++	nouveau_grctx_fini(dev);
++}
++
++void
++nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
++{
++	const uint32_t mask = 0x00010001;
++
++	if (enabled)
++		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
++	else
++		nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
++}
++
++struct nouveau_channel *
++nv50_graph_channel(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	uint32_t inst;
++	int i;
++
++	/* Be sure we're not in the middle of a context switch or bad things
++	 * will happen, such as unloading the wrong pgraph context.
++	 */
++	if (!nv_wait(0x400300, 0x00000001, 0x00000000))
++		NV_ERROR(dev, "Ctxprog is still running\n");
++
++	inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
++	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
++		return NULL;
++	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
++
++	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
++		struct nouveau_channel *chan = dev_priv->fifos[i];
++
++		if (chan && chan->ramin && chan->ramin->instance == inst)
++			return chan;
++	}
++
++	return NULL;
++}
++
++int
++nv50_graph_create_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
++	struct nouveau_gpuobj *ctx;
++	uint32_t grctx_size = 0x70000;
++	int hdr, ret;
++
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++
++	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
++				     NVOBJ_FLAG_ZERO_ALLOC |
++				     NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
++	if (ret)
++		return ret;
++	ctx = chan->ramin_grctx->gpuobj;
++
++	hdr = IS_G80 ? 0x200 : 0x20;
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
++	nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
++					   grctx_size - 1);
++	nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
++	nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
++	nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
++	nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
++	dev_priv->engine.instmem.finish_access(dev);
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	nouveau_grctx_vals_load(dev, ctx);
++	nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
++	if ((dev_priv->chipset & 0xf0) == 0xa0)
++		nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
++	else
++		nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
++	dev_priv->engine.instmem.finish_access(dev);
++
++	return 0;
++}
++
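
The six words written at hdr above form the channel's graph-context
header: going by the values, word 0x00 looks like a class/flags word,
0x04/0x08 bracket the context as limit/base, and 0x14 carries a flag of
unknown meaning. Laid out as a struct purely for readability -- the field
names here are guesses, not nouveau identifiers:

	struct grctx_hdr {	/* at RAMIN + 0x200 on G80, else + 0x20 */
		uint32_t flags;		/* 0x00190002 above */
		uint32_t limit;		/* grctx instance + grctx_size - 1 */
		uint32_t base;		/* grctx instance */
		uint32_t unk0c;		/* zero */
		uint32_t unk10;		/* zero */
		uint32_t unk14;		/* 0x00010000 above */
	};
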
++void
++nv50_graph_destroy_context(struct nouveau_channel *chan)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	int i, hdr = IS_G80 ? 0x200 : 0x20;
++
++	NV_DEBUG(dev, "ch%d\n", chan->id);
++
++	if (!chan->ramin || !chan->ramin->gpuobj)
++		return;
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	for (i = hdr; i < hdr + 24; i += 4)
++		nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
++}
++
++static int
++nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
++{
++	uint32_t fifo = nv_rd32(dev, 0x400500);
++
++	nv_wr32(dev, 0x400500, fifo & ~1);
++	nv_wr32(dev, 0x400784, inst);
++	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
++	nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
++	nv_wr32(dev, 0x400040, 0xffffffff);
++	(void)nv_rd32(dev, 0x400040);
++	nv_wr32(dev, 0x400040, 0x00000000);
++	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
++
++	if (nouveau_wait_for_idle(dev))
++		nv_wr32(dev, 0x40032c, inst | (1<<31));
++	nv_wr32(dev, 0x400500, fifo);
++
++	return 0;
++}
++
++int
++nv50_graph_load_context(struct nouveau_channel *chan)
++{
++	uint32_t inst = chan->ramin->instance >> 12;
++
++	NV_DEBUG(chan->dev, "ch%d\n", chan->id);
++	return nv50_graph_do_load_context(chan->dev, inst);
++}
++
++int
++nv50_graph_unload_context(struct drm_device *dev)
++{
++	uint32_t inst;
++
++	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
++	if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
++		return 0;
++	inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
++
++	nouveau_wait_for_idle(dev);
++	nv_wr32(dev, 0x400784, inst);
++	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
++	nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
++	nouveau_wait_for_idle(dev);
++
++	nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
++	return 0;
++}
++
++void
++nv50_graph_context_switch(struct drm_device *dev)
++{
++	uint32_t inst;
++
++	nv50_graph_unload_context(dev);
++
++	inst  = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
++	inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
++	nv50_graph_do_load_context(dev, inst);
++
++	nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
++		NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
++}
++
++static int
++nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
++			   int mthd, uint32_t data)
++{
++	struct nouveau_gpuobj_ref *ref = NULL;
++
++	if (nouveau_gpuobj_ref_find(chan, data, &ref))
++		return -ENOENT;
++
++	if (nouveau_notifier_offset(ref->gpuobj, NULL))
++		return -EINVAL;
++
++	chan->nvsw.vblsem = ref->gpuobj;
++	chan->nvsw.vblsem_offset = ~0;
++	return 0;
++}
++
++static int
++nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
++			      int mthd, uint32_t data)
++{
++	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
++		return -ERANGE;
++
++	chan->nvsw.vblsem_offset = data >> 2;
++	return 0;
++}
++
++static int
++nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
++				   int mthd, uint32_t data)
++{
++	chan->nvsw.vblsem_rval = data;
++	return 0;
++}
++
++static int
++nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
++			       int mthd, uint32_t data)
++{
++	struct drm_device *dev = chan->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
++		return -EINVAL;
++
++	if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
++		      NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
++		nv_wr32(dev, NV50_PDISPLAY_INTR_1,
++			NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
++		nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
++			NV50_PDISPLAY_INTR_EN) |
++			NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
++	}
++
++	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
++	return 0;
++}
++
++static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
++	{ 0x018c, nv50_graph_nvsw_dma_vblsem },
++	{ 0x0400, nv50_graph_nvsw_vblsem_offset },
++	{ 0x0404, nv50_graph_nvsw_vblsem_release_val },
++	{ 0x0408, nv50_graph_nvsw_vblsem_release },
++	{}
++};
++
++struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
++	{ 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
++	{ 0x0030, false, NULL }, /* null */
++	{ 0x5039, false, NULL }, /* m2mf */
++	{ 0x502d, false, NULL }, /* 2d */
++	{ 0x50c0, false, NULL }, /* compute */
++	{ 0x5097, false, NULL }, /* tesla (nv50) */
++	{ 0x8297, false, NULL }, /* tesla (nv80/nv90) */
++	{ 0x8397, false, NULL }, /* tesla (nva0) */
++	{ 0x8597, false, NULL }, /* tesla (nva8) */
++	{}
++};
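
The two tables above are the usual class/method dispatch pattern: each
zero-terminated class entry names an object class, whether it is handled
in software, and (for software classes) a method table. A dispatcher
would walk them roughly as below; this is a sketch assuming the obvious
field names (id/software/methods and mthd/exec), not the driver's actual
lookup code:

	static int
	dispatch_mthd(struct nouveau_channel *chan,
		      struct nouveau_pgraph_object_class *grc,
		      int grclass, int mthd, uint32_t data)
	{
		for (; grc->id; grc++) {
			struct nouveau_pgraph_object_method *m;

			if (grc->id != grclass || !grc->methods)
				continue;
			for (m = grc->methods; m->mthd; m++) {
				if (m->mthd == mthd)
					return m->exec(chan, grclass,
						       mthd, data);
			}
		}
		return -ENOENT;	/* no software handler for this method */
	}
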
+diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
+new file mode 100644
+index 0000000..f0dc4e3
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
+@@ -0,0 +1,531 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ *
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++struct nv50_instmem_priv {
++	uint32_t save1700[5]; /* 0x1700->0x1710 */
++
++	struct nouveau_gpuobj_ref *pramin_pt;
++	struct nouveau_gpuobj_ref *pramin_bar;
++	struct nouveau_gpuobj_ref *fb_bar;
++
++	bool last_access_wr;
++};
++
++#define NV50_INSTMEM_PAGE_SHIFT 12
++#define NV50_INSTMEM_PAGE_SIZE  (1 << NV50_INSTMEM_PAGE_SHIFT)
++#define NV50_INSTMEM_PT_SIZE(a)	(((a) >> 12) << 3)
++
++/* NOTE: assumes 0x1700 already covers the correct MiB of PRAMIN */
++#define BAR0_WI32(g, o, v) do {                                   \
++	uint32_t offset;                                          \
++	if ((g)->im_backing) {                                    \
++		offset = (g)->im_backing_start;                   \
++	} else {                                                  \
++		offset  = chan->ramin->gpuobj->im_backing_start;  \
++		offset += (g)->im_pramin->start;                  \
++	}                                                         \
++	offset += (o);                                            \
++	nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v));              \
++} while (0)
++
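
BAR0_WI32() resolves a gpuobj plus byte offset to an absolute VRAM
address, then writes it through the 1 MiB PRAMIN window at
BAR0 + NV_RAMIN; the & 0xfffff keeps only the offset within that window,
which is why 0x1700 must already point at the right MiB. A worked example
with invented numbers:

	/* im_backing_start = 0x0ff00000 (start of the reserved MiB),
	 * o = 0x204, and 0x1700 already holds 0x0ff00000 >> 16:
	 *
	 *   offset       = 0x0ff00000 + 0x204 = 0x0ff00204
	 *   BAR0 address = NV_RAMIN + (0x0ff00204 & 0xfffff)
	 *                = NV_RAMIN + 0x00204
	 */
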
++int
++nv50_instmem_init(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan;
++	uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
++	struct nv50_instmem_priv *priv;
++	int ret, i;
++	uint32_t v, save_nv001700;
++
++	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++	if (!priv)
++		return -ENOMEM;
++	dev_priv->engine.instmem.priv = priv;
++
++	/* Save state, will restore at takedown. */
++	for (i = 0x1700; i <= 0x1710; i += 4)
++		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
++
++	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
++		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
++	else
++		dev_priv->vram_sys_base = 0;
++
++	/* Reserve the last MiB of VRAM; at some point we should probably
++	 * avoid setting up the tables below on top of the VBIOS image.
++	 */
++	dev_priv->ramin_rsvd_vram = 1 << 20;
++	c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
++	c_size   = 128 << 10;
++	c_vmpd   = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
++	c_ramfc  = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
++	c_base   = c_vmpd + 0x4000;
++	pt_size  = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
++
++	NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
++	NV_DEBUG(dev, "    VBIOS image: 0x%08x\n",
++				(nv_rd32(dev, 0x619f04) & ~0xff) << 8);
++	NV_DEBUG(dev, "  Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
++	NV_DEBUG(dev, "        PT size: %d KiB\n", pt_size >> 10);
++
++	/* Determine the VM layout first, to make sure we allocate
++	 * enough memory for all the page tables.
++	 */
++	dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
++	dev_priv->vm_gart_size = NV50_VM_BLOCK;
++
++	dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
++	dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
++	if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
++		dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
++	dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
++	dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
++
++	dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
++
++	NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
++		 dev_priv->vm_gart_base,
++		 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
++	NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
++		 dev_priv->vm_vram_base,
++		 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
++
++	c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
++
++	/* Map BAR0 PRAMIN aperture over the memory we want to use */
++	save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
++	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
++
++	/* Create a fake channel, and use it as our "dummy" channels 0/127.
++	 * The main reason for creating a channel is so we can use the gpuobj
++	 * code.  However, it's probably worth noting that NVIDIA also set up
++	 * their channels 0/127 with the same values configured here, so
++	 * there may be some other reason for doing this.
++	 *
++	 * We have to create the entire channel manually, as the real channel
++	 * creation code assumes we have PRAMIN access, and we don't until
++	 * we're done here.
++	 */
++	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
++	if (!chan)
++		return -ENOMEM;
++	chan->id = 0;
++	chan->dev = dev;
++	chan->file_priv = (struct drm_file *)-2;
++	dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
++
++	/* Channel's PRAMIN object + heap */
++	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
++							NULL, &chan->ramin);
++	if (ret)
++		return ret;
++
++	if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
++		return -ENOMEM;
++
++	/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
++	ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
++						0x4000, 0, NULL, &chan->ramfc);
++	if (ret)
++		return ret;
++
++	for (i = 0; i < c_vmpd; i += 4)
++		BAR0_WI32(chan->ramin->gpuobj, i, 0);
++
++	/* VM page directory */
++	ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
++					   0x4000, 0, &chan->vm_pd, NULL);
++	if (ret)
++		return ret;
++	for (i = 0; i < 0x4000; i += 8) {
++		BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
++		BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
++	}
++
++	/* PRAMIN page table: cheat and map it into the VM at 0x0000000000.
++	 * We map the entire fake channel into the start of the PRAMIN BAR.
++	 */
++	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
++				     0, &priv->pramin_pt);
++	if (ret)
++		return ret;
++
++	v = c_offset | 1;
++	if (dev_priv->vram_sys_base) {
++		v += dev_priv->vram_sys_base;
++		v |= 0x30;
++	}
++
++	i = 0;
++	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
++		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
++		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
++		v += 0x1000;
++		i += 8;
++	}
++
++	while (i < pt_size) {
++		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
++		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
++		i += 8;
++	}
++
++	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
++	BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
++
++	/* VRAM page table(s), mapped into VM at +1GiB  */
++	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
++		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
++					     NV50_VM_BLOCK/65536*8, 0, 0,
++					     &chan->vm_vram_pt[i]);
++		if (ret) {
++			NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
++									ret);
++			dev_priv->vm_vram_pt_nr = i;
++			return ret;
++		}
++		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
++
++		for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
++								v += 4)
++			BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
++
++		BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
++			  chan->vm_vram_pt[i]->instance | 0x61);
++		BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
++	}
++
++	/* DMA object for PRAMIN BAR */
++	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
++							&priv->pramin_bar);
++	if (ret)
++		return ret;
++	BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
++	BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
++	BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
++	BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
++	BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
++	BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
++
++	/* DMA object for FB BAR */
++	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
++							&priv->fb_bar);
++	if (ret)
++		return ret;
++	BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
++	BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
++					      drm_get_resource_len(dev, 1) - 1);
++	BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
++	BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
++	BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
++	BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
++
++	/* Poke the relevant regs, and pray it works :) */
++	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
++	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
++	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
++					 NV50_PUNK_BAR_CFG_BASE_VALID);
++	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
++					NV50_PUNK_BAR1_CTXDMA_VALID);
++	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
++					NV50_PUNK_BAR3_CTXDMA_VALID);
++
++	for (i = 0; i < 8; i++)
++		nv_wr32(dev, 0x1900 + (i*4), 0);
++
++	/* Assume that praying isn't enough: check that we can re-read the
++	 * entire fake channel back from the PRAMIN BAR. */
++	dev_priv->engine.instmem.prepare_access(dev, false);
++	for (i = 0; i < c_size; i += 4) {
++		if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
++			NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
++									i);
++			dev_priv->engine.instmem.finish_access(dev);
++			return -EINVAL;
++		}
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
++
++	/* Global PRAMIN heap */
++	if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
++				  c_size, dev_priv->ramin_size - c_size)) {
++		dev_priv->ramin_heap = NULL;
++		NV_ERROR(dev, "Failed to init RAMIN heap\n");
++	}
++
++	/*XXX: incorrect, but needed to make hash func "work" */
++	dev_priv->ramht_offset = 0x10000;
++	dev_priv->ramht_bits   = 9;
++	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
++	return 0;
++}
++
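
NV50_INSTMEM_PT_SIZE() above encodes one 8-byte PTE per 4 KiB page,
which is where the "PT size" debug print comes from; for instance:

	/* NV50_INSTMEM_PT_SIZE(16 << 20)
	 *   = ((16 MiB) >> 12) << 3
	 *   = 4096 pages * 8 bytes
	 *   = 32 KiB of page table for a 16 MiB aperture
	 */
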
++void
++nv50_instmem_takedown(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
++	struct nouveau_channel *chan = dev_priv->fifos[0];
++	int i;
++
++	NV_DEBUG(dev, "\n");
++
++	if (!priv)
++		return;
++
++	/* Restore state from before init */
++	for (i = 0x1700; i <= 0x1710; i += 4)
++		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
++
++	nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
++	nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
++	nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
++
++	/* Destroy dummy channel */
++	if (chan) {
++		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
++			nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
++			dev_priv->vm_vram_pt[i] = NULL;
++		}
++		dev_priv->vm_vram_pt_nr = 0;
++
++		nouveau_gpuobj_del(dev, &chan->vm_pd);
++		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
++		nouveau_gpuobj_ref_del(dev, &chan->ramin);
++		nouveau_mem_takedown(&chan->ramin_heap);
++
++		dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
++		kfree(chan);
++	}
++
++	dev_priv->engine.instmem.priv = NULL;
++	kfree(priv);
++}
++
++int
++nv50_instmem_suspend(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *chan = dev_priv->fifos[0];
++	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
++	int i;
++
++	ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
++	if (!ramin->im_backing_suspend)
++		return -ENOMEM;
++
++	for (i = 0; i < ramin->im_pramin->size; i += 4)
++		ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
++	return 0;
++}
++
++void
++nv50_instmem_resume(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
++	struct nouveau_channel *chan = dev_priv->fifos[0];
++	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
++	int i;
++
++	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
++	for (i = 0; i < ramin->im_pramin->size; i += 4)
++		BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
++	vfree(ramin->im_backing_suspend);
++	ramin->im_backing_suspend = NULL;
++
++	/* Poke the relevant regs, and pray it works :) */
++	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
++	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
++	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
++					 NV50_PUNK_BAR_CFG_BASE_VALID);
++	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
++					NV50_PUNK_BAR1_CTXDMA_VALID);
++	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
++					NV50_PUNK_BAR3_CTXDMA_VALID);
++
++	for (i = 0; i < 8; i++)
++		nv_wr32(dev, 0x1900 + (i*4), 0);
++}
++
++int
++nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
++		      uint32_t *sz)
++{
++	int ret;
++
++	if (gpuobj->im_backing)
++		return -EINVAL;
++
++	*sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
++	if (*sz == 0)
++		return -EINVAL;
++
++	ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
++			     true, false, &gpuobj->im_backing);
++	if (ret) {
++		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
++		return ret;
++	}
++
++	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
++	if (ret) {
++		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
++		nouveau_bo_ref(NULL, &gpuobj->im_backing);
++		return ret;
++	}
++
++	gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
++	gpuobj->im_backing_start <<= PAGE_SHIFT;
++
++	return 0;
++}
++
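
The mask expression above is the standard round-up-to-alignment idiom:
adding PAGE_SIZE - 1 and clearing the low bits bumps any non-multiple to
the next page boundary, e.g.:

	/* with NV50_INSTMEM_PAGE_SIZE = 0x1000:
	 *   (0x1234 + 0xfff) & ~0xfff = 0x2000
	 *   (0x2000 + 0xfff) & ~0xfff = 0x2000   (already aligned)
	 */
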
++void
++nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++
++	if (gpuobj && gpuobj->im_backing) {
++		if (gpuobj->im_bound)
++			dev_priv->engine.instmem.unbind(dev, gpuobj);
++		nouveau_bo_unpin(gpuobj->im_backing);
++		nouveau_bo_ref(NULL, &gpuobj->im_backing);
++		gpuobj->im_backing = NULL;
++	}
++}
++
++int
++nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
++	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
++	uint32_t pte, pte_end;
++	uint64_t vram;
++
++	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
++		return -EINVAL;
++
++	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
++		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
++
++	pte     = (gpuobj->im_pramin->start >> 12) << 1;
++	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
++	vram    = gpuobj->im_backing_start;
++
++	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
++		 gpuobj->im_pramin->start, pte, pte_end);
++	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
++
++	vram |= 1;
++	if (dev_priv->vram_sys_base) {
++		vram += dev_priv->vram_sys_base;
++		vram |= 0x30;
++	}
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	while (pte < pte_end) {
++		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
++		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
++		vram += NV50_INSTMEM_PAGE_SIZE;
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	nv_wr32(dev, 0x100c80, 0x00040001);
++	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
++		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++		return -EBUSY;
++	}
++
++	nv_wr32(dev, 0x100c80, 0x00060001);
++	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
++		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
++		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
++		return -EBUSY;
++	}
++
++	gpuobj->im_bound = 1;
++	return 0;
++}
++
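
The PTE format used above is undocumented, but from the code: bit 0 marks
the page present, and on the IGPs where vram_sys_base was read back in
nv50_instmem_init() the address is rebased into system memory and OR'd
with 0x30. Purely a sketch of that encoding, as a helper:

	static inline uint64_t
	nv50_encode_pte(uint64_t addr, uint64_t vram_sys_base)
	{
		uint64_t pte = addr | 1;		/* present */

		if (vram_sys_base) {
			pte += vram_sys_base;	/* rebase into sysmem */
			pte |= 0x30;		/* sysmem target bits? */
		}
		return pte;	/* written as two 32-bit words above */
	}
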
++int
++nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
++	uint32_t pte, pte_end;
++
++	if (gpuobj->im_bound == 0)
++		return -EINVAL;
++
++	pte     = (gpuobj->im_pramin->start >> 12) << 1;
++	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
++
++	dev_priv->engine.instmem.prepare_access(dev, true);
++	while (pte < pte_end) {
++		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
++		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
++	}
++	dev_priv->engine.instmem.finish_access(dev);
++
++	gpuobj->im_bound = 0;
++	return 0;
++}
++
++void
++nv50_instmem_prepare_access(struct drm_device *dev, bool write)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
++
++	priv->last_access_wr = write;
++}
++
++void
++nv50_instmem_finish_access(struct drm_device *dev)
++{
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
++
++	if (priv->last_access_wr) {
++		nv_wr32(dev, 0x070000, 0x00000001);
++		if (!nv_wait(0x070000, 0x00000001, 0x00000000))
++			NV_ERROR(dev, "PRAMIN flush timeout\n");
++	}
++}
++
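
prepare_access() only records whether the window is for writing;
finish_access() then flushes PRAMIN (register 0x070000) for writes only,
so read-only accesses stay cheap. The bracketing used throughout this
file:

	dev_priv->engine.instmem.prepare_access(dev, true);	/* write */
	nv_wo32(dev, gpuobj, 0, value);
	/* ... more nv_wo32() calls ... */
	dev_priv->engine.instmem.finish_access(dev);	/* flush once */
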
+diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
+new file mode 100644
+index 0000000..e0a9c3f
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_mc.c
+@@ -0,0 +1,40 @@
++/*
++ * Copyright (C) 2007 Ben Skeggs.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm.h"
++#include "nouveau_drv.h"
++
++int
++nv50_mc_init(struct drm_device *dev)
++{
++	nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
++	return 0;
++}
++
++void nv50_mc_takedown(struct drm_device *dev)
++{
++}
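
NV03_PMC_ENABLE gates one engine per bit, so nv50_mc_init() simply
ungates everything. Individual engines are reset by pulsing their bit low
and back high, as nv50_graph_init_reset() did earlier for PGRAPH (plus
bit 21); the same pattern as a helper:

	static void
	pmc_reset_engine(struct drm_device *dev, uint32_t bits)
	{
		nv_wr32(dev, NV03_PMC_ENABLE,
			nv_rd32(dev, NV03_PMC_ENABLE) & ~bits);
		nv_wr32(dev, NV03_PMC_ENABLE,
			nv_rd32(dev, NV03_PMC_ENABLE) |  bits);
	}
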
+diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
+new file mode 100644
+index 0000000..c2fff54
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nv50_sor.c
+@@ -0,0 +1,323 @@
++/*
++ * Copyright (C) 2008 Maarten Maathuis.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#include "drmP.h"
++#include "drm_crtc_helper.h"
++
++#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
++#include "nouveau_reg.h"
++#include "nouveau_drv.h"
++#include "nouveau_dma.h"
++#include "nouveau_encoder.h"
++#include "nouveau_connector.h"
++#include "nouveau_crtc.h"
++#include "nv50_display.h"
++
++static void
++nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
++{
++	struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
++	struct drm_nouveau_private *dev_priv = dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
++
++	ret = RING_SPACE(evo, 2);
++	if (ret) {
++		NV_ERROR(dev, "no space while disconnecting SOR\n");
++		return;
++	}
++	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
++	OUT_RING(evo, 0);
++}
++
++static void
++nv50_sor_dp_link_train(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct bit_displayport_encoder_table *dpe;
++	int dpe_headerlen;
++
++	dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
++	if (!dpe) {
++		NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
++		return;
++	}
++
++	if (dpe->script0) {
++		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
++		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
++					    nv_encoder->dcb);
++	}
++
++	if (!nouveau_dp_link_train(encoder))
++		NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
++
++	if (dpe->script1) {
++		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
++		nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
++					    nv_encoder->dcb);
++	}
++}
++
++static void
++nv50_sor_dpms(struct drm_encoder *encoder, int mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_encoder *enc;
++	uint32_t val;
++	int or = nv_encoder->or;
++
++	NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
++
++	nv_encoder->last_dpms = mode;
++	list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
++		struct nouveau_encoder *nvenc = nouveau_encoder(enc);
++
++		if (nvenc == nv_encoder ||
++		    nvenc->disconnect != nv50_sor_disconnect ||
++		    nvenc->dcb->or != nv_encoder->dcb->or)
++			continue;
++
++		if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
++			return;
++	}
++
++	/* wait for any previous DPMS request to finish */
++	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
++		     NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
++		NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
++		NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
++			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
++	}
++
++	val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
++
++	if (mode == DRM_MODE_DPMS_ON)
++		val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
++	else
++		val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
++
++	nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
++		NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
++	if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
++		     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
++		NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
++		NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
++			 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
++	}
++
++	if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
++		nv50_sor_dp_link_train(encoder);
++}
++
++static void
++nv50_sor_save(struct drm_encoder *encoder)
++{
++	NV_ERROR(encoder->dev, "!!\n");
++}
++
++static void
++nv50_sor_restore(struct drm_encoder *encoder)
++{
++	NV_ERROR(encoder->dev, "!!\n");
++}
++
++static bool
++nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
++		    struct drm_display_mode *adjusted_mode)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct nouveau_connector *connector;
++
++	NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
++
++	connector = nouveau_encoder_connector_get(nv_encoder);
++	if (!connector) {
++		NV_ERROR(encoder->dev, "Encoder has no connector\n");
++		return false;
++	}
++
++	if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
++	     connector->native_mode) {
++		int id = adjusted_mode->base.id;
++		*adjusted_mode = *connector->native_mode;
++		adjusted_mode->base.id = id;
++	}
++
++	return true;
++}
++
++static void
++nv50_sor_prepare(struct drm_encoder *encoder)
++{
++}
++
++static void
++nv50_sor_commit(struct drm_encoder *encoder)
++{
++}
++
++static void
++nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
++		  struct drm_display_mode *adjusted_mode)
++{
++	struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
++	struct nouveau_channel *evo = dev_priv->evo;
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++	struct drm_device *dev = encoder->dev;
++	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
++	uint32_t mode_ctl = 0;
++	int ret;
++
++	NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
++
++	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
++
++	switch (nv_encoder->dcb->type) {
++	case OUTPUT_TMDS:
++		if (nv_encoder->dcb->sorconf.link & 1) {
++			if (adjusted_mode->clock < 165000)
++				mode_ctl = 0x0100;
++			else
++				mode_ctl = 0x0500;
++		} else
++			mode_ctl = 0x0200;
++		break;
++	case OUTPUT_DP:
++		mode_ctl |= 0x00050000;
++		if (nv_encoder->dcb->sorconf.link & 1)
++			mode_ctl |= 0x00000800;
++		else
++			mode_ctl |= 0x00000900;
++		break;
++	default:
++		break;
++	}
++
++	if (crtc->index == 1)
++		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
++	else
++		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
++
++	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
++		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
++
++	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
++		mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
++
++	ret = RING_SPACE(evo, 2);
++	if (ret) {
++		NV_ERROR(dev, "no space while connecting SOR\n");
++		return;
++	}
++	BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
++	OUT_RING(evo, mode_ctl);
++}
++
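
The mode_ctl word assembled above packs the whole link configuration into
one EVO method: for TMDS on link 0 the 165 MHz single-link limit selects
0x0100 vs 0x0500 (dual link), link 1 uses 0x0200, DP adds 0x0005xxxx plus
a link select, and the CRTC and sync-polarity bits are OR'd on top. For
example, dual-link TMDS on CRTC0 with negative hsync:

	/* adjusted_mode->clock >= 165000, sorconf.link & 1, crtc->index 0 */
	mode_ctl = 0x0500
		 | NV50_EVO_SOR_MODE_CTRL_CRTC0
		 | NV50_EVO_SOR_MODE_CTRL_NHSYNC;
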
++static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
++	.dpms = nv50_sor_dpms,
++	.save = nv50_sor_save,
++	.restore = nv50_sor_restore,
++	.mode_fixup = nv50_sor_mode_fixup,
++	.prepare = nv50_sor_prepare,
++	.commit = nv50_sor_commit,
++	.mode_set = nv50_sor_mode_set,
++	.detect = NULL
++};
++
++static void
++nv50_sor_destroy(struct drm_encoder *encoder)
++{
++	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
++
++	if (!encoder)
++		return;
++
++	NV_DEBUG_KMS(encoder->dev, "\n");
++
++	drm_encoder_cleanup(encoder);
++
++	kfree(nv_encoder);
++}
++
++static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
++	.destroy = nv50_sor_destroy,
++};
++
++int
++nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
++{
++	struct nouveau_encoder *nv_encoder = NULL;
++	struct drm_encoder *encoder;
++	bool dum;
++	int type;
++
++	NV_DEBUG_KMS(dev, "\n");
++
++	switch (entry->type) {
++	case OUTPUT_TMDS:
++		NV_INFO(dev, "Detected a TMDS output\n");
++		type = DRM_MODE_ENCODER_TMDS;
++		break;
++	case OUTPUT_LVDS:
++		NV_INFO(dev, "Detected a LVDS output\n");
++		type = DRM_MODE_ENCODER_LVDS;
++
++		if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
++			NV_ERROR(dev, "Failed parsing LVDS table\n");
++			return -EINVAL;
++		}
++		break;
++	case OUTPUT_DP:
++		NV_INFO(dev, "Detected a DP output\n");
++		type = DRM_MODE_ENCODER_TMDS;
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
++	if (!nv_encoder)
++		return -ENOMEM;
++	encoder = to_drm_encoder(nv_encoder);
++
++	nv_encoder->dcb = entry;
++	nv_encoder->or = ffs(entry->or) - 1;
++
++	nv_encoder->disconnect = nv50_sor_disconnect;
++
++	drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
++	drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
++
++	encoder->possible_crtcs = entry->heads;
++	encoder->possible_clones = 0;
++
++	return 0;
++}
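
nv50_sor_create() is the per-output entry point: display initialisation
calls it once for every DCB entry routed to a SOR. A hypothetical caller,
assuming the usual dcb_table shape (an entries count plus an entry
array):

	for (i = 0; i < dcb->entries; i++) {
		struct dcb_entry *entry = &dcb->entry[i];

		if (entry->type == OUTPUT_TMDS ||
		    entry->type == OUTPUT_LVDS ||
		    entry->type == OUTPUT_DP)
			nv50_sor_create(dev, entry);
	}
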
+diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
+new file mode 100644
+index 0000000..5998c35
+--- /dev/null
++++ b/drivers/gpu/drm/nouveau/nvreg.h
+@@ -0,0 +1,535 @@
++/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
++/*
++ * Copyright 1996-1997  David J. McKay
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
++ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
++ * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ */
++
++/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */
++
++#ifndef __NVREG_H_
++#define __NVREG_H_
++
++#define NV_PMC_OFFSET               0x00000000
++#define NV_PMC_SIZE                 0x00001000
++
++#define NV_PBUS_OFFSET              0x00001000
++#define NV_PBUS_SIZE                0x00001000
++
++#define NV_PFIFO_OFFSET             0x00002000
++#define NV_PFIFO_SIZE               0x00002000
++
++#define NV_HDIAG_OFFSET             0x00005000
++#define NV_HDIAG_SIZE               0x00001000
++
++#define NV_PRAM_OFFSET              0x00006000
++#define NV_PRAM_SIZE                0x00001000
++
++#define NV_PVIDEO_OFFSET            0x00008000
++#define NV_PVIDEO_SIZE              0x00001000
++
++#define NV_PTIMER_OFFSET            0x00009000
++#define NV_PTIMER_SIZE              0x00001000
++
++#define NV_PPM_OFFSET               0x0000A000
++#define NV_PPM_SIZE                 0x00001000
++
++#define NV_PTV_OFFSET               0x0000D000
++#define NV_PTV_SIZE                 0x00001000
++
++#define NV_PRMVGA_OFFSET            0x000A0000
++#define NV_PRMVGA_SIZE              0x00020000
++
++#define NV_PRMVIO0_OFFSET           0x000C0000
++#define NV_PRMVIO_SIZE              0x00002000
++#define NV_PRMVIO1_OFFSET           0x000C2000
++
++#define NV_PFB_OFFSET               0x00100000
++#define NV_PFB_SIZE                 0x00001000
++
++#define NV_PEXTDEV_OFFSET           0x00101000
++#define NV_PEXTDEV_SIZE             0x00001000
++
++#define NV_PME_OFFSET               0x00200000
++#define NV_PME_SIZE                 0x00001000
++
++#define NV_PROM_OFFSET              0x00300000
++#define NV_PROM_SIZE                0x00010000
++
++#define NV_PGRAPH_OFFSET            0x00400000
++#define NV_PGRAPH_SIZE              0x00010000
++
++#define NV_PCRTC0_OFFSET            0x00600000
++#define NV_PCRTC0_SIZE              0x00002000 /* empirical */
++
++#define NV_PRMCIO0_OFFSET           0x00601000
++#define NV_PRMCIO_SIZE              0x00002000
++#define NV_PRMCIO1_OFFSET           0x00603000
++
++#define NV50_DISPLAY_OFFSET           0x00610000
++#define NV50_DISPLAY_SIZE             0x0000FFFF
++
++#define NV_PRAMDAC0_OFFSET          0x00680000
++#define NV_PRAMDAC0_SIZE            0x00002000
++
++#define NV_PRMDIO0_OFFSET           0x00681000
++#define NV_PRMDIO_SIZE              0x00002000
++#define NV_PRMDIO1_OFFSET           0x00683000
++
++#define NV_PRAMIN_OFFSET            0x00700000
++#define NV_PRAMIN_SIZE              0x00100000
++
++#define NV_FIFO_OFFSET              0x00800000
++#define NV_FIFO_SIZE                0x00800000
++
++#define NV_PMC_BOOT_0			0x00000000
++#define NV_PMC_ENABLE			0x00000200
++
++#define NV_VIO_VSE2			0x000003c3
++#define NV_VIO_SRX			0x000003c4
++
++#define NV_CIO_CRX__COLOR		0x000003d4
++#define NV_CIO_CR__COLOR		0x000003d5
++
++#define NV_PBUS_DEBUG_1			0x00001084
++#define NV_PBUS_DEBUG_4			0x00001098
++#define NV_PBUS_DEBUG_DUALHEAD_CTL	0x000010f0
++#define NV_PBUS_POWERCTRL_1		0x00001584
++#define NV_PBUS_POWERCTRL_2		0x00001588
++#define NV_PBUS_POWERCTRL_4		0x00001590
++#define NV_PBUS_PCI_NV_19		0x0000184C
++#define NV_PBUS_PCI_NV_20		0x00001850
++#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED	(0 << 0)
++#	define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED	(1 << 0)
++
++#define NV_PFIFO_RAMHT			0x00002210
++
++#define NV_PTV_TV_INDEX			0x0000d220
++#define NV_PTV_TV_DATA			0x0000d224
++#define NV_PTV_HFILTER			0x0000d310
++#define NV_PTV_HFILTER2			0x0000d390
++#define NV_PTV_VFILTER			0x0000d510
++
++#define NV_PRMVIO_MISC__WRITE		0x000c03c2
++#define NV_PRMVIO_SRX			0x000c03c4
++#define NV_PRMVIO_SR			0x000c03c5
++#	define NV_VIO_SR_RESET_INDEX		0x00
++#	define NV_VIO_SR_CLOCK_INDEX		0x01
++#	define NV_VIO_SR_PLANE_MASK_INDEX	0x02
++#	define NV_VIO_SR_CHAR_MAP_INDEX		0x03
++#	define NV_VIO_SR_MEM_MODE_INDEX		0x04
++#define NV_PRMVIO_MISC__READ		0x000c03cc
++#define NV_PRMVIO_GRX			0x000c03ce
++#define NV_PRMVIO_GX			0x000c03cf
++#	define NV_VIO_GX_SR_INDEX		0x00
++#	define NV_VIO_GX_SREN_INDEX		0x01
++#	define NV_VIO_GX_CCOMP_INDEX		0x02
++#	define NV_VIO_GX_ROP_INDEX		0x03
++#	define NV_VIO_GX_READ_MAP_INDEX		0x04
++#	define NV_VIO_GX_MODE_INDEX		0x05
++#	define NV_VIO_GX_MISC_INDEX		0x06
++#	define NV_VIO_GX_DONT_CARE_INDEX	0x07
++#	define NV_VIO_GX_BIT_MASK_INDEX		0x08
++
++#define NV_PFB_BOOT_0			0x00100000
++#define NV_PFB_CFG0			0x00100200
++#define NV_PFB_CFG1			0x00100204
++#define NV_PFB_CSTATUS			0x0010020C
++#define NV_PFB_REFCTRL			0x00100210
++#	define NV_PFB_REFCTRL_VALID_1			(1 << 31)
++#define NV_PFB_PAD			0x0010021C
++#	define NV_PFB_PAD_CKE_NORMAL			(1 << 0)
++#define NV_PFB_TILE_NV10		0x00100240
++#define NV_PFB_TILE_SIZE_NV10		0x00100244
++#define NV_PFB_REF			0x001002D0
++#	define NV_PFB_REF_CMD_REFRESH			(1 << 0)
++#define NV_PFB_PRE			0x001002D4
++#	define NV_PFB_PRE_CMD_PRECHARGE			(1 << 0)
++#define NV_PFB_CLOSE_PAGE2		0x0010033C
++#define NV_PFB_TILE_NV40		0x00100600
++#define NV_PFB_TILE_SIZE_NV40		0x00100604
++
++#define NV_PEXTDEV_BOOT_0		0x00101000
++#	define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT	(8 << 12)
++#define NV_PEXTDEV_BOOT_3		0x0010100c
++
++#define NV_PCRTC_INTR_0					0x00600100
++#	define NV_PCRTC_INTR_0_VBLANK				(1 << 0)
++#define NV_PCRTC_INTR_EN_0				0x00600140
++#define NV_PCRTC_START					0x00600800
++#define NV_PCRTC_CONFIG					0x00600804
++#	define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA		(1 << 0)
++#	define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
++#define NV_PCRTC_CURSOR_CONFIG				0x00600810
++#	define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE		(1 << 0)
++#	define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE	(1 << 4)
++#	define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM	(1 << 8)
++#	define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32		(1 << 12)
++#	define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64		(1 << 16)
++#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32		(2 << 24)
++#	define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64		(4 << 24)
++#	define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA		(1 << 28)
++
++/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
++#define NV_PCRTC_GPIO					0x00600818
++#define NV_PCRTC_GPIO_EXT				0x0060081c
++#define NV_PCRTC_830					0x00600830
++#define NV_PCRTC_834					0x00600834
++#define NV_PCRTC_850					0x00600850
++#define NV_PCRTC_ENGINE_CTRL				0x00600860
++#	define NV_CRTC_FSEL_I2C					(1 << 4)
++#	define NV_CRTC_FSEL_OVERLAY				(1 << 12)
++
++#define NV_PRMCIO_ARX			0x006013c0
++#define NV_PRMCIO_AR__WRITE		0x006013c0
++#define NV_PRMCIO_AR__READ		0x006013c1
++#	define NV_CIO_AR_MODE_INDEX		0x10
++#	define NV_CIO_AR_OSCAN_INDEX		0x11
++#	define NV_CIO_AR_PLANE_INDEX		0x12
++#	define NV_CIO_AR_HPP_INDEX		0x13
++#	define NV_CIO_AR_CSEL_INDEX		0x14
++#define NV_PRMCIO_INP0			0x006013c2
++#define NV_PRMCIO_CRX__COLOR		0x006013d4
++#define NV_PRMCIO_CR__COLOR		0x006013d5
++	/* Standard VGA CRTC registers */
++#	define NV_CIO_CR_HDT_INDEX		0x00	/* horizontal display total */
++#	define NV_CIO_CR_HDE_INDEX		0x01	/* horizontal display end */
++#	define NV_CIO_CR_HBS_INDEX		0x02	/* horizontal blanking start */
++#	define NV_CIO_CR_HBE_INDEX		0x03	/* horizontal blanking end */
++#		define NV_CIO_CR_HBE_4_0		4:0
++#	define NV_CIO_CR_HRS_INDEX		0x04	/* horizontal retrace start */
++#	define NV_CIO_CR_HRE_INDEX		0x05	/* horizontal retrace end */
++#		define NV_CIO_CR_HRE_4_0		4:0
++#		define NV_CIO_CR_HRE_HBE_5		7:7
++#	define NV_CIO_CR_VDT_INDEX		0x06	/* vertical display total */
++#	define NV_CIO_CR_OVL_INDEX		0x07	/* overflow bits */
++#		define NV_CIO_CR_OVL_VDT_8		0:0
++#		define NV_CIO_CR_OVL_VDE_8		1:1
++#		define NV_CIO_CR_OVL_VRS_8		2:2
++#		define NV_CIO_CR_OVL_VBS_8		3:3
++#		define NV_CIO_CR_OVL_VDT_9		5:5
++#		define NV_CIO_CR_OVL_VDE_9		6:6
++#		define NV_CIO_CR_OVL_VRS_9		7:7
++#	define NV_CIO_CR_RSAL_INDEX		0x08	/* normally "preset row scan" */
++#	define NV_CIO_CR_CELL_HT_INDEX		0x09	/* cell height?! normally "max scan line" */
++#		define NV_CIO_CR_CELL_HT_VBS_9		5:5
++#		define NV_CIO_CR_CELL_HT_SCANDBL	7:7
++#	define NV_CIO_CR_CURS_ST_INDEX		0x0a	/* cursor start */
++#	define NV_CIO_CR_CURS_END_INDEX		0x0b	/* cursor end */
++#	define NV_CIO_CR_SA_HI_INDEX		0x0c	/* screen start address high */
++#	define NV_CIO_CR_SA_LO_INDEX		0x0d	/* screen start address low */
++#	define NV_CIO_CR_TCOFF_HI_INDEX		0x0e	/* cursor offset high */
++#	define NV_CIO_CR_TCOFF_LO_INDEX		0x0f	/* cursor offset low */
++#	define NV_CIO_CR_VRS_INDEX		0x10	/* vertical retrace start */
++#	define NV_CIO_CR_VRE_INDEX		0x11	/* vertical retrace end */
++#		define NV_CIO_CR_VRE_3_0		3:0
++#	define NV_CIO_CR_VDE_INDEX		0x12	/* vertical display end */
++#	define NV_CIO_CR_OFFSET_INDEX		0x13	/* sets screen pitch */
++#	define NV_CIO_CR_ULINE_INDEX		0x14	/* underline location */
++#	define NV_CIO_CR_VBS_INDEX		0x15	/* vertical blank start */
++#	define NV_CIO_CR_VBE_INDEX		0x16	/* vertical blank end */
++#	define NV_CIO_CR_MODE_INDEX		0x17	/* crtc mode control */
++#	define NV_CIO_CR_LCOMP_INDEX		0x18	/* line compare */
++	/* Extended VGA CRTC registers */
++#	define NV_CIO_CRE_RPC0_INDEX		0x19	/* repaint control 0 */
++#		define NV_CIO_CRE_RPC0_OFFSET_10_8	7:5
++#	define NV_CIO_CRE_RPC1_INDEX		0x1a	/* repaint control 1 */
++#		define NV_CIO_CRE_RPC1_LARGE		2:2
++#	define NV_CIO_CRE_FF_INDEX		0x1b	/* fifo control */
++#	define NV_CIO_CRE_ENH_INDEX		0x1c	/* enhanced? */
++#	define NV_CIO_SR_LOCK_INDEX		0x1f	/* crtc lock */
++#		define NV_CIO_SR_UNLOCK_RW_VALUE	0x57
++#		define NV_CIO_SR_LOCK_VALUE		0x99
++#	define NV_CIO_CRE_FFLWM__INDEX		0x20	/* fifo low water mark */
++#	define NV_CIO_CRE_21			0x21	/* vga shadow crtc lock */
++#	define NV_CIO_CRE_LSR_INDEX		0x25	/* ? */
++#		define NV_CIO_CRE_LSR_VDT_10		0:0
++#		define NV_CIO_CRE_LSR_VDE_10		1:1
++#		define NV_CIO_CRE_LSR_VRS_10		2:2
++#		define NV_CIO_CRE_LSR_VBS_10		3:3
++#		define NV_CIO_CRE_LSR_HBE_6		4:4
++#	define NV_CIO_CR_ARX_INDEX		0x26	/* attribute index -- ro copy of 0x60.3c0 */
++#	define NV_CIO_CRE_CHIP_ID_INDEX		0x27	/* chip revision */
++#	define NV_CIO_CRE_PIXEL_INDEX		0x28
++#		define NV_CIO_CRE_PIXEL_FORMAT		1:0
++#	define NV_CIO_CRE_HEB__INDEX		0x2d	/* horizontal extra bits? */
++#		define NV_CIO_CRE_HEB_HDT_8		0:0
++#		define NV_CIO_CRE_HEB_HDE_8		1:1
++#		define NV_CIO_CRE_HEB_HBS_8		2:2
++#		define NV_CIO_CRE_HEB_HRS_8		3:3
++#		define NV_CIO_CRE_HEB_ILC_8		4:4
++#	define NV_CIO_CRE_2E			0x2e	/* some scratch or dummy reg to force writes to sink in */
++#	define NV_CIO_CRE_HCUR_ADDR2_INDEX	0x2f	/* cursor */
++#	define NV_CIO_CRE_HCUR_ADDR0_INDEX	0x30		/* pixmap */
++#		define NV_CIO_CRE_HCUR_ADDR0_ADR	6:0
++#		define NV_CIO_CRE_HCUR_ASI		7:7
++#	define NV_CIO_CRE_HCUR_ADDR1_INDEX	0x31			/* address */
++#		define NV_CIO_CRE_HCUR_ADDR1_ENABLE	0:0
++#		define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL	1:1
++#		define NV_CIO_CRE_HCUR_ADDR1_ADR	7:2
++#	define NV_CIO_CRE_LCD__INDEX		0x33
++#		define NV_CIO_CRE_LCD_LCD_SELECT	0:0
++#	define NV_CIO_CRE_DDC0_STATUS__INDEX	0x36
++#	define NV_CIO_CRE_DDC0_WR__INDEX	0x37
++#	define NV_CIO_CRE_ILACE__INDEX		0x39	/* interlace */
++#	define NV_CIO_CRE_SCRATCH3__INDEX	0x3b
++#	define NV_CIO_CRE_SCRATCH4__INDEX	0x3c
++#	define NV_CIO_CRE_DDC_STATUS__INDEX	0x3e
++#	define NV_CIO_CRE_DDC_WR__INDEX		0x3f
++#	define NV_CIO_CRE_EBR_INDEX		0x41	/* extra bits ? (vertical) */
++#		define NV_CIO_CRE_EBR_VDT_11		0:0
++#		define NV_CIO_CRE_EBR_VDE_11		2:2
++#		define NV_CIO_CRE_EBR_VRS_11		4:4
++#		define NV_CIO_CRE_EBR_VBS_11		6:6
++#	define NV_CIO_CRE_43			0x43
++#	define NV_CIO_CRE_44			0x44	/* head control */
++#	define NV_CIO_CRE_CSB			0x45	/* colour saturation boost */
++#	define NV_CIO_CRE_RCR			0x46
++#		define NV_CIO_CRE_RCR_ENDIAN_BIG	7:7
++#	define NV_CIO_CRE_47			0x47	/* extended fifo lwm, used on nv30+ */
++#	define NV_CIO_CRE_49			0x49
++#	define NV_CIO_CRE_4B			0x4b	/* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
++#	define NV_CIO_CRE_TVOUT_LATENCY		0x52
++#	define NV_CIO_CRE_53			0x53	/* `fp_htiming' according to Haiku */
++#	define NV_CIO_CRE_54			0x54	/* `fp_vtiming' according to Haiku */
++#	define NV_CIO_CRE_57			0x57	/* index reg for cr58 */
++#	define NV_CIO_CRE_58			0x58	/* data reg for cr57 */
++#	define NV_CIO_CRE_59			0x59	/* related to on/off-chip-ness of digital outputs */
++#	define NV_CIO_CRE_5B			0x5B	/* newer colour saturation reg */
++#	define NV_CIO_CRE_85			0x85
++#	define NV_CIO_CRE_86			0x86
++#define NV_PRMCIO_INP0__COLOR		0x006013da
++
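
The high:low pairs above and below (e.g. NV_CIO_CR_HBE_4_0 as 4:0) are
not valid C on their own; they are written to be consumed by
ternary-trick macros, where "1 ? h:l" evaluates to the high bit and
"0 ? h:l" to the low bit. Illustrative accessors in that style (the
nv/nouveau trees carry equivalents; treat these as a sketch):

	/* with f = 4:0, (1 ? f) == 4 and (0 ? f) == 0 */
	#define MASK(f) \
		((0xffffffff >> (31 - ((1 ? f) - (0 ? f)))) << (0 ? f))
	#define GET_FIELD(v, f)	(((v) & MASK(f)) >> (0 ? f))

	/* GET_FIELD(cr3, NV_CIO_CR_HBE_4_0) extracts bits 4:0 of CR3 */
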
++#define NV_PRAMDAC_CU_START_POS				0x00680300
++#	define NV_PRAMDAC_CU_START_POS_X			15:0
++#	define NV_PRAMDAC_CU_START_POS_Y			31:16
++#define NV_RAMDAC_NV10_CURSYNC				0x00680404
++
++#define NV_PRAMDAC_NVPLL_COEFF				0x00680500
++#define NV_PRAMDAC_MPLL_COEFF				0x00680504
++#define NV_PRAMDAC_VPLL_COEFF				0x00680508
++#	define NV30_RAMDAC_ENABLE_VCO2				(8 << 4)
++
++#define NV_PRAMDAC_PLL_COEFF_SELECT			0x0068050c
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE	(4 << 0)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL	(1 << 8)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL	(2 << 8)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL	(4 << 8)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2	(8 << 8)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1		(1 << 16)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1		(2 << 16)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2		(4 << 16)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2		(8 << 16)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP	(1 << 20)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2	(1 << 28)
++#	define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2	(2 << 28)
++
++#define NV_PRAMDAC_PLL_SETUP_CONTROL			0x00680510
++#define NV_RAMDAC_VPLL2					0x00680520
++#define NV_PRAMDAC_SEL_CLK				0x00680524
++#define NV_RAMDAC_DITHER_NV11				0x00680528
++#define NV_PRAMDAC_DACCLK				0x0068052c
++#	define NV_PRAMDAC_DACCLK_SEL_DACCLK			(1 << 0)
++
++#define NV_RAMDAC_NVPLL_B				0x00680570
++#define NV_RAMDAC_MPLL_B				0x00680574
++#define NV_RAMDAC_VPLL_B				0x00680578
++#define NV_RAMDAC_VPLL2_B				0x0068057c
++#	define NV31_RAMDAC_ENABLE_VCO2				(8 << 28)
++#define NV_PRAMDAC_580					0x00680580
++#	define NV_RAMDAC_580_VPLL1_ACTIVE			(1 << 8)
++#	define NV_RAMDAC_580_VPLL2_ACTIVE			(1 << 28)
++
++#define NV_PRAMDAC_GENERAL_CONTROL			0x00680600
++#	define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON		(3 << 4)
++#	define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL		(1 << 8)
++#	define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL		(1 << 12)
++#	define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM	(2 << 16)
++#	define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS		(1 << 20)
++#	define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG		(2 << 28)
++#define NV_PRAMDAC_TEST_CONTROL				0x00680608
++#	define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED	(1 << 12)
++#	define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF		(1 << 16)
++#	define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI		(1 << 28)
++#define NV_PRAMDAC_TESTPOINT_DATA			0x00680610
++#	define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK		(8 << 28)
++#define NV_PRAMDAC_630					0x00680630
++#define NV_PRAMDAC_634					0x00680634
++
++#define NV_PRAMDAC_TV_SETUP				0x00680700
++#define NV_PRAMDAC_TV_VTOTAL				0x00680720
++#define NV_PRAMDAC_TV_VSKEW				0x00680724
++#define NV_PRAMDAC_TV_VSYNC_DELAY			0x00680728
++#define NV_PRAMDAC_TV_HTOTAL				0x0068072c
++#define NV_PRAMDAC_TV_HSKEW				0x00680730
++#define NV_PRAMDAC_TV_HSYNC_DELAY			0x00680734
++#define NV_PRAMDAC_TV_HSYNC_DELAY2			0x00680738
++
++#define NV_PRAMDAC_FP_VDISPLAY_END			0x00680800
++#define NV_PRAMDAC_FP_VTOTAL				0x00680804
++#define NV_PRAMDAC_FP_VCRTC				0x00680808
++#define NV_PRAMDAC_FP_VSYNC_START			0x0068080c
++#define NV_PRAMDAC_FP_VSYNC_END				0x00680810
++#define NV_PRAMDAC_FP_VVALID_START			0x00680814
++#define NV_PRAMDAC_FP_VVALID_END			0x00680818
++#define NV_PRAMDAC_FP_HDISPLAY_END			0x00680820
++#define NV_PRAMDAC_FP_HTOTAL				0x00680824
++#define NV_PRAMDAC_FP_HCRTC				0x00680828
++#define NV_PRAMDAC_FP_HSYNC_START			0x0068082c
++#define NV_PRAMDAC_FP_HSYNC_END				0x00680830
++#define NV_PRAMDAC_FP_HVALID_START			0x00680834
++#define NV_PRAMDAC_FP_HVALID_END			0x00680838
++
++#define NV_RAMDAC_FP_DITHER				0x0068083c
++#define NV_PRAMDAC_FP_TG_CONTROL			0x00680848
++#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS		(1 << 0)
++#	define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE		(2 << 0)
++#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS		(1 << 4)
++#	define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE		(2 << 4)
++#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE		(0 << 8)
++#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER		(1 << 8)
++#	define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE		(2 << 8)
++#	define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG		(1 << 20)
++#	define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12		(1 << 24)
++#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS		(1 << 28)
++#	define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE		(2 << 28)
++#define NV_PRAMDAC_FP_MARGIN_COLOR			0x0068084c
++#define NV_PRAMDAC_850					0x00680850
++#define NV_PRAMDAC_85C					0x0068085c
++#define NV_PRAMDAC_FP_DEBUG_0				0x00680880
++#	define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE		(1 << 0)
++#	define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE		(1 << 4)
++/* This doesn't seem to be essential for tmds, but still often set */
++#	define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED		(8 << 4)
++#	define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR		(1 << 8)
++#	define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR		(1 << 12)
++#	define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND		(1 << 20)
++#	define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND		(1 << 24)
++#       define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK              (1 << 28)
++#define NV_PRAMDAC_FP_DEBUG_1				0x00680884
++#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE		11:0
++#	define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE	(1 << 12)
++#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE		27:16
++#	define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE	(1 << 28)
++#define NV_PRAMDAC_FP_DEBUG_2				0x00680888
++#define NV_PRAMDAC_FP_DEBUG_3				0x0068088C
++
++/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
++#define NV_PRAMDAC_FP_TMDS_CONTROL			0x006808b0
++#	define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE		(1 << 16)
++#define NV_PRAMDAC_FP_TMDS_DATA				0x006808b4
++
++#define NV_PRAMDAC_8C0                                  0x006808c0
++
++/* Some kind of switch */
++#define NV_PRAMDAC_900					0x00680900
++#define NV_PRAMDAC_A20					0x00680A20
++#define NV_PRAMDAC_A24					0x00680A24
++#define NV_PRAMDAC_A34					0x00680A34
++
++#define NV_PRAMDAC_CTV					0x00680c00
++
++/* names fabricated from NV_USER_DAC info */
++#define NV_PRMDIO_PIXEL_MASK		0x006813c6
++#	define NV_PRMDIO_PIXEL_MASK_MASK	0xff
++#define NV_PRMDIO_READ_MODE_ADDRESS	0x006813c7
++#define NV_PRMDIO_WRITE_MODE_ADDRESS	0x006813c8
++#define NV_PRMDIO_PALETTE_DATA		0x006813c9
++
++#define NV_PGRAPH_DEBUG_0		0x00400080
++#define NV_PGRAPH_DEBUG_1		0x00400084
++#define NV_PGRAPH_DEBUG_2_NV04		0x00400088
++#define NV_PGRAPH_DEBUG_2		0x00400620
++#define NV_PGRAPH_DEBUG_3		0x0040008c
++#define NV_PGRAPH_DEBUG_4		0x00400090
++#define NV_PGRAPH_INTR			0x00400100
++#define NV_PGRAPH_INTR_EN		0x00400140
++#define NV_PGRAPH_CTX_CONTROL		0x00400144
++#define NV_PGRAPH_CTX_CONTROL_NV04	0x00400170
++#define NV_PGRAPH_ABS_UCLIP_XMIN	0x0040053C
++#define NV_PGRAPH_ABS_UCLIP_YMIN	0x00400540
++#define NV_PGRAPH_ABS_UCLIP_XMAX	0x00400544
++#define NV_PGRAPH_ABS_UCLIP_YMAX	0x00400548
++#define NV_PGRAPH_BETA_AND		0x00400608
++#define NV_PGRAPH_LIMIT_VIOL_PIX	0x00400610
++#define NV_PGRAPH_BOFFSET0		0x00400640
++#define NV_PGRAPH_BOFFSET1		0x00400644
++#define NV_PGRAPH_BOFFSET2		0x00400648
++#define NV_PGRAPH_BLIMIT0		0x00400684
++#define NV_PGRAPH_BLIMIT1		0x00400688
++#define NV_PGRAPH_BLIMIT2		0x0040068c
++#define NV_PGRAPH_STATUS		0x00400700
++#define NV_PGRAPH_SURFACE		0x00400710
++#define NV_PGRAPH_STATE			0x00400714
++#define NV_PGRAPH_FIFO			0x00400720
++#define NV_PGRAPH_PATTERN_SHAPE		0x00400810
++#define NV_PGRAPH_TILE			0x00400b00
++
++#define NV_PVIDEO_INTR_EN		0x00008140
++#define NV_PVIDEO_BUFFER		0x00008700
++#define NV_PVIDEO_STOP			0x00008704
++#define NV_PVIDEO_UVPLANE_BASE(buff)	(0x00008800+(buff)*4)
++#define NV_PVIDEO_UVPLANE_LIMIT(buff)	(0x00008808+(buff)*4)
++#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff)	(0x00008820+(buff)*4)
++#define NV_PVIDEO_BASE(buff)		(0x00008900+(buff)*4)
++#define NV_PVIDEO_LIMIT(buff)		(0x00008908+(buff)*4)
++#define NV_PVIDEO_LUMINANCE(buff)	(0x00008910+(buff)*4)
++#define NV_PVIDEO_CHROMINANCE(buff)	(0x00008918+(buff)*4)
++#define NV_PVIDEO_OFFSET_BUFF(buff)	(0x00008920+(buff)*4)
++#define NV_PVIDEO_SIZE_IN(buff)		(0x00008928+(buff)*4)
++#define NV_PVIDEO_POINT_IN(buff)	(0x00008930+(buff)*4)
++#define NV_PVIDEO_DS_DX(buff)		(0x00008938+(buff)*4)
++#define NV_PVIDEO_DT_DY(buff)		(0x00008940+(buff)*4)
++#define NV_PVIDEO_POINT_OUT(buff)	(0x00008948+(buff)*4)
++#define NV_PVIDEO_SIZE_OUT(buff)	(0x00008950+(buff)*4)
++#define NV_PVIDEO_FORMAT(buff)		(0x00008958+(buff)*4)
++#	define NV_PVIDEO_FORMAT_PLANAR			(1 << 0)
++#	define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8	(1 << 16)
++#	define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY	(1 << 20)
++#	define NV_PVIDEO_FORMAT_MATRIX_ITURBT709	(1 << 24)
++#define NV_PVIDEO_COLOR_KEY		0x00008B00
++
++/* NV04 overlay defines from VIDIX & Haiku */
++#define NV_PVIDEO_INTR_EN_0		0x00680140
++#define NV_PVIDEO_STEP_SIZE		0x00680200
++#define NV_PVIDEO_CONTROL_Y		0x00680204
++#define NV_PVIDEO_CONTROL_X		0x00680208
++#define NV_PVIDEO_BUFF0_START_ADDRESS	0x0068020c
++#define NV_PVIDEO_BUFF0_PITCH_LENGTH	0x00680214
++#define NV_PVIDEO_BUFF0_OFFSET		0x0068021c
++#define NV_PVIDEO_BUFF1_START_ADDRESS	0x00680210
++#define NV_PVIDEO_BUFF1_PITCH_LENGTH	0x00680218
++#define NV_PVIDEO_BUFF1_OFFSET		0x00680220
++#define NV_PVIDEO_OE_STATE		0x00680224
++#define NV_PVIDEO_SU_STATE		0x00680228
++#define NV_PVIDEO_RM_STATE		0x0068022c
++#define NV_PVIDEO_WINDOW_START		0x00680230
++#define NV_PVIDEO_WINDOW_SIZE		0x00680234
++#define NV_PVIDEO_FIFO_THRES_SIZE	0x00680238
++#define NV_PVIDEO_FIFO_BURST_LENGTH	0x0068023c
++#define NV_PVIDEO_KEY			0x00680240
++#define NV_PVIDEO_OVERLAY		0x00680244
++#define NV_PVIDEO_RED_CSC_OFFSET	0x00680280
++#define NV_PVIDEO_GREEN_CSC_OFFSET	0x00680284
++#define NV_PVIDEO_BLUE_CSC_OFFSET	0x00680288
++#define NV_PVIDEO_CSC_ADJUST		0x0068028c
++
++#endif
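
The PRAMDAC defines above follow the usual nouveau convention: a register
offset, then indented value macros that are OR-ed together when the register
is programmed. A minimal standalone sketch of that composition, reusing two
defines from the header; the helper and its use are illustrative, not code
from this patch, and the assumption that the FP_TG_CONTROL MODE field spans
bits 9:8 is read off the SCALE=0/CENTER=1/NATIVE=2 values above.

#include <stdint.h>
#include <stdio.h>

/* Mirrors three defines from the header above. */
#define NV_PRAMDAC_FP_TG_CONTROL		0x00680848
#define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER	(1 << 8)
#define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE	(2 << 8)

/* Clear the 2-bit MODE field at bits 9:8, then select the new mode. */
static uint32_t fp_tg_set_mode(uint32_t tg, uint32_t mode)
{
	tg &= ~(3u << 8);
	return tg | mode;
}

int main(void)
{
	uint32_t tg = fp_tg_set_mode(0, NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE);

	printf("FP_TG_CONTROL (0x%08x) <- 0x%08x\n",
	       NV_PRAMDAC_FP_TG_CONTROL, tg);
	return 0;
}
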
+diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
+index 601f4c0..b806fdc 100644
+--- a/drivers/gpu/drm/r128/r128_drv.c
++++ b/drivers/gpu/drm/r128/r128_drv.c
+@@ -64,7 +64,7 @@ static struct drm_driver driver = {
+ 		.owner = THIS_MODULE,
+ 		.open = drm_open,
+ 		.release = drm_release,
+-		.ioctl = drm_ioctl,
++		.unlocked_ioctl = drm_ioctl,
+ 		.mmap = drm_mmap,
+ 		.poll = drm_poll,
+ 		.fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
+index d3cb676..51c99fc 100644
+--- a/drivers/gpu/drm/r128/r128_ioc32.c
++++ b/drivers/gpu/drm/r128/r128_ioc32.c
+@@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
+ 			  &init->agp_textures_offset))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_R128_INIT, (unsigned long)init);
++	return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
+ }
+ 
+ typedef struct drm_r128_depth32 {
+@@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
+ 			  &depth->mask))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
++	return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+ 
+ }
+ 
+@@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
+ 			  &stipple->mask))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
++	return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+ }
+ 
+ typedef struct drm_r128_getparam32 {
+@@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
+ 			  &getparam->value))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
++	return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ }
+ 
+ drm_ioctl_compat_t *r128_compat_ioctls[] = {
+@@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
+ 		fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+ 
+-	lock_kernel();		/* XXX for now */
+ 	if (fn != NULL)
+ 		ret = (*fn) (filp, cmd, arg);
+ 	else
+-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+-	unlock_kernel();
++		ret = drm_ioctl(filp, cmd, arg);
+ 
+ 	return ret;
+ }
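
The r128 hunks above all apply one mechanical change: drm_ioctl() loses its
inode argument and is registered as .unlocked_ioctl, so the compat wrapper no
longer needs the lock_kernel()/unlock_kernel() bracket around its dispatch.
A minimal standalone sketch of the resulting dispatch shape; the struct file
and drm_ioctl() here are stand-ins for the kernel definitions, kept only so
the sketch compiles on its own.

#include <stdio.h>

struct file;	/* opaque stand-in for the kernel's struct file */

typedef long drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
				unsigned long arg);

/* Stub with the post-patch signature: no inode argument, no BKL needed. */
static long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	(void)filp;
	printf("drm_ioctl(cmd=0x%x, arg=0x%lx)\n", cmd, arg);
	return 0;
}

/* Post-patch shape of r128_compat_ioctl(): dispatch without locking. */
static long compat_dispatch(struct file *filp, drm_ioctl_compat_t *fn,
			    unsigned int cmd, unsigned long arg)
{
	if (fn != NULL)
		return fn(filp, cmd, arg);
	return drm_ioctl(filp, cmd, arg);
}

int main(void)
{
	compat_dispatch(NULL, NULL, 0x42, 0);
	return 0;
}
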
+diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
+index 5982321..1c02d23 100644
+--- a/drivers/gpu/drm/radeon/Kconfig
++++ b/drivers/gpu/drm/radeon/Kconfig
+@@ -1,10 +1,14 @@
+ config DRM_RADEON_KMS
+-	bool "Enable modesetting on radeon by default"
++	bool "Enable modesetting on radeon by default - NEW DRIVER"
+ 	depends on DRM_RADEON
+ 	help
+-	  Choose this option if you want kernel modesetting enabled by default,
+-	  and you have a new enough userspace to support this. Running old
+-	  userspaces with this enabled will cause pain.
++	  Choose this option if you want kernel modesetting enabled by default.
++
++	  This is a completely new driver. It's only part of the existing drm
++	  for compatibility reasons. It requires an entirely different graphics
++	  stack above it and works very differently from the old drm stack.
++	  Don't enable this unless you know what you are doing: it may
++	  cause issues or bugs compared to the previous userspace driver stack.
+ 
+ 	  When kernel modesetting is enabled the IOCTL of radeon/drm
+ 	  driver are considered as invalid and an error message is printed
+diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
+index b5713ee..1cc7b93 100644
+--- a/drivers/gpu/drm/radeon/Makefile
++++ b/drivers/gpu/drm/radeon/Makefile
+@@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
+ $(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
+ 	$(call if_changed,mkregtable)
+ 
++$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
++	$(call if_changed,mkregtable)
++
+ $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
+ 	$(call if_changed,mkregtable)
+ 
+@@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h
+ 
+ $(obj)/r300.o: $(obj)/r300_reg_safe.h
+ 
++$(obj)/r420.o: $(obj)/r420_reg_safe.h
++
+ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
+ 
+ radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
+@@ -49,7 +54,7 @@ radeon-y += radeon_device.o radeon_kms.o \
+ 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
+ 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
+ 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
+-	r600_blit_kms.o radeon_pm.o
++	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+ 
+ radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+ 
+diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
+index 6d0183c..c714179 100644
+--- a/drivers/gpu/drm/radeon/ObjectID.h
++++ b/drivers/gpu/drm/radeon/ObjectID.h
+@@ -1,5 +1,5 @@
+ /*
+-* Copyright 2006-2007 Advanced Micro Devices, Inc.
++* Copyright 2006-2007 Advanced Micro Devices, Inc.  
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+@@ -41,14 +41,14 @@
+ /****************************************************/
+ /* Encoder Object ID Definition                     */
+ /****************************************************/
+-#define ENCODER_OBJECT_ID_NONE                    0x00
++#define ENCODER_OBJECT_ID_NONE                    0x00 
+ 
+ /* Radeon Class Display Hardware */
+ #define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
+ #define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
+ #define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
+ #define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
+-#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05	/* TV/CV DAC */
++#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05     /* TV/CV DAC */
+ #define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
+ #define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
+ 
+@@ -56,11 +56,11 @@
+ #define ENCODER_OBJECT_ID_SI170B                  0x08
+ #define ENCODER_OBJECT_ID_CH7303                  0x09
+ #define ENCODER_OBJECT_ID_CH7301                  0x0A
+-#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B	/* This belongs to Radeon Class Display Hardware */
++#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B    /* This belongs to Radeon Class Display Hardware */
+ #define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
+ #define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
+ #define ENCODER_OBJECT_ID_TITFP513                0x0E
+-#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F	/* not used for Radeon */
++#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F    /* not used for Radeon */
+ #define ENCODER_OBJECT_ID_VT1623                  0x10
+ #define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
+ #define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+@@ -68,9 +68,9 @@
+ #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
+ #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
+ #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
+-#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16	/* Shared with CV/TV and CRT */
+-#define ENCODER_OBJECT_ID_SI178                   0X17	/* External TMDS (dual link, no HDCP.) */
+-#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18	/* MVPU FPGA chip */
++#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16  /* Shared with CV/TV and CRT */
++#define ENCODER_OBJECT_ID_SI178                   0X17  /* External TMDS (dual link, no HDCP.) */
++#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18  /* MVPU FPGA chip */
+ #define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
+ #define ENCODER_OBJECT_ID_VT1625                  0x1A
+ #define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
+@@ -86,7 +86,7 @@
+ /****************************************************/
+ /* Connector Object ID Definition                   */
+ /****************************************************/
+-#define CONNECTOR_OBJECT_ID_NONE                  0x00
++#define CONNECTOR_OBJECT_ID_NONE                  0x00 
+ #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
+ #define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
+ #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
+@@ -96,7 +96,7 @@
+ #define CONNECTOR_OBJECT_ID_SVIDEO                0x07
+ #define CONNECTOR_OBJECT_ID_YPbPr                 0x08
+ #define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
+-#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A	/* Supports both CV & TV */
++#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A  /* Supports both CV & TV */
+ #define CONNECTOR_OBJECT_ID_SCART                 0x0B
+ #define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
+ #define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
+@@ -106,6 +106,8 @@
+ #define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
+ #define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
+ #define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
++#define CONNECTOR_OBJECT_ID_eDP                   0x14
++#define CONNECTOR_OBJECT_ID_MXM                   0x15
+ 
+ /* deleted */
+ 
+@@ -116,6 +118,14 @@
+ #define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL				0x01
+ 
+ /****************************************************/
++/* Generic Object ID Definition                     */
++/****************************************************/
++#define GENERIC_OBJECT_ID_NONE                    0x00
++#define GENERIC_OBJECT_ID_GLSYNC                  0x01
++#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
++#define GENERIC_OBJECT_ID_MXM_OPM                 0x03
++
++/****************************************************/
+ /* Graphics Object ENUM ID Definition               */
+ /****************************************************/
+ #define GRAPH_OBJECT_ENUM_ID1                     0x01
+@@ -124,6 +134,7 @@
+ #define GRAPH_OBJECT_ENUM_ID4                     0x04
+ #define GRAPH_OBJECT_ENUM_ID5                     0x05
+ #define GRAPH_OBJECT_ENUM_ID6                     0x06
++#define GRAPH_OBJECT_ENUM_ID7                     0x07
+ 
+ /****************************************************/
+ /* Graphics Object ID Bit definition                */
+@@ -133,35 +144,35 @@
+ #define RESERVED1_ID_MASK                         0x0800
+ #define OBJECT_TYPE_MASK                          0x7000
+ #define RESERVED2_ID_MASK                         0x8000
+-
++                                                  
+ #define OBJECT_ID_SHIFT                           0x00
+ #define ENUM_ID_SHIFT                             0x08
+ #define OBJECT_TYPE_SHIFT                         0x0C
+ 
++
+ /****************************************************/
+ /* Graphics Object family definition                */
+ /****************************************************/
+-#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
+-	(GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+-	 GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
++#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
++                                                                           GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
+ /****************************************************/
+ /* GPU Object ID definition - Shared with BIOS      */
+ /****************************************************/
+-#define GPU_ENUM_ID1	(GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+-			 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
++#define GPU_ENUM_ID1                            ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+ 
+ /****************************************************/
+ /* Encoder Object ID definition - Shared with BIOS  */
+ /****************************************************/
+ /*
+-#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101
++#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101      
+ #define ENCODER_INTERNAL_TMDS1_ENUM_ID1       0x2102
+ #define ENCODER_INTERNAL_TMDS2_ENUM_ID1       0x2103
+ #define ENCODER_INTERNAL_DAC1_ENUM_ID1        0x2104
+ #define ENCODER_INTERNAL_DAC2_ENUM_ID1        0x2105
+ #define ENCODER_INTERNAL_SDVOA_ENUM_ID1       0x2106
+ #define ENCODER_INTERNAL_SDVOB_ENUM_ID1       0x2107
+-#define ENCODER_SIL170B_ENUM_ID1              0x2108
++#define ENCODER_SIL170B_ENUM_ID1              0x2108  
+ #define ENCODER_CH7303_ENUM_ID1               0x2109
+ #define ENCODER_CH7301_ENUM_ID1               0x210A
+ #define ENCODER_INTERNAL_DVO1_ENUM_ID1        0x210B
+@@ -175,8 +186,8 @@
+ #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   0x2113
+ #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    0x2114
+ #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    0x2115
+-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116
+-#define ENCODER_SI178_ENUM_ID1                   0x2117
++#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116  
++#define ENCODER_SI178_ENUM_ID1                   0x2117 
+ #define ENCODER_MVPU_FPGA_ENUM_ID1               0x2118
+ #define ENCODER_INTERNAL_DDI_ENUM_ID1            0x2119
+ #define ENCODER_VT1625_ENUM_ID1                  0x211A
+@@ -185,205 +196,169 @@
+ #define ENCODER_DP_DP501_ENUM_ID1                0x211D
+ #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         0x211E
+ */
+-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_SIL170B_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_CH7303_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_CH7301_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_TITFP513_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_VT1623_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_HDMI_SI1930_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
+-
+-#define ENCODER_SI178_ENUM_ID1  \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_MVPU_FPGA_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_VT1625_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_HDMI_SI1932_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_DP_DP501_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_DP_AN9801_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+-
+-#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
++#define ENCODER_INTERNAL_LVDS_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_TMDS1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_TMDS2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_DAC1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_DAC2_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
++
++#define ENCODER_SIL170B_ENUM_ID1           ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
++
++#define ENCODER_CH7303_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
++
++#define ENCODER_CH7301_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_DVO1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
++
++#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
++
++
++#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
++
++
++#define ENCODER_TITFP513_ENUM_ID1          ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_LVTM1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_VT1623_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
++
++#define ENCODER_HDMI_SI1930_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
++
++#define ENCODER_HDMI_INTERNAL_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
++
++
++#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                   GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
++
++
++#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT)  // Shared with CV/TV and CRT
++
++#define ENCODER_SI178_ENUM_ID1                    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                   ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)  
++
++#define ENCODER_MVPU_FPGA_ENUM_ID1                ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                   ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_DDI_ENUM_ID1     (  GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 
++
++#define ENCODER_VT1625_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
++
++#define ENCODER_HDMI_SI1932_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
++
++#define ENCODER_DP_DP501_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
++
++#define ENCODER_DP_AN9801_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                             ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)  
++
++#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
++
++#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
++
++#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
++                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                  ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+ 
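
Every ENUM_ID constant above is an instance of one packing scheme, spelled
out by the shift/mask defines earlier in this header: object ID in bits 7:0,
enum ID in bits 10:8, object type in bits 14:12. A minimal sketch of that
composition; the helper is illustrative, and the value GRAPH_OBJECT_TYPE_ENCODER
= 0x2 / GRAPH_OBJECT_TYPE_CONNECTOR = 0x3 is inferred from the commented-out
0x21xx and 0x31xx tables, not shown in these hunks.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shift defines from earlier in ObjectID.h. */
#define OBJECT_ID_SHIFT		0x00
#define ENUM_ID_SHIFT		0x08
#define OBJECT_TYPE_SHIFT	0x0C

static uint16_t make_object_id(uint16_t type, uint16_t enum_id, uint16_t id)
{
	return (uint16_t)(type << OBJECT_TYPE_SHIFT |
			  enum_id << ENUM_ID_SHIFT |
			  id << OBJECT_ID_SHIFT);
}

int main(void)
{
	/* type 0x3 (connector), enum 1, HDMI type A (0x0C) -> 0x310C */
	printf("0x%04x\n", make_object_id(0x3, 0x1, 0x0C));
	return 0;
}
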
+ /****************************************************/
+ /* Connector Object ID definition - Shared with BIOS */
+@@ -406,167 +381,253 @@
+ #define CONNECTOR_7PIN_DIN_ENUM_ID1                 0x310F
+ #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1           0x3110
+ */
+-#define CONNECTOR_LVDS_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_VGA_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_VGA_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_COMPOSITE_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_SVIDEO_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_YPbPr_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_SCART_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+-
+-#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
+-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
++#define CONNECTOR_LVDS_ENUM_ID1                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_LVDS_ENUM_ID2                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_eDP_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_eDP_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_VGA_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_VGA_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_COMPOSITE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_COMPOSITE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SVIDEO_ENUM_ID1              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SVIDEO_ENUM_ID2              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_YPbPr_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_YPbPr_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_D_CONNECTOR_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_D_CONNECTOR_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_9PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_9PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SCART_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_SCART_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
++#define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_CROSSFIRE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_CROSSFIRE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
++
++
++#define CONNECTOR_HARDCODE_DVI_ENUM_ID1        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_HARDCODE_DVI_ENUM_ID2        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DISPLAYPORT_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DISPLAYPORT_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DISPLAYPORT_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DISPLAYPORT_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DISPLAYPORT_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_DISPLAYPORT_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
++
++#define CONNECTOR_MXM_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_A
++
++#define CONNECTOR_MXM_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_B
++
++#define CONNECTOR_MXM_ENUM_ID3                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_C
++
++#define CONNECTOR_MXM_ENUM_ID4                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_D
++
++#define CONNECTOR_MXM_ENUM_ID5                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_TXxx
++
++#define CONNECTOR_MXM_ENUM_ID6                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_UXxx
++
++#define CONNECTOR_MXM_ENUM_ID7                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
++                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
+ 
+ /****************************************************/
+ /* Router Object ID definition - Shared with BIOS   */
+ /****************************************************/
+-#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
+-	(GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+-	 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
++#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1      ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
++                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+ 
+ /* deleted */
+ 
+ /****************************************************/
++/* Generic Object ID definition - Shared with BIOS  */
++/****************************************************/
++#define GENERICOBJECT_GLSYNC_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
++
++#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
++
++#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
++                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE << OBJECT_ID_SHIFT)
++
++#define GENERICOBJECT_MXM_OPM_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
++                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
++                                                 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
++
++/****************************************************/
+ /* Object Cap definition - Shared with BIOS         */
+ /****************************************************/
+ #define GRAPHICS_OBJECT_CAP_I2C                 0x00000001L
+ #define GRAPHICS_OBJECT_CAP_TABLE_ID            0x00000002L
+ 
++
+ #define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID                   0x01
+ #define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID     0x02
+ #define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID    0x03
+@@ -575,4 +636,8 @@
+ #pragma pack()
+ #endif
+ 
+-#endif /*GRAPHICTYPE */
++#endif  /*GRAPHICTYPE */
++
++
++
++
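
Every ID in the block above is built the same way: a graph object type, an instance (enum) ID and an object ID packed into one word via OBJECT_TYPE_SHIFT, ENUM_ID_SHIFT and OBJECT_ID_SHIFT. A minimal sketch of packing and unpacking such a word; the shift/mask values and the example type/ID numbers are assumptions following the usual ObjectID.h layout, not quoted from this patch:

#include <stdint.h>
#include <stdio.h>

/* Assumed field layout (illustrative; the values in ObjectID.h are
 * authoritative). */
#define OBJECT_ID_SHIFT    0x00
#define ENUM_ID_SHIFT      0x08
#define OBJECT_TYPE_SHIFT  0x0C
#define OBJECT_ID_MASK     0x00FF
#define ENUM_ID_MASK       0x0700
#define OBJECT_TYPE_MASK   0x7000

static uint16_t make_object_id(unsigned type, unsigned enum_id, unsigned id)
{
	return (uint16_t)(type << OBJECT_TYPE_SHIFT |
			  enum_id << ENUM_ID_SHIFT |
			  id << OBJECT_ID_SHIFT);
}

int main(void)
{
	/* hypothetical values for a CONNECTOR / ENUM_ID2 / HDMI_TYPE_A id */
	uint16_t obj = make_object_id(3, 2, 0x0C);

	/* decoding is the reverse: mask, then shift */
	printf("0x%04X -> type %u, enum %u, id 0x%02X\n", obj,
	       (unsigned)((obj & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT),
	       (unsigned)((obj & ENUM_ID_MASK) >> ENUM_ID_SHIFT),
	       (unsigned)(obj & OBJECT_ID_MASK));
	return 0;
}
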
+diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
+index fed2291..7f152f6 100644
+--- a/drivers/gpu/drm/radeon/atom.c
++++ b/drivers/gpu/drm/radeon/atom.c
+@@ -24,6 +24,7 @@
+ 
+ #include <linux/module.h>
+ #include <linux/sched.h>
++#include <asm/unaligned.h>
+ 
+ #define ATOM_DEBUG
+ 
+@@ -58,6 +59,7 @@ typedef struct {
+ } atom_exec_context;
+ 
+ int atom_debug = 0;
++static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+ 
+ static uint32_t atom_arg_mask[8] =
+@@ -211,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ 	case ATOM_ARG_PS:
+ 		idx = U8(*ptr);
+ 		(*ptr)++;
+-		val = le32_to_cpu(ctx->ps[idx]);
++		/* get_unaligned_le32 avoids unaligned accesses from atombios
++		 * tables, noticed on a DEC Alpha. */
++		val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ 		if (print)
+ 			DEBUG("PS[0x%02X,0x%04X]", idx, val);
+ 		break;
+@@ -245,6 +249,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ 		case ATOM_WS_ATTRIBUTES:
+ 			val = gctx->io_attr;
+ 			break;
++		case ATOM_WS_REGPTR:
++			val = gctx->reg_block;
++			break;
+ 		default:
+ 			val = ctx->ws[idx];
+ 		}
+@@ -263,10 +270,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
+ 	case ATOM_ARG_FB:
+ 		idx = U8(*ptr);
+ 		(*ptr)++;
++		val = gctx->scratch[((gctx->fb_base + idx) / 4)];
+ 		if (print)
+ 			DEBUG("FB[0x%02X]", idx);
+-		printk(KERN_INFO "FB access is not implemented.\n");
+-		return 0;
++		break;
+ 	case ATOM_ARG_IMM:
+ 		switch (align) {
+ 		case ATOM_SRC_DWORD:
+@@ -384,6 +391,32 @@ static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr)
+ 	return atom_get_src_int(ctx, attr, ptr, NULL, 1);
+ }
+ 
++static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr)
++{
++	uint32_t val = 0xCDCDCDCD;
++
++	switch (align) {
++	case ATOM_SRC_DWORD:
++		val = U32(*ptr);
++		(*ptr) += 4;
++		break;
++	case ATOM_SRC_WORD0:
++	case ATOM_SRC_WORD8:
++	case ATOM_SRC_WORD16:
++		val = U16(*ptr);
++		(*ptr) += 2;
++		break;
++	case ATOM_SRC_BYTE0:
++	case ATOM_SRC_BYTE8:
++	case ATOM_SRC_BYTE16:
++	case ATOM_SRC_BYTE24:
++		val = U8(*ptr);
++		(*ptr)++;
++		break;
++	}
++	return val;
++}
++
+ static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ 			     int *ptr, uint32_t *saved, int print)
+ {
+@@ -481,6 +514,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ 		case ATOM_WS_ATTRIBUTES:
+ 			gctx->io_attr = val;
+ 			break;
++		case ATOM_WS_REGPTR:
++			gctx->reg_block = val;
++			break;
+ 		default:
+ 			ctx->ws[idx] = val;
+ 		}
+@@ -488,9 +524,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
+ 	case ATOM_ARG_FB:
+ 		idx = U8(*ptr);
+ 		(*ptr)++;
++		gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
+ 		DEBUG("FB[0x%02X]", idx);
+-		printk(KERN_INFO "FB access is not implemented.\n");
+-		return;
++		break;
+ 	case ATOM_ARG_PLL:
+ 		idx = U8(*ptr);
+ 		(*ptr)++;
+@@ -573,7 +609,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
+ 	else
+ 		SDEBUG("   table: %d\n", idx);
+ 	if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
+-		atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
++		atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ }
+ 
+ static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
+@@ -676,7 +712,7 @@ static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
+ 	SDEBUG("   dst: ");
+ 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+ 	SDEBUG("   src1: ");
+-	src1 = atom_get_src(ctx, attr, ptr);
++	src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+ 	SDEBUG("   src2: ");
+ 	src2 = atom_get_src(ctx, attr, ptr);
+ 	dst &= src1;
+@@ -808,6 +844,38 @@ static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg)
+ 	SDEBUG("   base: 0x%04X\n", ctx->ctx->reg_block);
+ }
+ 
++static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg)
++{
++	uint8_t attr = U8((*ptr)++), shift;
++	uint32_t saved, dst;
++	int dptr = *ptr;
++	attr &= 0x38;
++	attr |= atom_def_dst[attr >> 3] << 6;
++	SDEBUG("   dst: ");
++	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
++	SDEBUG("   shift: %d\n", shift);
++	dst <<= shift;
++	SDEBUG("   dst: ");
++	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
++static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg)
++{
++	uint8_t attr = U8((*ptr)++), shift;
++	uint32_t saved, dst;
++	int dptr = *ptr;
++	attr &= 0x38;
++	attr |= atom_def_dst[attr >> 3] << 6;
++	SDEBUG("   dst: ");
++	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
++	shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr);
++	SDEBUG("   shift: %d\n", shift);
++	dst >>= shift;
++	SDEBUG("   dst: ");
++	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
++}
++
+ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+ {
+ 	uint8_t attr = U8((*ptr)++), shift;
+@@ -817,7 +885,7 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
+ 	attr |= atom_def_dst[attr >> 3] << 6;
+ 	SDEBUG("   dst: ");
+ 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+-	shift = U8((*ptr)++);
++	shift = atom_get_src(ctx, attr, ptr);
+ 	SDEBUG("   shift: %d\n", shift);
+ 	dst <<= shift;
+ 	SDEBUG("   dst: ");
+@@ -833,7 +901,7 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
+ 	attr |= atom_def_dst[attr >> 3] << 6;
+ 	SDEBUG("   dst: ");
+ 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
+-	shift = U8((*ptr)++);
++	shift = atom_get_src(ctx, attr, ptr);
+ 	SDEBUG("   shift: %d\n", shift);
+ 	dst >>= shift;
+ 	SDEBUG("   dst: ");
+@@ -936,18 +1004,18 @@ static struct {
+ 	atom_op_or, ATOM_ARG_FB}, {
+ 	atom_op_or, ATOM_ARG_PLL}, {
+ 	atom_op_or, ATOM_ARG_MC}, {
+-	atom_op_shl, ATOM_ARG_REG}, {
+-	atom_op_shl, ATOM_ARG_PS}, {
+-	atom_op_shl, ATOM_ARG_WS}, {
+-	atom_op_shl, ATOM_ARG_FB}, {
+-	atom_op_shl, ATOM_ARG_PLL}, {
+-	atom_op_shl, ATOM_ARG_MC}, {
+-	atom_op_shr, ATOM_ARG_REG}, {
+-	atom_op_shr, ATOM_ARG_PS}, {
+-	atom_op_shr, ATOM_ARG_WS}, {
+-	atom_op_shr, ATOM_ARG_FB}, {
+-	atom_op_shr, ATOM_ARG_PLL}, {
+-	atom_op_shr, ATOM_ARG_MC}, {
++	atom_op_shift_left, ATOM_ARG_REG}, {
++	atom_op_shift_left, ATOM_ARG_PS}, {
++	atom_op_shift_left, ATOM_ARG_WS}, {
++	atom_op_shift_left, ATOM_ARG_FB}, {
++	atom_op_shift_left, ATOM_ARG_PLL}, {
++	atom_op_shift_left, ATOM_ARG_MC}, {
++	atom_op_shift_right, ATOM_ARG_REG}, {
++	atom_op_shift_right, ATOM_ARG_PS}, {
++	atom_op_shift_right, ATOM_ARG_WS}, {
++	atom_op_shift_right, ATOM_ARG_FB}, {
++	atom_op_shift_right, ATOM_ARG_PLL}, {
++	atom_op_shift_right, ATOM_ARG_MC}, {
+ 	atom_op_mul, ATOM_ARG_REG}, {
+ 	atom_op_mul, ATOM_ARG_PS}, {
+ 	atom_op_mul, ATOM_ARG_WS}, {
+@@ -1040,7 +1108,7 @@ static struct {
+ 	atom_op_shr, ATOM_ARG_MC}, {
+ atom_op_debug, 0},};
+ 
+-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
++static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+ {
+ 	int base = CU16(ctx->cmd_table + 4 + 2 * index);
+ 	int len, ws, ps, ptr;
+@@ -1057,8 +1125,6 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+ 
+ 	SDEBUG(">> execute %04X (len %d, WS %d, PS %d)\n", base, len, ws, ps);
+ 
+-	/* reset reg block */
+-	ctx->reg_block = 0;
+ 	ectx.ctx = ctx;
+ 	ectx.ps_shift = ps / 4;
+ 	ectx.start = base;
+@@ -1092,6 +1158,19 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+ 		kfree(ectx.ws);
+ }
+ 
++void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
++{
++	mutex_lock(&ctx->mutex);
++	/* reset reg block */
++	ctx->reg_block = 0;
++	/* reset fb window */
++	ctx->fb_base = 0;
++	/* reset io mode */
++	ctx->io_mode = ATOM_IO_MM;
++	atom_execute_table_locked(ctx, index, params);
++	mutex_unlock(&ctx->mutex);
++}
++
+ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
+ 
+ static void atom_index_iio(struct atom_context *ctx, int base)
+@@ -1214,3 +1293,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+ 		*crev = CU8(idx + 3);
+ 	return;
+ }
++
++int atom_allocate_fb_scratch(struct atom_context *ctx)
++{
++	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
++	uint16_t data_offset;
++	int usage_bytes;
++	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
++
++	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
++
++	firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
++
++	DRM_DEBUG("atom firmware requested %08x %dkb\n",
++		  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
++		  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
++
++	usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
++	if (usage_bytes == 0)
++		usage_bytes = 20 * 1024;
++	/* allocate some scratch memory */
++	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
++	if (!ctx->scratch)
++		return -ENOMEM;
++	return 0;
++}
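
The atom.c changes above introduce a classic recursion-safe locking split: atom_execute_table() takes the new context mutex and resets the per-call interpreter state (reg block, fb window, io mode), while the CALLTABLE opcode reenters through atom_execute_table_locked(), so a table that calls another table cannot deadlock on the mutex it already holds. A stripped-down userspace sketch of the same shape, with a pthread mutex standing in for the kernel's struct mutex:

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t mutex;
	int reg_block, fb_base, io_mode;	/* per-call interpreter state */
};

/* Worker: may be reentered by nested table calls; assumes the
 * caller already holds ctx->mutex. */
static void execute_table_locked(struct ctx *c, int index)
{
	printf("executing table %d\n", index);
	if (index > 0)
		execute_table_locked(c, index - 1);	/* nested CALLTABLE */
}

/* Public entry point: serializes callers and resets state once per
 * outermost call, mirroring the atom_execute_table() hunk above. */
static void execute_table(struct ctx *c, int index)
{
	pthread_mutex_lock(&c->mutex);
	c->reg_block = 0;
	c->fb_base = 0;
	c->io_mode = 0;
	execute_table_locked(c, index);
	pthread_mutex_unlock(&c->mutex);
}

int main(void)
{
	struct ctx c = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	execute_table(&c, 2);
	return 0;
}
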
+diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
+index e6eb38f..bc73781 100644
+--- a/drivers/gpu/drm/radeon/atom.h
++++ b/drivers/gpu/drm/radeon/atom.h
+@@ -91,6 +91,7 @@
+ #define ATOM_WS_AND_MASK	0x45
+ #define ATOM_WS_FB_WINDOW	0x46
+ #define ATOM_WS_ATTRIBUTES	0x47
++#define ATOM_WS_REGPTR  	0x48
+ 
+ #define ATOM_IIO_NOP		0
+ #define ATOM_IIO_START		1
+@@ -120,6 +121,7 @@ struct card_info {
+ 
+ struct atom_context {
+ 	struct card_info *card;
++	struct mutex mutex;
+ 	void *bios;
+ 	uint32_t cmd_table, data_table;
+ 	uint16_t *iio;
+@@ -132,6 +134,7 @@ struct atom_context {
+ 	uint8_t shift;
+ 	int cs_equal, cs_above;
+ 	int io_mode;
++	uint32_t *scratch;
+ };
+ 
+ extern int atom_debug;
+@@ -142,6 +145,7 @@ int atom_asic_init(struct atom_context *);
+ void atom_destroy(struct atom_context *);
+ void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
++int atom_allocate_fb_scratch(struct atom_context *ctx);
+ #include "atom-types.h"
+ #include "atombios.h"
+ #include "ObjectID.h"
+diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
+index c11dddd..91ad0d1 100644
+--- a/drivers/gpu/drm/radeon/atombios.h
++++ b/drivers/gpu/drm/radeon/atombios.h
+@@ -1141,7 +1141,7 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
+ /* ucTableFormatRevision=1,ucTableContentRevision=2 */
+ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
+ 	USHORT usPixelClock;	/*  in 10KHz; for bios convenient */
+-	UCHAR ucMisc;		/*  see PANEL_ENCODER_MISC_xx defintions below */
++	UCHAR ucMisc;		/*  see PANEL_ENCODER_MISC_xx definitions below */
+ 	UCHAR ucAction;		/*  0: turn off encoder */
+ 	/*  1: setup and turn on encoder */
+ 	UCHAR ucTruncate;	/*  bit0=0: Disable truncate */
+@@ -1424,7 +1424,7 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
+ /*  Structures used in FirmwareInfoTable */
+ /****************************************************************************/
+ 
+-/*  usBIOSCapability Defintion: */
++/*  usBIOSCapability Definition: */
+ /*  Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
+ /*  Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
+ /*  Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
+@@ -2386,7 +2386,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
+ } ATOM_ANALOG_TV_INFO_V1_2;
+ 
+ /**************************************************************************/
+-/*  VRAM usage and their defintions */
++/*  VRAM usage and their definitions */
+ 
+ /*  One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
+ /*  Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
+@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
+ typedef struct _ATOM_HPD_INT_RECORD {
+ 	ATOM_COMMON_RECORD_HEADER sheader;
+ 	UCHAR ucHPDIntGPIOID;	/* Corresponding block in GPIO_PIN_INFO table gives the pin info */
+-	UCHAR ucPluggged_PinState;
++	UCHAR ucPlugged_PinState;
+ } ATOM_HPD_INT_RECORD;
+ 
+ typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
+@@ -3046,7 +3046,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
+ #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
+ #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+ 
+-/* Byte aligned defintion for BIOS usage */
++/* Byte aligned definition for BIOS usage */
+ #define ATOM_S0_CRT1_MONOb0             0x01
+ #define ATOM_S0_CRT1_COLORb0            0x02
+ #define ATOM_S0_CRT1_MASKb0             (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
+@@ -3131,7 +3131,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
+ #define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
+ #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK   0xC0000000L
+ 
+-/* Byte aligned defintion for BIOS usage */
++/* Byte aligned definition for BIOS usage */
+ #define ATOM_S2_TV1_STANDARD_MASKb0     0x0F
+ #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
+ #define ATOM_S2_CRT1_DPMS_STATEb2       0x01
+@@ -3190,7 +3190,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
+ #define ATOM_S3_ALLOW_FAST_PWR_SWITCH   0x40000000L
+ #define ATOM_S3_RQST_GPU_USE_MIN_PWR    0x80000000L
+ 
+-/* Byte aligned defintion for BIOS usage */
++/* Byte aligned definition for BIOS usage */
+ #define ATOM_S3_CRT1_ACTIVEb0           0x01
+ #define ATOM_S3_LCD1_ACTIVEb0           0x02
+ #define ATOM_S3_TV1_ACTIVEb0            0x04
+@@ -3230,7 +3230,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
+ #define ATOM_S4_LCD1_REFRESH_MASK       0x0000FF00L
+ #define ATOM_S4_LCD1_REFRESH_SHIFT      8
+ 
+-/* Byte aligned defintion for BIOS usage */
++/* Byte aligned definition for BIOS usage */
+ #define ATOM_S4_LCD1_PANEL_ID_MASKb0	  0x0FF
+ #define ATOM_S4_LCD1_REFRESH_MASKb1		  ATOM_S4_LCD1_PANEL_ID_MASKb0
+ #define ATOM_S4_VRAM_INFO_MASKb2        ATOM_S4_LCD1_PANEL_ID_MASKb0
+@@ -3310,7 +3310,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
+ #define ATOM_S6_VRI_BRIGHTNESS_CHANGE       0x40000000L
+ #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK  0x80000000L
+ 
+-/* Byte aligned defintion for BIOS usage */
++/* Byte aligned definition for BIOS usage */
+ #define ATOM_S6_DEVICE_CHANGEb0         0x01
+ #define ATOM_S6_SCALER_CHANGEb0         0x02
+ #define ATOM_S6_LID_CHANGEb0            0x04
+@@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 {
+ 	ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+ } ATOM_POWERPLAY_INFO_V3;
+ 
++/* New PPlib */
++/**************************************************************************/
++typedef struct _ATOM_PPLIB_THERMALCONTROLLER
++
++{
++    UCHAR ucType;           // one of ATOM_PP_THERMALCONTROLLER_*
++    UCHAR ucI2cLine;        // as interpreted by DAL I2C
++    UCHAR ucI2cAddress;
++    UCHAR ucFanParameters;  // Fan Control Parameters.
++    UCHAR ucFanMinRPM;      // Fan Minimum RPM (hundreds) -- for display purposes only.
++    UCHAR ucFanMaxRPM;      // Fan Maximum RPM (hundreds) -- for display purposes only.
++    UCHAR ucReserved;       // ----
++    UCHAR ucFlags;          // to be defined
++} ATOM_PPLIB_THERMALCONTROLLER;
++
++#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
++#define ATOM_PP_FANPARAMETERS_NOFAN                                 0x80    // No fan is connected to this controller.
++
++#define ATOM_PP_THERMALCONTROLLER_NONE      0
++#define ATOM_PP_THERMALCONTROLLER_LM63      1  // Not used by PPLib
++#define ATOM_PP_THERMALCONTROLLER_ADM1032   2  // Not used by PPLib
++#define ATOM_PP_THERMALCONTROLLER_ADM1030   3  // Not used by PPLib
++#define ATOM_PP_THERMALCONTROLLER_MUA6649   4  // Not used by PPLib
++#define ATOM_PP_THERMALCONTROLLER_LM64      5
++#define ATOM_PP_THERMALCONTROLLER_F75375    6  // Not used by PPLib
++#define ATOM_PP_THERMALCONTROLLER_RV6xx     7
++#define ATOM_PP_THERMALCONTROLLER_RV770     8
++#define ATOM_PP_THERMALCONTROLLER_ADT7473   9
++
++typedef struct _ATOM_PPLIB_STATE
++{
++    UCHAR ucNonClockStateIndex;
++    UCHAR ucClockStateIndices[1]; // variable-sized
++} ATOM_PPLIB_STATE;
++
++//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
++#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
++#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
++#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
++#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
++#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
++#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
++#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
++#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
++#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
++#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
++#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
++#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
++
++typedef struct _ATOM_PPLIB_POWERPLAYTABLE
++{
++      ATOM_COMMON_TABLE_HEADER sHeader;
++
++      UCHAR ucDataRevision;
++
++      UCHAR ucNumStates;
++      UCHAR ucStateEntrySize;
++      UCHAR ucClockInfoSize;
++      UCHAR ucNonClockSize;
++
++      // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
++      USHORT usStateArrayOffset;
++
++      // offset from start of this table to array of ASIC-specific structures,
++      // currently ATOM_PPLIB_CLOCK_INFO.
++      USHORT usClockInfoArrayOffset;
++
++      // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
++      USHORT usNonClockInfoArrayOffset;
++
++      USHORT usBackbiasTime;    // in microseconds
++      USHORT usVoltageTime;     // in microseconds
++      USHORT usTableSize;       // the size of this structure, or of the extended structure
++
++      ULONG ulPlatformCaps;            // See ATOM_PP_PLATFORM_CAP_*
++
++      ATOM_PPLIB_THERMALCONTROLLER    sThermalController;
++
++      USHORT usBootClockInfoOffset;
++      USHORT usBootNonClockInfoOffset;
++
++} ATOM_PPLIB_POWERPLAYTABLE;
++
++//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
++#define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
++#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
++#define ATOM_PPLIB_CLASSIFICATION_UI_NONE          0
++#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY       1
++#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED      3
++#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE   5
++// 2, 4, 6, 7 are reserved
++
++#define ATOM_PPLIB_CLASSIFICATION_BOOT                   0x0008
++#define ATOM_PPLIB_CLASSIFICATION_THERMAL                0x0010
++#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE     0x0020
++#define ATOM_PPLIB_CLASSIFICATION_REST                   0x0040
++#define ATOM_PPLIB_CLASSIFICATION_FORCED                 0x0080
++#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE          0x0100
++#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE      0x0200
++#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE               0x0400
++#define ATOM_PPLIB_CLASSIFICATION_3DLOW                  0x0800
++#define ATOM_PPLIB_CLASSIFICATION_ACPI                   0x1000
++// remaining 3 bits are reserved
++
++//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
++#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
++#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
++
++// 0 is 2.5Gb/s, 1 is 5Gb/s
++#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK            0x00000004
++#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT           2
++
++// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
++#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK            0x000000F8
++#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT           3
++
++// lookup into reduced refresh-rate table
++#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK  0x00000F00
++#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
++
++#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED    0
++#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ         1
++// 2-15 TBD as needed.
++
++#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING        0x00001000
++#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS  0x00002000
++#define ATOM_PPLIB_ENABLE_VARIBRIGHT                     0x00008000
++
++#define ATOM_PPLIB_DISALLOW_ON_DC                       0x00004000
++
++// Contained in an array starting at the offset
++// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
++// referenced from ATOM_PPLIB_STATE::ucNonClockStateIndex
++typedef struct _ATOM_PPLIB_NONCLOCK_INFO
++{
++      USHORT usClassification;
++      UCHAR  ucMinTemperature;
++      UCHAR  ucMaxTemperature;
++      ULONG  ulCapsAndSettings;
++      UCHAR  ucRequiredPower;
++      UCHAR  ucUnused1[3];
++} ATOM_PPLIB_NONCLOCK_INFO;
++
++// Contained in an array starting at the offset
++// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
++// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
++typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
++{
++      USHORT usEngineClockLow;
++      UCHAR ucEngineClockHigh;
++
++      USHORT usMemoryClockLow;
++      UCHAR ucMemoryClockHigh;
++
++      USHORT usVDDC;
++      USHORT usUnused1;
++      USHORT usUnused2;
++
++      ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
++
++} ATOM_PPLIB_R600_CLOCK_INFO;
++
++// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
++#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2          1
++#define ATOM_PPLIB_R600_FLAGS_UVDSAFE           2
++#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE    4
++#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF    8
++#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF    16
++
++typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
++
++{
++      USHORT usLowEngineClockLow;         // Low Engine clock in MHz (the same way as on the R600).
++      UCHAR  ucLowEngineClockHigh;
++      USHORT usHighEngineClockLow;        // High Engine clock in MHz.
++      UCHAR  ucHighEngineClockHigh;
++      USHORT usMemoryClockLow;            // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
++      UCHAR  ucMemoryClockHigh;           // Currently unused.
++      UCHAR  ucPadding;                   // For proper alignment and size.
++      USHORT usVDDC;                      // For the 780, use: None, Low, High, Variable
++      UCHAR  ucMaxHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}
++      UCHAR  ucMinHTLinkWidth;            // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be bigger depending on display BW requirement.
++      USHORT usHTLinkFreq;                // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
++      ULONG  ulFlags;
++} ATOM_PPLIB_RS780_CLOCK_INFO;
++
++#define ATOM_PPLIB_RS780_VOLTAGE_NONE       0
++#define ATOM_PPLIB_RS780_VOLTAGE_LOW        1
++#define ATOM_PPLIB_RS780_VOLTAGE_HIGH       2
++#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE   3
++
++#define ATOM_PPLIB_RS780_SPMCLK_NONE        0   // We cannot change the side port memory clock, leave it as it is.
++#define ATOM_PPLIB_RS780_SPMCLK_LOW         1
++#define ATOM_PPLIB_RS780_SPMCLK_HIGH        2
++
++#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE       0
++#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1
++#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2
++
+ /**************************************************************************/
+ 
+ /*  Following definitions are for compatiblity issue in different SW components. */
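
A detail worth noting in the PPLib structures above: each clock is split into a 16-bit Low word and an 8-bit High byte (usEngineClockLow/ucEngineClockHigh and friends), so the full value is a little-endian 24-bit quantity. Reassembling one looks like the sketch below; the 10 kHz unit is an inference from how radeon treats other AtomBIOS clocks, not something this hunk states:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the split-clock fields in ATOM_PPLIB_R600_CLOCK_INFO.
 * The table bytes are little-endian in the BIOS image; a big-endian
 * host would need le16toh() on the Low word (omitted for brevity). */
struct split_clock {
	uint16_t usEngineClockLow;
	uint8_t  ucEngineClockHigh;
};

static uint32_t pplib_clock(uint16_t low, uint8_t high)
{
	return ((uint32_t)high << 16) | low;	/* 24-bit value */
}

int main(void)
{
	/* 0x0186A0 = 100000; if the unit really is 10 kHz, that is 1 GHz */
	struct split_clock ci = { 0x86A0, 0x01 };

	printf("engine clock: %u (assumed x10 kHz)\n",
	       (unsigned)pplib_clock(ci.usEngineClockLow,
				     ci.ucEngineClockHigh));
	return 0;
}
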
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 19f93f2..af464e3 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -249,15 +249,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 		if (ASIC_IS_DCE3(rdev))
+ 			atombios_enable_crtc_memreq(crtc, 1);
+ 		atombios_blank_crtc(crtc, 0);
+-		if (rdev->family < CHIP_R600)
+-			drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
++		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ 		radeon_crtc_load_lut(crtc);
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+ 	case DRM_MODE_DPMS_SUSPEND:
+ 	case DRM_MODE_DPMS_OFF:
+-		if (rdev->family < CHIP_R600)
+-			drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
++		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ 		atombios_blank_crtc(crtc, 1);
+ 		if (ASIC_IS_DCE3(rdev))
+ 			atombios_enable_crtc_memreq(crtc, 0);
+@@ -309,7 +307,6 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
+ 	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ 	args.ucCRTC = radeon_crtc->crtc_id;
+ 
+-	printk("executing set crtc dtd timing\n");
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ 
+@@ -349,7 +346,6 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
+ 	args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
+ 	args.ucCRTC = radeon_crtc->crtc_id;
+ 
+-	printk("executing set crtc timing\n");
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ 
+@@ -411,60 +407,57 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
+ 	}
+ }
+ 
+-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
++union adjust_pixel_clock {
++	ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
++};
++
++static u32 atombios_adjust_pll(struct drm_crtc *crtc,
++			       struct drm_display_mode *mode,
++			       struct radeon_pll *pll)
+ {
+-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_encoder *encoder = NULL;
+ 	struct radeon_encoder *radeon_encoder = NULL;
+-	uint8_t frev, crev;
+-	int index;
+-	SET_PIXEL_CLOCK_PS_ALLOCATION args;
+-	PIXEL_CLOCK_PARAMETERS *spc1_ptr;
+-	PIXEL_CLOCK_PARAMETERS_V2 *spc2_ptr;
+-	PIXEL_CLOCK_PARAMETERS_V3 *spc3_ptr;
+-	uint32_t pll_clock = mode->clock;
+-	uint32_t adjusted_clock;
+-	uint32_t ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+-	struct radeon_pll *pll;
+-	int pll_flags = 0;
++	u32 adjusted_clock = mode->clock;
+ 
+-	memset(&args, 0, sizeof(args));
++	/* reset the pll flags */
++	pll->flags = 0;
+ 
+ 	if (ASIC_IS_AVIVO(rdev)) {
+ 		if ((rdev->family == CHIP_RS600) ||
+ 		    (rdev->family == CHIP_RS690) ||
+ 		    (rdev->family == CHIP_RS740))
+-			pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
+-				      RADEON_PLL_PREFER_CLOSEST_LOWER);
++			pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
++				       RADEON_PLL_PREFER_CLOSEST_LOWER);
+ 
+ 		if (ASIC_IS_DCE32(rdev) && mode->clock > 200000)	/* range limits??? */
+-			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
++			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ 		else
+-			pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
++			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ 	} else {
+-		pll_flags |= RADEON_PLL_LEGACY;
++		pll->flags |= RADEON_PLL_LEGACY;
+ 
+ 		if (mode->clock > 200000)	/* range limits??? */
+-			pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
++			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ 		else
+-			pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
++			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ 
+ 	}
+ 
+ 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ 		if (encoder->crtc == crtc) {
+-			if (!ASIC_IS_AVIVO(rdev)) {
+-				if (encoder->encoder_type !=
+-				    DRM_MODE_ENCODER_DAC)
+-					pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
+-				if (!ASIC_IS_AVIVO(rdev)
+-				    && (encoder->encoder_type ==
+-					DRM_MODE_ENCODER_LVDS))
+-					pll_flags |= RADEON_PLL_USE_REF_DIV;
+-			}
+ 			radeon_encoder = to_radeon_encoder(encoder);
++			if (ASIC_IS_AVIVO(rdev)) {
++				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
++				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
++					adjusted_clock = mode->clock * 2;
++			} else {
++				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
++					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
++				if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
++					pll->flags |= RADEON_PLL_USE_REF_DIV;
++			}
+ 			break;
+ 		}
+ 	}
+@@ -474,36 +467,101 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 	 * special hw requirements.
+ 	 */
+ 	if (ASIC_IS_DCE3(rdev)) {
+-		ADJUST_DISPLAY_PLL_PS_ALLOCATION adjust_pll_args;
+-
+-		if (!encoder)
+-			return;
++		union adjust_pixel_clock args;
++		struct radeon_encoder_atom_dig *dig;
++		u8 frev, crev;
++		int index;
+ 
+-		memset(&adjust_pll_args, 0, sizeof(adjust_pll_args));
+-		adjust_pll_args.usPixelClock = cpu_to_le16(mode->clock / 10);
+-		adjust_pll_args.ucTransmitterID = radeon_encoder->encoder_id;
+-		adjust_pll_args.ucEncodeMode = atombios_get_encoder_mode(encoder);
++		if (!radeon_encoder->enc_priv)
++			return adjusted_clock;
++		dig = radeon_encoder->enc_priv;
+ 
+ 		index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
+-		atom_execute_table(rdev->mode_info.atom_context,
+-				   index, (uint32_t *)&adjust_pll_args);
+-		adjusted_clock = le16_to_cpu(adjust_pll_args.usPixelClock) * 10;
+-	} else {
+-		/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
+-		if (ASIC_IS_AVIVO(rdev) &&
+-		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+-			adjusted_clock = mode->clock * 2;
+-		else
+-			adjusted_clock = mode->clock;
++		atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
++				      &crev);
++
++		memset(&args, 0, sizeof(args));
++
++		switch (frev) {
++		case 1:
++			switch (crev) {
++			case 1:
++			case 2:
++				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
++				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
++				args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
++
++				atom_execute_table(rdev->mode_info.atom_context,
++						   index, (uint32_t *)&args);
++				adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
++				break;
++			default:
++				DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++				return adjusted_clock;
++			}
++			break;
++		default:
++			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
++			return adjusted_clock;
++		}
++	}
++	return adjusted_clock;
++}
++
++union set_pixel_clock {
++	SET_PIXEL_CLOCK_PS_ALLOCATION base;
++	PIXEL_CLOCK_PARAMETERS v1;
++	PIXEL_CLOCK_PARAMETERS_V2 v2;
++	PIXEL_CLOCK_PARAMETERS_V3 v3;
++};
++
++void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
++{
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct drm_encoder *encoder = NULL;
++	struct radeon_encoder *radeon_encoder = NULL;
++	u8 frev, crev;
++	int index;
++	union set_pixel_clock args;
++	u32 pll_clock = mode->clock;
++	u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
++	struct radeon_pll *pll;
++	u32 adjusted_clock;
++
++	memset(&args, 0, sizeof(args));
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		if (encoder->crtc == crtc) {
++			radeon_encoder = to_radeon_encoder(encoder);
++			break;
++		}
+ 	}
+ 
++	if (!radeon_encoder)
++		return;
++
+ 	if (radeon_crtc->crtc_id == 0)
+ 		pll = &rdev->clock.p1pll;
+ 	else
+ 		pll = &rdev->clock.p2pll;
+ 
+-	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+-			   &ref_div, &post_div, pll_flags);
++	/* adjust pixel clock as needed */
++	adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
++
++	if (ASIC_IS_AVIVO(rdev)) {
++		if (radeon_new_pll)
++			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
++						 &fb_div, &frac_fb_div,
++						 &ref_div, &post_div);
++		else
++			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
++					   &fb_div, &frac_fb_div,
++					   &ref_div, &post_div);
++	} else
++		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
++				   &ref_div, &post_div);
+ 
+ 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+@@ -513,45 +571,38 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 	case 1:
+ 		switch (crev) {
+ 		case 1:
+-			spc1_ptr = (PIXEL_CLOCK_PARAMETERS *) & args.sPCLKInput;
+-			spc1_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
+-			spc1_ptr->usRefDiv = cpu_to_le16(ref_div);
+-			spc1_ptr->usFbDiv = cpu_to_le16(fb_div);
+-			spc1_ptr->ucFracFbDiv = frac_fb_div;
+-			spc1_ptr->ucPostDiv = post_div;
+-			spc1_ptr->ucPpll =
++			args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
++			args.v1.usRefDiv = cpu_to_le16(ref_div);
++			args.v1.usFbDiv = cpu_to_le16(fb_div);
++			args.v1.ucFracFbDiv = frac_fb_div;
++			args.v1.ucPostDiv = post_div;
++			args.v1.ucPpll =
+ 			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+-			spc1_ptr->ucCRTC = radeon_crtc->crtc_id;
+-			spc1_ptr->ucRefDivSrc = 1;
++			args.v1.ucCRTC = radeon_crtc->crtc_id;
++			args.v1.ucRefDivSrc = 1;
+ 			break;
+ 		case 2:
+-			spc2_ptr =
+-			    (PIXEL_CLOCK_PARAMETERS_V2 *) & args.sPCLKInput;
+-			spc2_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
+-			spc2_ptr->usRefDiv = cpu_to_le16(ref_div);
+-			spc2_ptr->usFbDiv = cpu_to_le16(fb_div);
+-			spc2_ptr->ucFracFbDiv = frac_fb_div;
+-			spc2_ptr->ucPostDiv = post_div;
+-			spc2_ptr->ucPpll =
++			args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
++			args.v2.usRefDiv = cpu_to_le16(ref_div);
++			args.v2.usFbDiv = cpu_to_le16(fb_div);
++			args.v2.ucFracFbDiv = frac_fb_div;
++			args.v2.ucPostDiv = post_div;
++			args.v2.ucPpll =
+ 			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+-			spc2_ptr->ucCRTC = radeon_crtc->crtc_id;
+-			spc2_ptr->ucRefDivSrc = 1;
++			args.v2.ucCRTC = radeon_crtc->crtc_id;
++			args.v2.ucRefDivSrc = 1;
+ 			break;
+ 		case 3:
+-			if (!encoder)
+-				return;
+-			spc3_ptr =
+-			    (PIXEL_CLOCK_PARAMETERS_V3 *) & args.sPCLKInput;
+-			spc3_ptr->usPixelClock = cpu_to_le16(mode->clock / 10);
+-			spc3_ptr->usRefDiv = cpu_to_le16(ref_div);
+-			spc3_ptr->usFbDiv = cpu_to_le16(fb_div);
+-			spc3_ptr->ucFracFbDiv = frac_fb_div;
+-			spc3_ptr->ucPostDiv = post_div;
+-			spc3_ptr->ucPpll =
++			args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
++			args.v3.usRefDiv = cpu_to_le16(ref_div);
++			args.v3.usFbDiv = cpu_to_le16(fb_div);
++			args.v3.ucFracFbDiv = frac_fb_div;
++			args.v3.ucPostDiv = post_div;
++			args.v3.ucPpll =
+ 			    radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+-			spc3_ptr->ucMiscInfo = (radeon_crtc->crtc_id << 2);
+-			spc3_ptr->ucTransmitterId = radeon_encoder->encoder_id;
+-			spc3_ptr->ucEncoderMode =
++			args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
++			args.v3.ucTransmitterId = radeon_encoder->encoder_id;
++			args.v3.ucEncoderMode =
+ 			    atombios_get_encoder_mode(encoder);
+ 			break;
+ 		default:
+@@ -564,33 +615,43 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 		return;
+ 	}
+ 
+-	printk("executing set pll\n");
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ }
+ 
+-int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+-			   struct drm_framebuffer *old_fb)
++static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
++			       struct drm_framebuffer *old_fb)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_framebuffer *radeon_fb;
+ 	struct drm_gem_object *obj;
+-	struct drm_radeon_gem_object *obj_priv;
++	struct radeon_bo *rbo;
+ 	uint64_t fb_location;
+ 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
++	int r;
+ 
+-	if (!crtc->fb)
+-		return -EINVAL;
++	/* no fb bound */
++	if (!crtc->fb) {
++		DRM_DEBUG("No FB bound\n");
++		return 0;
++	}
+ 
+ 	radeon_fb = to_radeon_framebuffer(crtc->fb);
+ 
++	/* Pin framebuffer & get tiling information */
+ 	obj = radeon_fb->obj;
+-	obj_priv = obj->driver_private;
+-
+-	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
++	rbo = obj->driver_private;
++	r = radeon_bo_reserve(rbo, false);
++	if (unlikely(r != 0))
++		return r;
++	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
++	if (unlikely(r != 0)) {
++		radeon_bo_unreserve(rbo);
+ 		return -EINVAL;
+ 	}
++	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
++	radeon_bo_unreserve(rbo);
+ 
+ 	switch (crtc->fb->bits_per_pixel) {
+ 	case 8:
+@@ -620,8 +681,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 		return -EINVAL;
+ 	}
+ 
+-	radeon_object_get_tiling_flags(obj->driver_private,
+-				       &tiling_flags, NULL);
+ 	if (tiling_flags & RADEON_TILING_MACRO)
+ 		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+ 
+@@ -676,7 +735,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 
+ 	if (old_fb && old_fb != crtc->fb) {
+ 		radeon_fb = to_radeon_framebuffer(old_fb);
+-		radeon_gem_object_unpin(radeon_fb->obj);
++		rbo = radeon_fb->obj->driver_private;
++		r = radeon_bo_reserve(rbo, false);
++		if (unlikely(r != 0))
++			return r;
++		radeon_bo_unpin(rbo);
++		radeon_bo_unreserve(rbo);
+ 	}
+ 
+ 	/* Bytes per pixel may have changed */
+@@ -685,6 +749,42 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 	return 0;
+ }
+ 
++int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
++			   struct drm_framebuffer *old_fb)
++{
++	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
++
++	if (ASIC_IS_AVIVO(rdev))
++		return avivo_crtc_set_base(crtc, x, y, old_fb);
++	else
++		return radeon_crtc_set_base(crtc, x, y, old_fb);
++}
++
++/* properly set additional regs when using atombios */
++static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
++{
++	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++	u32 disp_merge_cntl;
++
++	switch (radeon_crtc->crtc_id) {
++	case 0:
++		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
++		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
++		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
++		break;
++	case 1:
++		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
++		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
++		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
++		WREG32(RADEON_FP_H2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
++		WREG32(RADEON_FP_V2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
++		break;
++	}
++}
++
+ int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ 			   struct drm_display_mode *mode,
+ 			   struct drm_display_mode *adjusted_mode,
+@@ -706,8 +806,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
+ 	else {
+ 		if (radeon_crtc->crtc_id == 0)
+ 			atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+-		radeon_crtc_set_base(crtc, x, y, old_fb);
+-		radeon_legacy_atom_set_surface(crtc);
++		atombios_crtc_set_base(crtc, x, y, old_fb);
++		radeon_legacy_atom_fixup(crtc);
+ 	}
+ 	atombios_overscan_setup(crtc, mode, adjusted_mode);
+ 	atombios_scaler_setup(crtc);
+@@ -725,8 +825,8 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
+ 
+ static void atombios_crtc_prepare(struct drm_crtc *crtc)
+ {
+-	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ 	atombios_lock_crtc(crtc, 1);
++	atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+ 
+ static void atombios_crtc_commit(struct drm_crtc *crtc)
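
The atombios_crtc.c rework above also establishes the idiom the driver now uses for versioned AtomBIOS command tables: one zeroed union overlays every revision of the parameter block, and the frev/crev pair returned by atom_parse_cmd_header() selects which view gets filled before atom_execute_table() runs, with unknown versions rejected. A toy sketch of the same dispatch outside the kernel; both parameter-block revisions here are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two hypothetical revisions of the same parameter block. */
struct params_v1 { uint16_t pixel_clock; uint8_t ppll; };
struct params_v2 { uint16_t pixel_clock; uint8_t ppll; uint8_t misc; };

union set_clock_args {
	struct params_v1 v1;
	struct params_v2 v2;
};

static void fill_args(union set_clock_args *args, int frev, int crev,
		      uint16_t clock_10khz)
{
	memset(args, 0, sizeof(*args));		/* zero the widest view */
	switch (frev) {
	case 1:
		switch (crev) {
		case 1:
			args->v1.pixel_clock = clock_10khz;
			args->v1.ppll = 0;
			break;
		case 2:
			args->v2.pixel_clock = clock_10khz;
			args->v2.ppll = 0;
			args->v2.misc = 1 << 2;	/* hypothetical flag */
			break;
		default:
			fprintf(stderr, "Unknown table version %d %d\n",
				frev, crev);
		}
		break;
	default:
		fprintf(stderr, "Unknown table version %d %d\n", frev, crev);
	}
}

int main(void)
{
	union set_clock_args args;

	fill_args(&args, 1, 2, 15400);	/* 154 MHz in 10 kHz units */
	printf("v2 clock: %u\n", (unsigned)args.v2.pixel_clock);
	return 0;
}
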
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+new file mode 100644
+index 0000000..99915a6
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -0,0 +1,789 @@
++/*
++ * Copyright 2007-8 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Dave Airlie
++ *          Alex Deucher
++ */
++#include "drmP.h"
++#include "radeon_drm.h"
++#include "radeon.h"
++
++#include "atom.h"
++#include "atom-bits.h"
++#include "drm_dp_helper.h"
++
++/* move these to drm_dp_helper.c/h */
++#define DP_LINK_CONFIGURATION_SIZE 9
++#define DP_LINK_STATUS_SIZE	   6
++#define DP_DPCD_SIZE	           8
++
++static char *voltage_names[] = {
++        "0.4V", "0.6V", "0.8V", "1.2V"
++};
++static char *pre_emph_names[] = {
++        "0dB", "3.5dB", "6dB", "9.5dB"
++};
++
++static const int dp_clocks[] = {
++	54000,  /* 1 lane, 1.62 GHz */
++	90000,  /* 1 lane, 2.70 GHz */
++	108000, /* 2 lane, 1.62 GHz */
++	180000, /* 2 lane, 2.70 GHz */
++	216000, /* 4 lane, 1.62 GHz */
++	360000, /* 4 lane, 2.70 GHz */
++};
++
++static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);
++
++/* common helper functions */
++static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
++{
++	int i;
++	u8 max_link_bw;
++	u8 max_lane_count;
++
++	if (!dpcd)
++		return 0;
++
++	max_link_bw = dpcd[DP_MAX_LINK_RATE];
++	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
++
++	switch (max_link_bw) {
++	case DP_LINK_BW_1_62:
++	default:
++		for (i = 0; i < num_dp_clocks; i++) {
++			if (i % 2)
++				continue;
++			switch (max_lane_count) {
++			case 1:
++				if (i > 1)
++					return 0;
++				break;
++			case 2:
++				if (i > 3)
++					return 0;
++				break;
++			case 4:
++			default:
++				break;
++			}
++			if (dp_clocks[i] > mode_clock) {
++				if (i < 2)
++					return 1;
++				else if (i < 4)
++					return 2;
++				else
++					return 4;
++			}
++		}
++		break;
++	case DP_LINK_BW_2_7:
++		for (i = 0; i < num_dp_clocks; i++) {
++			switch (max_lane_count) {
++			case 1:
++				if (i > 1)
++					return 0;
++				break;
++			case 2:
++				if (i > 3)
++					return 0;
++				break;
++			case 4:
++			default:
++				break;
++			}
++			if (dp_clocks[i] > mode_clock) {
++				if (i < 2)
++					return 1;
++				else if (i < 4)
++					return 2;
++				else
++					return 4;
++			}
++		}
++		break;
++	}
++
++	return 0;
++}
++
++static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
++{
++	int i;
++	u8 max_link_bw;
++	u8 max_lane_count;
++
++	if (!dpcd)
++		return 0;
++
++	max_link_bw = dpcd[DP_MAX_LINK_RATE];
++	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
++
++	switch (max_link_bw) {
++	case DP_LINK_BW_1_62:
++	default:
++		for (i = 0; i < num_dp_clocks; i++) {
++			if (i % 2)
++				continue;
++			switch (max_lane_count) {
++			case 1:
++				if (i > 1)
++					return 0;
++				break;
++			case 2:
++				if (i > 3)
++					return 0;
++				break;
++			case 4:
++			default:
++				break;
++			}
++			if (dp_clocks[i] > mode_clock)
++				return 162000;
++		}
++		break;
++	case DP_LINK_BW_2_7:
++		for (i = 0; i < num_dp_clocks; i++) {
++			switch (max_lane_count) {
++			case 1:
++				if (i > 1)
++					return 0;
++				break;
++			case 2:
++				if (i > 3)
++					return 0;
++				break;
++			case 4:
++			default:
++				break;
++			}
++			if (dp_clocks[i] > mode_clock)
++				return (i % 2) ? 270000 : 162000;
++		}
++	}
++
++	return 0;
++}
++
++int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
++{
++	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
++	int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
++
++	if ((lanes == 0) || (bw == 0))
++		return MODE_CLOCK_HIGH;
++
++	return MODE_OK;
++}
++
++static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
++{
++	return link_status[r - DP_LANE0_1_STATUS];
++}
++
++static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
++			     int lane)
++{
++	int i = DP_LANE0_1_STATUS + (lane >> 1);
++	int s = (lane & 1) * 4;
++	u8 l = dp_link_status(link_status, i);
++	return (l >> s) & 0xf;
++}
++
++static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
++				 int lane_count)
++{
++	int lane;
++	u8 lane_status;
++
++	for (lane = 0; lane < lane_count; lane++) {
++		lane_status = dp_get_lane_status(link_status, lane);
++		if ((lane_status & DP_LANE_CR_DONE) == 0)
++			return false;
++	}
++	return true;
++}
++
++static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
++			     int lane_count)
++{
++	u8 lane_align;
++	u8 lane_status;
++	int lane;
++
++	lane_align = dp_link_status(link_status,
++				    DP_LANE_ALIGN_STATUS_UPDATED);
++	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
++		return false;
++	for (lane = 0; lane < lane_count; lane++) {
++		lane_status = dp_get_lane_status(link_status, lane);
++		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
++			return false;
++	}
++	return true;
++}
++
++static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
++					int lane)
++
++{
++	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
++	int s = ((lane & 1) ?
++		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
++		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
++	u8 l = dp_link_status(link_status, i);
++
++	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
++}
++
++static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
++					     int lane)
++{
++	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
++	int s = ((lane & 1) ?
++		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
++		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
++	u8 l = dp_link_status(link_status, i);
++
++	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
++}
++
++/* XXX fix me -- chip specific */
++#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
++static u8 dp_pre_emphasis_max(u8 voltage_swing)
++{
++	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
++	case DP_TRAIN_VOLTAGE_SWING_400:
++		return DP_TRAIN_PRE_EMPHASIS_6;
++	case DP_TRAIN_VOLTAGE_SWING_600:
++		return DP_TRAIN_PRE_EMPHASIS_6;
++	case DP_TRAIN_VOLTAGE_SWING_800:
++		return DP_TRAIN_PRE_EMPHASIS_3_5;
++	case DP_TRAIN_VOLTAGE_SWING_1200:
++	default:
++		return DP_TRAIN_PRE_EMPHASIS_0;
++	}
++}
++
++static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
++				int lane_count,
++				u8 train_set[4])
++{
++	u8 v = 0;
++	u8 p = 0;
++	int lane;
++
++	for (lane = 0; lane < lane_count; lane++) {
++		u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
++		u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
++
++		DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
++			  lane,
++			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
++			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
++
++		if (this_v > v)
++			v = this_v;
++		if (this_p > p)
++			p = this_p;
++	}
++
++	if (v >= DP_VOLTAGE_MAX)
++		v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
++
++	if (p >= dp_pre_emphasis_max(v))
++		p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
++
++	DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
++		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
++		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
++
++	for (lane = 0; lane < 4; lane++)
++		train_set[lane] = v | p;
++}
++
++
++/* radeon aux chan functions */
++bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
++			   int num_bytes, u8 *read_byte,
++			   u8 read_buf_len, u8 delay)
++{
++	struct drm_device *dev = chan->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
++	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
++	unsigned char *base;
++	int retry_count = 0;
++
++	memset(&args, 0, sizeof(args));
++
++	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
++
++retry:
++	memcpy(base, req_bytes, num_bytes);
++
++	args.lpAuxRequest = 0;
++	args.lpDataOut = 16;
++	args.ucDataOutLen = 0;
++	args.ucChannelID = chan->rec.i2c_id;
++	args.ucDelay = delay / 10;
++
++	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++
++	if (args.ucReplyStatus && !args.ucDataOutLen) {
++		if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
++			goto retry;
++		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
++			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
++			  chan->rec.i2c_id, args.ucReplyStatus, retry_count);
++		return false;
++	}
++
++	if (args.ucDataOutLen && read_byte && read_buf_len) {
++		if (read_buf_len < args.ucDataOutLen) {
++			DRM_ERROR("Buffer too small for return answer %d %d\n",
++				  read_buf_len, args.ucDataOutLen);
++			return false;
++		}
++		{
++			int len = min(read_buf_len, args.ucDataOutLen);
++			memcpy(read_byte, base + 16, len);
++		}
++	}
++	return true;
++}
++
++bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
++				uint8_t send_bytes, uint8_t *send)
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++	u8 msg[20];
++	u8 msg_len, dp_msg_len;
++	bool ret;
++
++	dp_msg_len = 4;
++	msg[0] = address;
++	msg[1] = address >> 8;
++	msg[2] = AUX_NATIVE_WRITE << 4;
++	dp_msg_len += send_bytes;
++	msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
++
++	if (send_bytes > 16)
++		return false;
++
++	memcpy(&msg[4], send, send_bytes);
++	msg_len = 4 + send_bytes;
++	ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
++	return ret;
++}
++
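As a concrete illustration of the header built above: a one-byte native write to DPCD address 0x0600 (DP_SET_POWER) yields the four bytes below. AUX_NATIVE_WRITE is 0x8 in the DRM DP helpers of this era, and the length byte packs the total message length in its high nibble and (send_bytes - 1) in its low nibble:

    #include <stdio.h>

    #define AUX_NATIVE_WRITE 0x8   /* per drm_dp_helper.h */

    int main(void)
    {
        unsigned short address = 0x0600;       /* DP_SET_POWER */
        unsigned char send_bytes = 1;
        unsigned char dp_msg_len = 4 + send_bytes;
        unsigned char msg[4];

        msg[0] = address & 0xff;                        /* 0x00 */
        msg[1] = address >> 8;                          /* 0x06 */
        msg[2] = AUX_NATIVE_WRITE << 4;                 /* 0x80 */
        msg[3] = (dp_msg_len << 4) | (send_bytes - 1);  /* 0x50 */

        printf("%02x %02x %02x %02x\n", msg[0], msg[1], msg[2], msg[3]);
        return 0;
    }
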
++bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
++			       uint8_t delay, uint8_t expected_bytes,
++			       uint8_t *read_p)
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++	u8 msg[20];
++	u8 msg_len, dp_msg_len;
++	bool ret = false;
++	msg_len = 4;
++	dp_msg_len = 4;
++	msg[0] = address;
++	msg[1] = address >> 8;
++	msg[2] = AUX_NATIVE_READ << 4;
++	msg[3] = (dp_msg_len) << 4;
++	msg[3] |= expected_bytes - 1;
++
++	ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
++	return ret;
++}
++
++/* radeon dp functions */
++static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
++				    uint8_t ucconfig, uint8_t lane_num)
++{
++	DP_ENCODER_SERVICE_PARAMETERS args;
++	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
++
++	memset(&args, 0, sizeof(args));
++	args.ucLinkClock = dp_clock / 10;
++	args.ucConfig = ucconfig;
++	args.ucAction = action;
++	args.ucLaneNum = lane_num;
++	args.ucStatus = 0;
++
++	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++	return args.ucStatus;
++}
++
++u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++	struct drm_device *dev = radeon_connector->base.dev;
++	struct radeon_device *rdev = dev->dev_private;
++
++	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
++					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
++}
++
++bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++	u8 msg[25];
++	int ret;
++
++	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
++	if (ret) {
++		memcpy(dig_connector->dpcd, msg, 8);
++		{
++			int i;
++			DRM_DEBUG("DPCD: ");
++			for (i = 0; i < 8; i++)
++				DRM_DEBUG("%02x ", msg[i]);
++			DRM_DEBUG("\n");
++		}
++		return true;
++	}
++	dig_connector->dpcd[0] = 0;
++	return false;
++}
++
++void radeon_dp_set_link_config(struct drm_connector *connector,
++			       struct drm_display_mode *mode)
++{
++	struct radeon_connector *radeon_connector;
++	struct radeon_connector_atom_dig *dig_connector;
++
++	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
++	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
++		return;
++
++	radeon_connector = to_radeon_connector(connector);
++	if (!radeon_connector->con_priv)
++		return;
++	dig_connector = radeon_connector->con_priv;
++
++	dig_connector->dp_clock =
++		dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
++	dig_connector->dp_lane_count =
++		dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
++}
++
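dp_link_clock_for_mode_clock() and dp_lanes_for_mode_clock() are defined earlier in this patch; the sizing rule they implement is, in essence, the standard DP capacity formula. A standalone sketch of that arithmetic, assuming 24 bpp and 8b/10b framing (so each lane carries 8 data bits per link-clock symbol):

    #include <stdio.h>

    /* max pixel clock a link can carry, both clocks in kHz:
     * lanes * link_clock * 8 bits per symbol / bits per pixel */
    static int dp_max_pixel_clock(int lanes, int link_clock_khz, int bpp)
    {
        return (lanes * link_clock_khz * 8) / bpp;
    }

    int main(void)
    {
        /* 1920x1080@60 needs a ~148500 kHz pixel clock at 24 bpp:
         * two lanes at 2.7 GHz are enough, two lanes at 1.62 GHz are not */
        printf("2 lanes @ 2.7 GHz:  %d kHz\n", dp_max_pixel_clock(2, 270000, 24));
        printf("2 lanes @ 1.62 GHz: %d kHz\n", dp_max_pixel_clock(2, 162000, 24));
        return 0;
    }
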
++int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
++				struct drm_display_mode *mode)
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++
++	return dp_mode_valid(dig_connector->dpcd, mode->clock);
++}
++
++static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
++				    u8 link_status[DP_LINK_STATUS_SIZE])
++{
++	int ret;
++	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
++					DP_LINK_STATUS_SIZE, link_status);
++	if (!ret) {
++		DRM_ERROR("displayport link status failed\n");
++		return false;
++	}
++
++	DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
++		  link_status[0], link_status[1], link_status[2],
++		  link_status[3], link_status[4], link_status[5]);
++	return true;
++}
++
++bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++	u8 link_status[DP_LINK_STATUS_SIZE];
++
++	if (!atom_dp_get_link_status(radeon_connector, link_status))
++		return false;
++	if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
++		return false;
++	return true;
++}
++
++static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++
++	if (dig_connector->dpcd[0] >= 0x11) {
++		radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
++					   &power_state);
++	}
++}
++
++static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
++{
++	radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
++				   &downspread);
++}
++
++static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
++				 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
++{
++	radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
++				   link_configuration);
++}
++
++static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
++				struct drm_encoder *encoder,
++				u8 train_set[4])
++{
++	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
++	int i;
++
++	for (i = 0; i < dig_connector->dp_lane_count; i++)
++		atombios_dig_transmitter_setup(encoder,
++					       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
++					       i, train_set[i]);
++
++	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
++				   dig_connector->dp_lane_count, train_set);
++}
++
++static void dp_set_training(struct radeon_connector *radeon_connector,
++			    u8 training)
++{
++	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
++				   1, &training);
++}
++
++void dp_link_train(struct drm_encoder *encoder,
++		   struct drm_connector *connector)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct radeon_encoder_atom_dig *dig;
++	struct radeon_connector *radeon_connector;
++	struct radeon_connector_atom_dig *dig_connector;
++	int enc_id = 0;
++	bool clock_recovery, channel_eq;
++	u8 link_status[DP_LINK_STATUS_SIZE];
++	u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
++	u8 tries, voltage;
++	u8 train_set[4];
++	int i;
++
++	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
++	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
++		return;
++
++	if (!radeon_encoder->enc_priv)
++		return;
++	dig = radeon_encoder->enc_priv;
++
++	radeon_connector = to_radeon_connector(connector);
++	if (!radeon_connector->con_priv)
++		return;
++	dig_connector = radeon_connector->con_priv;
++
++	if (dig->dig_encoder)
++		enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
++	else
++		enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
++	if (dig_connector->linkb)
++		enc_id |= ATOM_DP_CONFIG_LINK_B;
++	else
++		enc_id |= ATOM_DP_CONFIG_LINK_A;
++
++	memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
++	if (dig_connector->dp_clock == 270000)
++		link_configuration[0] = DP_LINK_BW_2_7;
++	else
++		link_configuration[0] = DP_LINK_BW_1_62;
++	link_configuration[1] = dig_connector->dp_lane_count;
++	if (dig_connector->dpcd[0] >= 0x11)
++		link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
++
++	/* power up the sink */
++	dp_set_power(radeon_connector, DP_SET_POWER_D0);
++	/* disable the training pattern on the sink */
++	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
++	/* set link bw and lanes on the sink */
++	dp_set_link_bw_lanes(radeon_connector, link_configuration);
++	/* disable downspread on the sink */
++	dp_set_downspread(radeon_connector, 0);
++	/* start training on the source */
++	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
++				  dig_connector->dp_clock, enc_id, 0);
++	/* set training pattern 1 on the source */
++	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
++				  dig_connector->dp_clock, enc_id, 0);
++
++	/* set initial vs/emph */
++	memset(train_set, 0, 4);
++	udelay(400);
++	/* set training pattern 1 on the sink */
++	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
++
++	dp_update_dpvs_emph(radeon_connector, encoder, train_set);
++
++	/* clock recovery loop */
++	clock_recovery = false;
++	tries = 0;
++	voltage = 0xff;
++	for (;;) {
++		udelay(100);
++		if (!atom_dp_get_link_status(radeon_connector, link_status))
++			break;
++
++		if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
++			clock_recovery = true;
++			break;
++		}
++
++		for (i = 0; i < dig_connector->dp_lane_count; i++) {
++			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
++				break;
++		}
++		if (i == dig_connector->dp_lane_count) {
++			DRM_ERROR("clock recovery reached max voltage\n");
++			break;
++		}
++
++		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
++			++tries;
++			if (tries == 5) {
++				DRM_ERROR("clock recovery tried 5 times\n");
++				break;
++			}
++		} else
++			tries = 0;
++
++		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
++
++		/* Compute new train_set as requested by sink */
++		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
++		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
++	}
++	if (!clock_recovery)
++		DRM_ERROR("clock recovery failed\n");
++	else
++		DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
++			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
++			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
++			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
++
++
++	/* set training pattern 2 on the sink */
++	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
++	/* set training pattern 2 on the source */
++	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
++				  dig_connector->dp_clock, enc_id, 1);
++
++	/* channel equalization loop */
++	tries = 0;
++	channel_eq = false;
++	for (;;) {
++		udelay(400);
++		if (!atom_dp_get_link_status(radeon_connector, link_status))
++			break;
++
++		if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
++			channel_eq = true;
++			break;
++		}
++
++		/* Try 5 times */
++		if (tries > 5) {
++			DRM_ERROR("channel eq failed: 5 tries\n");
++			break;
++		}
++
++		/* Compute new train_set as requested by sink */
++		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
++		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
++
++		tries++;
++	}
++
++	if (!channel_eq)
++		DRM_ERROR("channel eq failed\n");
++	else
++		DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
++			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
++			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
++			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
++
++	/* disable the training pattern on the sink */
++	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
++
++	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
++				  dig_connector->dp_clock, enc_id, 0);
++}
++
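One subtlety in the clock-recovery loop above is the abort condition: the counter only advances while the sink keeps requesting the same voltage swing, and resets whenever a new level is requested. A small standalone model of that guard:

    #include <stdio.h>

    /* returns 0 if training would abort ("clock recovery tried 5 times") */
    static int converges(const unsigned char *req_voltage, int n)
    {
        unsigned char voltage = 0xff;   /* matches the sentinel used above */
        int tries = 0, i;

        for (i = 0; i < n; i++) {
            if (req_voltage[i] == voltage) {
                if (++tries == 5)
                    return 0;           /* stuck at one level: give up */
            } else {
                tries = 0;              /* sink asked for a new level */
            }
            voltage = req_voltage[i];
        }
        return 1;
    }

    int main(void)
    {
        unsigned char stuck[8]  = { 1, 1, 1, 1, 1, 1, 1, 1 };
        unsigned char moving[8] = { 0, 1, 1, 2, 2, 3, 3, 3 };

        printf("stuck: %d  moving: %d\n",
               converges(stuck, 8), converges(moving, 8));
        return 0;
    }
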
++int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
++			 uint8_t write_byte, uint8_t *read_byte)
++{
++	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
++	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
++	int ret = 0;
++	uint16_t address = algo_data->address;
++	uint8_t msg[5];
++	uint8_t reply[2];
++	int msg_len, dp_msg_len;
++	int reply_bytes;
++
++	/* Set up the command byte */
++	if (mode & MODE_I2C_READ)
++		msg[2] = AUX_I2C_READ << 4;
++	else
++		msg[2] = AUX_I2C_WRITE << 4;
++
++	if (!(mode & MODE_I2C_STOP))
++		msg[2] |= AUX_I2C_MOT << 4;
++
++	msg[0] = address;
++	msg[1] = address >> 8;
++
++	reply_bytes = 1;
++
++	msg_len = 4;
++	dp_msg_len = 3;
++	switch (mode) {
++	case MODE_I2C_WRITE:
++		msg[4] = write_byte;
++		msg_len++;
++		dp_msg_len += 2;
++		break;
++	case MODE_I2C_READ:
++		dp_msg_len += 1;
++		break;
++	default:
++		break;
++	}
++
++	msg[3] = (dp_msg_len) << 4;
++	ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
++
++	if (ret) {
++		if (read_byte)
++			*read_byte = reply[0];
++		return reply_bytes;
++	}
++	return -EREMOTEIO;
++}
++
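The AUX_I2C_MOT bit set above is the Middle-Of-Transaction flag: it keeps the emulated I2C transfer open across AUX transactions until the caller requests a STOP, which is how a multi-byte EDID read stays a single I2C transaction. Building the command byte in isolation (flag values as in the DRM DP helpers of this era):

    #include <stdio.h>

    #define AUX_I2C_WRITE  0x0
    #define AUX_I2C_READ   0x1
    #define AUX_I2C_MOT    0x4

    #define MODE_I2C_READ  0x4
    #define MODE_I2C_STOP  0x8

    static unsigned char aux_i2c_command(int mode)
    {
        unsigned char cmd;

        cmd = (mode & MODE_I2C_READ ? AUX_I2C_READ : AUX_I2C_WRITE) << 4;
        if (!(mode & MODE_I2C_STOP))
            cmd |= AUX_I2C_MOT << 4;   /* more bytes follow in this transfer */
        return cmd;
    }

    int main(void)
    {
        printf("read, more to come: 0x%02x\n",
               aux_i2c_command(MODE_I2C_READ));                   /* 0x50 */
        printf("read, final byte:   0x%02x\n",
               aux_i2c_command(MODE_I2C_READ | MODE_I2C_STOP));   /* 0x10 */
        return 0;
    }
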
+diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
+index 0d79577..607241c 100644
+--- a/drivers/gpu/drm/radeon/mkregtable.c
++++ b/drivers/gpu/drm/radeon/mkregtable.c
+@@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename)
+ 	fseek(file, 0, SEEK_SET);
+ 
+ 	/* get header */
+-	if (fgets(buf, 1024, file) == NULL)
++	if (fgets(buf, 1024, file) == NULL) {
++		fclose(file);
+ 		return -1;
++	}
+ 
+ 	/* first line will contain the last register
+ 	 * and gpu name */
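The mkregtable hunk above plugs a FILE handle leak: the early return taken when the header line cannot be read previously skipped the fclose(). The general shape of the fix, in a standalone form:

    #include <stdio.h>

    static int read_header(const char *path, char *buf, int len)
    {
        FILE *file = fopen(path, "r");

        if (file == NULL)
            return -1;
        if (fgets(buf, len, file) == NULL) {
            fclose(file);    /* this release was the missing piece */
            return -1;
        }
        fclose(file);
        return 0;
    }

    int main(void)
    {
        char line[128];

        return read_header("/etc/hostname", line, sizeof(line));
    }
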
+diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
+index c9e93ea..c0d4650 100644
+--- a/drivers/gpu/drm/radeon/r100.c
++++ b/drivers/gpu/drm/radeon/r100.c
+@@ -65,6 +65,96 @@ MODULE_FIRMWARE(FIRMWARE_R520);
+  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+  */
+ 
++/* hpd for digital panel detect/disconnect */
++bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
++{
++	bool connected = false;
++
++	switch (hpd) {
++	case RADEON_HPD_1:
++		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
++			connected = true;
++		break;
++	case RADEON_HPD_2:
++		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
++			connected = true;
++		break;
++	default:
++		break;
++	}
++	return connected;
++}
++
++void r100_hpd_set_polarity(struct radeon_device *rdev,
++			   enum radeon_hpd_id hpd)
++{
++	u32 tmp;
++	bool connected = r100_hpd_sense(rdev, hpd);
++
++	switch (hpd) {
++	case RADEON_HPD_1:
++		tmp = RREG32(RADEON_FP_GEN_CNTL);
++		if (connected)
++			tmp &= ~RADEON_FP_DETECT_INT_POL;
++		else
++			tmp |= RADEON_FP_DETECT_INT_POL;
++		WREG32(RADEON_FP_GEN_CNTL, tmp);
++		break;
++	case RADEON_HPD_2:
++		tmp = RREG32(RADEON_FP2_GEN_CNTL);
++		if (connected)
++			tmp &= ~RADEON_FP2_DETECT_INT_POL;
++		else
++			tmp |= RADEON_FP2_DETECT_INT_POL;
++		WREG32(RADEON_FP2_GEN_CNTL, tmp);
++		break;
++	default:
++		break;
++	}
++}
++
++void r100_hpd_init(struct radeon_device *rdev)
++{
++	struct drm_device *dev = rdev->ddev;
++	struct drm_connector *connector;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++		switch (radeon_connector->hpd.hpd) {
++		case RADEON_HPD_1:
++			rdev->irq.hpd[0] = true;
++			break;
++		case RADEON_HPD_2:
++			rdev->irq.hpd[1] = true;
++			break;
++		default:
++			break;
++		}
++	}
++	if (rdev->irq.installed)
++		r100_irq_set(rdev);
++}
++
++void r100_hpd_fini(struct radeon_device *rdev)
++{
++	struct drm_device *dev = rdev->ddev;
++	struct drm_connector *connector;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++		switch (radeon_connector->hpd.hpd) {
++		case RADEON_HPD_1:
++			rdev->irq.hpd[0] = false;
++			break;
++		case RADEON_HPD_2:
++			rdev->irq.hpd[1] = false;
++			break;
++		default:
++			break;
++		}
++	}
++}
++
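The set_polarity helpers follow a pattern worth spelling out: the FP_DETECT interrupts match on a level, so after every hot-plug event the driver samples the current sense state and re-arms the interrupt to fire on the opposite state. In miniature (the bit position is a placeholder, not the real RADEON_FP_DETECT_INT_POL value):

    #include <stdio.h>

    #define FP_DETECT_INT_POL (1u << 27)   /* placeholder bit position */

    static unsigned int rearm(unsigned int ctl, int connected)
    {
        /* invert the polarity so the next state change raises an interrupt */
        return connected ? (ctl & ~FP_DETECT_INT_POL)
                         : (ctl |  FP_DETECT_INT_POL);
    }

    int main(void)
    {
        printf("plugged in: 0x%08x\n", rearm(0xffffffffu, 1));
        printf("unplugged:  0x%08x\n", rearm(0x00000000u, 0));
        return 0;
    }
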
+ /*
+  * PCI GART
+  */
+@@ -94,6 +184,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
+ 	return radeon_gart_table_ram_alloc(rdev);
+ }
+ 
++/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
++void r100_enable_bm(struct radeon_device *rdev)
++{
++	uint32_t tmp;
++	/* Enable bus mastering */
++	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
++	WREG32(RADEON_BUS_CNTL, tmp);
++}
++
+ int r100_pci_gart_enable(struct radeon_device *rdev)
+ {
+ 	uint32_t tmp;
+@@ -105,9 +204,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
+ 	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
+ 	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ 	WREG32(RADEON_AIC_HI_ADDR, tmp);
+-	/* Enable bus mastering */
+-	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+-	WREG32(RADEON_BUS_CNTL, tmp);
+ 	/* set PCI GART page-table base address */
+ 	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
+ 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
+@@ -148,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev)
+ {
+ 	uint32_t tmp = 0;
+ 
++	if (!rdev->irq.installed) {
++		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
++		WREG32(R_000040_GEN_INT_CNTL, 0);
++		return -EINVAL;
++	}
+ 	if (rdev->irq.sw_int) {
+ 		tmp |= RADEON_SW_INT_ENABLE;
+ 	}
+@@ -157,6 +258,12 @@ int r100_irq_set(struct radeon_device *rdev)
+ 	if (rdev->irq.crtc_vblank_int[1]) {
+ 		tmp |= RADEON_CRTC2_VBLANK_MASK;
+ 	}
++	if (rdev->irq.hpd[0]) {
++		tmp |= RADEON_FP_DETECT_MASK;
++	}
++	if (rdev->irq.hpd[1]) {
++		tmp |= RADEON_FP2_DETECT_MASK;
++	}
+ 	WREG32(RADEON_GEN_INT_CNTL, tmp);
+ 	return 0;
+ }
+@@ -175,8 +282,9 @@ void r100_irq_disable(struct radeon_device *rdev)
+ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
+ {
+ 	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
+-	uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
+-		RADEON_CRTC2_VBLANK_STAT;
++	uint32_t irq_mask = RADEON_SW_INT_TEST |
++		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
++		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
+ 
+ 	if (irqs) {
+ 		WREG32(RADEON_GEN_INT_STATUS, irqs);
+@@ -187,6 +295,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
+ int r100_irq_process(struct radeon_device *rdev)
+ {
+ 	uint32_t status, msi_rearm;
++	bool queue_hotplug = false;
+ 
+ 	status = r100_irq_ack(rdev);
+ 	if (!status) {
+@@ -207,8 +316,18 @@ int r100_irq_process(struct radeon_device *rdev)
+ 		if (status & RADEON_CRTC2_VBLANK_STAT) {
+ 			drm_handle_vblank(rdev->ddev, 1);
+ 		}
++		if (status & RADEON_FP_DETECT_STAT) {
++			queue_hotplug = true;
++			DRM_DEBUG("HPD1\n");
++		}
++		if (status & RADEON_FP2_DETECT_STAT) {
++			queue_hotplug = true;
++			DRM_DEBUG("HPD2\n");
++		}
+ 		status = r100_irq_ack(rdev);
+ 	}
++	if (queue_hotplug)
++		queue_work(rdev->wq, &rdev->hotplug_work);
+ 	if (rdev->msi_enabled) {
+ 		switch (rdev->family) {
+ 		case CHIP_RS400:
+@@ -235,14 +354,25 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+ 		return RREG32(RADEON_CRTC2_CRNT_FRAME);
+ }
+ 
++/* Whoever calls radeon_fence_emit should call ring_lock and ask
++ * for enough space (today the callers are ib schedule and buffer move) */
+ void r100_fence_ring_emit(struct radeon_device *rdev,
+ 			  struct radeon_fence *fence)
+ {
+-	/* Who ever call radeon_fence_emit should call ring_lock and ask
+-	 * for enough space (today caller are ib schedule and buffer move) */
++	/* We have to make sure that caches are flushed before
++	 * the CPU might read something from VRAM. */
++	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
++	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
++	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
+ 	/* Wait until IDLE & CLEAN */
+ 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
+ 	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
++	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
++				RADEON_HDP_READ_BUFFER_INVALIDATE);
++	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
+ 	/* Emit fence sequence & fire IRQ */
+ 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+ 	radeon_ring_write(rdev, fence->seq);
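The rewritten fence emit is all about ordering: the destination and Z caches are flushed, the pipe drained, and the host-path read buffer invalidated before the sequence number is published, so a CPU that observes the fence also observes coherent VRAM. The same sequence, restated at printf level:

    #include <stdio.h>

    static void emit(const char *step) { puts(step); }

    static void fence_emit(unsigned int seq)
    {
        emit("flush RB3D destination cache");
        emit("flush Z cache");
        emit("wait until IDLE & CLEAN");
        emit("HDP read buffer invalidate (then restore HOST_PATH_CNTL)");
        printf("write fence sequence %u\n", seq);   /* only now */
        emit("fire IRQ");
    }

    int main(void) { fence_emit(42); return 0; }
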
+@@ -255,24 +385,27 @@ int r100_wb_init(struct radeon_device *rdev)
+ 	int r;
+ 
+ 	if (rdev->wb.wb_obj == NULL) {
+-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+-					 true,
+-					 RADEON_GEM_DOMAIN_GTT,
+-					 false, &rdev->wb.wb_obj);
++		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
++					RADEON_GEM_DOMAIN_GTT,
++					&rdev->wb.wb_obj);
+ 		if (r) {
+-			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
++			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
+ 			return r;
+ 		}
+-		r = radeon_object_pin(rdev->wb.wb_obj,
+-				      RADEON_GEM_DOMAIN_GTT,
+-				      &rdev->wb.gpu_addr);
++		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
++		if (unlikely(r != 0))
++			return r;
++		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
++					&rdev->wb.gpu_addr);
+ 		if (r) {
+-			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
++			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
++			radeon_bo_unreserve(rdev->wb.wb_obj);
+ 			return r;
+ 		}
+-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
++		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
++		radeon_bo_unreserve(rdev->wb.wb_obj);
+ 		if (r) {
+-			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
++			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
+ 			return r;
+ 		}
+ 	}
+@@ -290,11 +423,19 @@ void r100_wb_disable(struct radeon_device *rdev)
+ 
+ void r100_wb_fini(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	r100_wb_disable(rdev);
+ 	if (rdev->wb.wb_obj) {
+-		radeon_object_kunmap(rdev->wb.wb_obj);
+-		radeon_object_unpin(rdev->wb.wb_obj);
+-		radeon_object_unref(&rdev->wb.wb_obj);
++		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
++		if (unlikely(r != 0)) {
++			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
++			return;
++		}
++		radeon_bo_kunmap(rdev->wb.wb_obj);
++		radeon_bo_unpin(rdev->wb.wb_obj);
++		radeon_bo_unreserve(rdev->wb.wb_obj);
++		radeon_bo_unref(&rdev->wb.wb_obj);
+ 		rdev->wb.wb = NULL;
+ 		rdev->wb.wb_obj = NULL;
+ 	}
+@@ -1250,7 +1391,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
+ 		case RADEON_TXFORMAT_ARGB4444:
+ 		case RADEON_TXFORMAT_VYUY422:
+ 		case RADEON_TXFORMAT_YVYU422:
+-		case RADEON_TXFORMAT_DXT1:
+ 		case RADEON_TXFORMAT_SHADOW16:
+ 		case RADEON_TXFORMAT_LDUDV655:
+ 		case RADEON_TXFORMAT_DUDV88:
+@@ -1258,12 +1398,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
+ 			break;
+ 		case RADEON_TXFORMAT_ARGB8888:
+ 		case RADEON_TXFORMAT_RGBA8888:
+-		case RADEON_TXFORMAT_DXT23:
+-		case RADEON_TXFORMAT_DXT45:
+ 		case RADEON_TXFORMAT_SHADOW32:
+ 		case RADEON_TXFORMAT_LDUDUV8888:
+ 			track->textures[i].cpp = 4;
+ 			break;
++		case RADEON_TXFORMAT_DXT1:
++			track->textures[i].cpp = 1;
++			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
++			break;
++		case RADEON_TXFORMAT_DXT23:
++		case RADEON_TXFORMAT_DXT45:
++			track->textures[i].cpp = 1;
++			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
++			break;
+ 		}
+ 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+ 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+@@ -1288,17 +1435,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
+ 
+ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+ 					 struct radeon_cs_packet *pkt,
+-					 struct radeon_object *robj)
++					 struct radeon_bo *robj)
+ {
+ 	unsigned idx;
+ 	u32 value;
+ 	idx = pkt->idx + 1;
+ 	value = radeon_get_ib_value(p, idx + 2);
+-	if ((value + 1) > radeon_object_size(robj)) {
++	if ((value + 1) > radeon_bo_size(robj)) {
+ 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
+ 			  "(need %u have %lu) !\n",
+ 			  value + 1,
+-			  radeon_object_size(robj));
++			  radeon_bo_size(robj));
+ 		return -EINVAL;
+ 	}
+ 	return 0;
+@@ -1363,6 +1510,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
+ 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ 			return -EINVAL;
+ 		}
++		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
+ 		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
+ 		track->immd_dwords = pkt->count - 1;
+ 		r = r100_cs_track_check(p->rdev, track);
+@@ -1650,6 +1798,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
+ 	return 0;
+ }
+ 
++void r100_set_common_regs(struct radeon_device *rdev)
++{
++	/* set these so they don't interfere with anything */
++	WREG32(RADEON_OV0_SCALE_CNTL, 0);
++	WREG32(RADEON_SUBPIC_CNTL, 0);
++	WREG32(RADEON_VIPH_CONTROL, 0);
++	WREG32(RADEON_I2C_CNTL_1, 0);
++	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
++	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
++	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
++}
+ 
+ /*
+  * VRAM info
+@@ -2588,13 +2747,14 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
+ 	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
+ 	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
+ 	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
++	DRM_ERROR("compress format            %d\n", t->compress_format);
+ }
+ 
+ static int r100_cs_track_cube(struct radeon_device *rdev,
+ 			      struct r100_cs_track *track, unsigned idx)
+ {
+ 	unsigned face, w, h;
+-	struct radeon_object *cube_robj;
++	struct radeon_bo *cube_robj;
+ 	unsigned long size;
+ 
+ 	for (face = 0; face < 5; face++) {
+@@ -2607,9 +2767,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
+ 
+ 		size += track->textures[idx].cube_info[face].offset;
+ 
+-		if (size > radeon_object_size(cube_robj)) {
++		if (size > radeon_bo_size(cube_robj)) {
+ 			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
+-				  size, radeon_object_size(cube_robj));
++				  size, radeon_bo_size(cube_robj));
+ 			r100_cs_track_texture_print(&track->textures[idx]);
+ 			return -1;
+ 		}
+@@ -2617,10 +2777,40 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
+ 	return 0;
+ }
+ 
++static int r100_track_compress_size(int compress_format, int w, int h)
++{
++	int block_width, block_height, block_bytes;
++	int wblocks, hblocks;
++	int min_wblocks;
++	int sz;
++
++	block_width = 4;
++	block_height = 4;
++
++	switch (compress_format) {
++	case R100_TRACK_COMP_DXT1:
++		block_bytes = 8;
++		min_wblocks = 4;
++		break;
++	default:
++	case R100_TRACK_COMP_DXT35:
++		block_bytes = 16;
++		min_wblocks = 2;
++		break;
++	}
++
++	hblocks = (h + block_height - 1) / block_height;
++	wblocks = (w + block_width - 1) / block_width;
++	if (wblocks < min_wblocks)
++		wblocks = min_wblocks;
++	sz = wblocks * hblocks * block_bytes;
++	return sz;
++}
++
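Worked numbers for the block-size helper above: DXT formats store 4x4 texel blocks, 8 bytes per block for DXT1 and 16 for DXT3/5, and the min_wblocks clamp keeps narrow mip levels from under-counting. A standalone mirror of the computation:

    #include <stdio.h>

    static int compress_size(int dxt1, int w, int h)
    {
        int block_bytes = dxt1 ? 8 : 16;
        int min_wblocks = dxt1 ? 4 : 2;
        int wblocks = (w + 3) / 4;
        int hblocks = (h + 3) / 4;

        if (wblocks < min_wblocks)
            wblocks = min_wblocks;
        return wblocks * hblocks * block_bytes;
    }

    int main(void)
    {
        printf("DXT1   64x64: %d bytes\n", compress_size(1, 64, 64)); /* 2048 */
        printf("DXT3/5 64x64: %d bytes\n", compress_size(0, 64, 64)); /* 4096 */
        printf("DXT1    4x64: %d bytes\n", compress_size(1, 4, 64));  /*  512 */
        return 0;
    }
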
+ static int r100_cs_track_texture_check(struct radeon_device *rdev,
+ 				       struct r100_cs_track *track)
+ {
+-	struct radeon_object *robj;
++	struct radeon_bo *robj;
+ 	unsigned long size;
+ 	unsigned u, i, w, h;
+ 	int ret;
+@@ -2654,9 +2844,15 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
+ 			h = h / (1 << i);
+ 			if (track->textures[u].roundup_h)
+ 				h = roundup_pow_of_two(h);
+-			size += w * h;
++			if (track->textures[u].compress_format) {
++
++				size += r100_track_compress_size(track->textures[u].compress_format, w, h);
++				/* compressed textures are block based */
++			} else
++				size += w * h;
+ 		}
+ 		size *= track->textures[u].cpp;
++
+ 		switch (track->textures[u].tex_coord_type) {
+ 		case 0:
+ 			break;
+@@ -2676,9 +2872,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
+ 				  "%u\n", track->textures[u].tex_coord_type, u);
+ 			return -EINVAL;
+ 		}
+-		if (size > radeon_object_size(robj)) {
++		if (size > radeon_bo_size(robj)) {
+ 			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
+-				  "%lu\n", u, size, radeon_object_size(robj));
++				  "%lu\n", u, size, radeon_bo_size(robj));
+ 			r100_cs_track_texture_print(&track->textures[u]);
+ 			return -EINVAL;
+ 		}
+@@ -2695,15 +2891,19 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ 
+ 	for (i = 0; i < track->num_cb; i++) {
+ 		if (track->cb[i].robj == NULL) {
++			if (!(track->fastfill || track->color_channel_mask ||
++			      track->blend_read_enable)) {
++				continue;
++			}
+ 			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ 			return -EINVAL;
+ 		}
+ 		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
+ 		size += track->cb[i].offset;
+-		if (size > radeon_object_size(track->cb[i].robj)) {
++		if (size > radeon_bo_size(track->cb[i].robj)) {
+ 			DRM_ERROR("[drm] Buffer too small for color buffer %d "
+ 				  "(need %lu have %lu) !\n", i, size,
+-				  radeon_object_size(track->cb[i].robj));
++				  radeon_bo_size(track->cb[i].robj));
+ 			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
+ 				  i, track->cb[i].pitch, track->cb[i].cpp,
+ 				  track->cb[i].offset, track->maxy);
+@@ -2717,10 +2917,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ 		}
+ 		size = track->zb.pitch * track->zb.cpp * track->maxy;
+ 		size += track->zb.offset;
+-		if (size > radeon_object_size(track->zb.robj)) {
++		if (size > radeon_bo_size(track->zb.robj)) {
+ 			DRM_ERROR("[drm] Buffer too small for z buffer "
+ 				  "(need %lu have %lu) !\n", size,
+-				  radeon_object_size(track->zb.robj));
++				  radeon_bo_size(track->zb.robj));
+ 			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
+ 				  track->zb.pitch, track->zb.cpp,
+ 				  track->zb.offset, track->maxy);
+@@ -2738,11 +2938,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ 					  "bound\n", prim_walk, i);
+ 				return -EINVAL;
+ 			}
+-			if (size > radeon_object_size(track->arrays[i].robj)) {
+-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
+-					   "have %lu dwords\n", prim_walk, i,
+-					   size >> 2,
+-					   radeon_object_size(track->arrays[i].robj) >> 2);
++			if (size > radeon_bo_size(track->arrays[i].robj)) {
++				dev_err(rdev->dev, "(PW %u) Vertex array %u "
++					"need %lu dwords have %lu dwords\n",
++					prim_walk, i, size >> 2,
++					radeon_bo_size(track->arrays[i].robj)
++					>> 2);
+ 				DRM_ERROR("Max indices %u\n", track->max_indx);
+ 				return -EINVAL;
+ 			}
+@@ -2756,10 +2957,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
+ 					  "bound\n", prim_walk, i);
+ 				return -EINVAL;
+ 			}
+-			if (size > radeon_object_size(track->arrays[i].robj)) {
+-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
+-					   "have %lu dwords\n", prim_walk, i, size >> 2,
+-					   radeon_object_size(track->arrays[i].robj) >> 2);
++			if (size > radeon_bo_size(track->arrays[i].robj)) {
++				dev_err(rdev->dev, "(PW %u) Vertex array %u "
++					"need %lu dwords have %lu dwords\n",
++					prim_walk, i, size >> 2,
++					radeon_bo_size(track->arrays[i].robj)
++					>> 2);
+ 				return -EINVAL;
+ 			}
+ 		}
+@@ -2821,6 +3024,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
+ 		track->arrays[i].esize = 0x7F;
+ 	}
+ 	for (i = 0; i < track->num_texture; i++) {
++		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
+ 		track->textures[i].pitch = 16536;
+ 		track->textures[i].width = 16536;
+ 		track->textures[i].height = 16536;
+@@ -3101,6 +3305,9 @@ static int r100_startup(struct radeon_device *rdev)
+ {
+ 	int r;
+ 
++	/* set common regs */
++	r100_set_common_regs(rdev);
++	/* program mc */
+ 	r100_mc_program(rdev);
+ 	/* Resume clock */
+ 	r100_clock_startup(rdev);
+@@ -3108,14 +3315,15 @@ static int r100_startup(struct radeon_device *rdev)
+ 	r100_gpu_init(rdev);
+ 	/* Initialize GART (initialize after TTM so we can allocate
+ 	 * memory through TTM but finalize after TTM) */
++	r100_enable_bm(rdev);
+ 	if (rdev->flags & RADEON_IS_PCI) {
+ 		r = r100_pci_gart_enable(rdev);
+ 		if (r)
+ 			return r;
+ 	}
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	r100_irq_set(rdev);
++	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+@@ -3150,6 +3358,8 @@ int r100_resume(struct radeon_device *rdev)
+ 	radeon_combios_asic_init(rdev->ddev);
+ 	/* Resume clock after posting */
+ 	r100_clock_startup(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return r100_startup(rdev);
+ }
+ 
+@@ -3165,16 +3375,16 @@ int r100_suspend(struct radeon_device *rdev)
+ 
+ void r100_fini(struct radeon_device *rdev)
+ {
+-	r100_suspend(rdev);
+ 	r100_cp_fini(rdev);
+ 	r100_wb_fini(rdev);
+ 	r100_ib_fini(rdev);
+ 	radeon_gem_fini(rdev);
+ 	if (rdev->flags & RADEON_IS_PCI)
+ 		r100_pci_gart_fini(rdev);
++	radeon_agp_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+@@ -3195,9 +3405,7 @@ int r100_mc_init(struct radeon_device *rdev)
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		r = radeon_agp_init(rdev);
+ 		if (r) {
+-			printk(KERN_WARNING "[drm] Disabling AGP\n");
+-			rdev->flags &= ~RADEON_IS_AGP;
+-			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
++			radeon_agp_disable(rdev);
+ 		} else {
+ 			rdev->mc.gtt_location = rdev->mc.agp_base;
+ 		}
+@@ -3242,14 +3450,14 @@ int r100_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if cards are posted or not */
+-	if (!radeon_card_posted(rdev) && rdev->bios) {
+-		DRM_INFO("GPU not posted. posting now...\n");
+-		radeon_combios_asic_init(rdev->ddev);
+-	}
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
+ 	/* Set asic errata */
+ 	r100_errata(rdev);
+ 	/* Initialize clocks */
+ 	radeon_get_clock_info(rdev->ddev);
++	/* Initialize power management */
++	radeon_pm_init(rdev);
+ 	/* Get vram informations */
+ 	r100_vram_info(rdev);
+ 	/* Initialize memory controller (also test AGP) */
+@@ -3264,7 +3472,7 @@ int r100_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
+ 	if (rdev->flags & RADEON_IS_PCI) {
+@@ -3278,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
+ 	if (r) {
+ 		/* Something went wrong with the accel init; stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		r100_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
++		radeon_irq_kms_fini(rdev);
+ 		if (rdev->flags & RADEON_IS_PCI)
+ 			r100_pci_gart_fini(rdev);
+-		radeon_irq_kms_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
+index 0daf0d7..b27a699 100644
+--- a/drivers/gpu/drm/radeon/r100_track.h
++++ b/drivers/gpu/drm/radeon/r100_track.h
+@@ -10,26 +10,30 @@
+  * CS functions
+  */
+ struct r100_cs_track_cb {
+-	struct radeon_object	*robj;
++	struct radeon_bo	*robj;
+ 	unsigned		pitch;
+ 	unsigned		cpp;
+ 	unsigned		offset;
+ };
+ 
+ struct r100_cs_track_array {
+-	struct radeon_object	*robj;
++	struct radeon_bo	*robj;
+ 	unsigned		esize;
+ };
+ 
+ struct r100_cs_cube_info {
+-	struct radeon_object	*robj;
+-	unsigned                offset;
++	struct radeon_bo	*robj;
++	unsigned		offset;
+ 	unsigned		width;
+ 	unsigned		height;
+ };
+ 
++#define R100_TRACK_COMP_NONE   0
++#define R100_TRACK_COMP_DXT1   1
++#define R100_TRACK_COMP_DXT35  2
++
+ struct r100_cs_track_texture {
+-	struct radeon_object	*robj;
++	struct radeon_bo	*robj;
+ 	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
+ 	unsigned		pitch;
+ 	unsigned		width;
+@@ -44,6 +48,7 @@ struct r100_cs_track_texture {
+ 	bool			enabled;
+ 	bool			roundup_w;
+ 	bool			roundup_h;
++	unsigned                compress_format;
+ };
+ 
+ struct r100_cs_track_limits {
+@@ -62,13 +67,15 @@ struct r100_cs_track {
+ 	unsigned			immd_dwords;
+ 	unsigned			num_arrays;
+ 	unsigned			max_indx;
++	unsigned			color_channel_mask;
+ 	struct r100_cs_track_array	arrays[11];
+ 	struct r100_cs_track_cb 	cb[R300_MAX_CB];
+ 	struct r100_cs_track_cb 	zb;
+ 	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
+ 	bool				z_enabled;
+ 	bool                            separate_cube;
+-
++	bool				fastfill;
++	bool				blend_read_enable;
+ };
+ 
+ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
+diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
+index eb740fc..ff1e0cd 100644
+--- a/drivers/gpu/drm/radeon/r200.c
++++ b/drivers/gpu/drm/radeon/r200.c
+@@ -371,13 +371,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
+ 		case 5:
+ 		case 6:
+ 		case 7:
++			/* 1D/2D */
+ 			track->textures[i].tex_coord_type = 0;
+ 			break;
+ 		case 1:
+-			track->textures[i].tex_coord_type = 1;
++			/* CUBE */
++			track->textures[i].tex_coord_type = 2;
+ 			break;
+ 		case 2:
+-			track->textures[i].tex_coord_type = 2;
++			/* 3D */
++			track->textures[i].tex_coord_type = 1;
+ 			break;
+ 		}
+ 		break;
+@@ -401,7 +404,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
+ 		case R200_TXFORMAT_Y8:
+ 			track->textures[i].cpp = 1;
+ 			break;
+-		case R200_TXFORMAT_DXT1:
+ 		case R200_TXFORMAT_AI88:
+ 		case R200_TXFORMAT_ARGB1555:
+ 		case R200_TXFORMAT_RGB565:
+@@ -418,9 +420,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
+ 		case R200_TXFORMAT_ABGR8888:
+ 		case R200_TXFORMAT_BGR111110:
+ 		case R200_TXFORMAT_LDVDU8888:
++			track->textures[i].cpp = 4;
++			break;
++		case R200_TXFORMAT_DXT1:
++			track->textures[i].cpp = 1;
++			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
++			break;
+ 		case R200_TXFORMAT_DXT23:
+ 		case R200_TXFORMAT_DXT45:
+-			track->textures[i].cpp = 4;
++			track->textures[i].cpp = 1;
++			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+ 			break;
+ 		}
+ 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
+diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
+index 2f43ee8..43b55a0 100644
+--- a/drivers/gpu/drm/radeon/r300.c
++++ b/drivers/gpu/drm/radeon/r300.c
+@@ -36,7 +36,15 @@
+ #include "rv350d.h"
+ #include "r300_reg_safe.h"
+ 
+-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
++/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
++ *
++ * GPU Errata:
++ * - HOST_PATH_CNTL: the r300 family seems to dislike writes to
++ *   HOST_PATH_CNTL using MMIO to flush the host path read cache; this
++ *   leads to a HARDLOCKUP. However, scheduling such a write on the ring
++ *   seems harmless; I suspect the CP read collides with the flush
++ *   somehow, or maybe the MC, hard to tell. (Jerome Glisse)
++ */
+ 
+ /*
+  * rv370,rv380 PCIE GART
+@@ -137,14 +145,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
+ 
+ void rv370_pcie_gart_disable(struct radeon_device *rdev)
+ {
+-	uint32_t tmp;
++	u32 tmp;
++	int r;
+ 
+ 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
+ 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
+ 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
+ 	if (rdev->gart.table.vram.robj) {
+-		radeon_object_kunmap(rdev->gart.table.vram.robj);
+-		radeon_object_unpin(rdev->gart.table.vram.robj);
++		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
++		if (likely(r == 0)) {
++			radeon_bo_kunmap(rdev->gart.table.vram.robj);
++			radeon_bo_unpin(rdev->gart.table.vram.robj);
++			radeon_bo_unreserve(rdev->gart.table.vram.robj);
++		}
+ 	}
+ }
+ 
+@@ -173,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
+ 	/* Wait until IDLE & CLEAN */
+ 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
+ 	radeon_ring_write(rdev, (1 << 17) | (1 << 16)  | (1 << 9));
++	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
++				RADEON_HDP_READ_BUFFER_INVALIDATE);
++	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
++	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
+ 	/* Emit fence sequence & fire IRQ */
+ 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
+ 	radeon_ring_write(rdev, fence->seq);
+@@ -488,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
+ 
+ 	/* DDR for all card after R300 & IGP */
+ 	rdev->mc.vram_is_ddr = true;
++
+ 	tmp = RREG32(RADEON_MEM_CNTL);
+-	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
+-		rdev->mc.vram_width = 128;
+-	} else {
+-		rdev->mc.vram_width = 64;
++	tmp &= R300_MEM_NUM_CHANNELS_MASK;
++	switch (tmp) {
++	case 0: rdev->mc.vram_width = 64; break;
++	case 1: rdev->mc.vram_width = 128; break;
++	case 2: rdev->mc.vram_width = 256; break;
++	default:  rdev->mc.vram_width = 128; break;
+ 	}
+ 
+ 	r100_vram_init_sizes(rdev);
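This hunk fixes the memory-width probe: the old code treated MEM_NUM_CHANNELS as a flag (128-bit versus 64-bit), which mis-sized 256-bit boards. The decode in isolation, assuming the channel field occupies the low two bits of MEM_CNTL as the mask suggests:

    #include <stdio.h>

    #define R300_MEM_NUM_CHANNELS_MASK 0x03

    static int vram_width(unsigned int mem_cntl)
    {
        switch (mem_cntl & R300_MEM_NUM_CHANNELS_MASK) {
        case 0:  return 64;
        case 1:  return 128;
        case 2:  return 256;
        default: return 128;   /* unknown encoding: assume 128 */
        }
    }

    int main(void)
    {
        printf("MEM_CNTL = 0x2 -> %d-bit bus\n", vram_width(0x2));
        return 0;
    }
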
+@@ -681,7 +702,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 			r100_cs_dump_packet(p, pkt);
+ 			return r;
+ 		}
+-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
++
++		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
++			tile_flags |= R300_TXO_MACRO_TILE;
++		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
++			tile_flags |= R300_TXO_MICRO_TILE;
++
++		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
++		tmp |= tile_flags;
++		ib[idx] = tmp;
+ 		track->textures[i].robj = reloc->robj;
+ 		break;
+ 	/* Tracked registers */
+@@ -847,7 +876,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 		case R300_TX_FORMAT_Z6Y5X5:
+ 		case R300_TX_FORMAT_W4Z4Y4X4:
+ 		case R300_TX_FORMAT_W1Z5Y5X5:
+-		case R300_TX_FORMAT_DXT1:
+ 		case R300_TX_FORMAT_D3DMFT_CxV8U8:
+ 		case R300_TX_FORMAT_B8G8_B8G8:
+ 		case R300_TX_FORMAT_G8R8_G8B8:
+@@ -861,8 +889,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 		case 0x17:
+ 		case R300_TX_FORMAT_FL_I32:
+ 		case 0x1e:
+-		case R300_TX_FORMAT_DXT3:
+-		case R300_TX_FORMAT_DXT5:
+ 			track->textures[i].cpp = 4;
+ 			break;
+ 		case R300_TX_FORMAT_W16Z16Y16X16:
+@@ -873,6 +899,23 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 		case R300_TX_FORMAT_FL_R32G32B32A32:
+ 			track->textures[i].cpp = 16;
+ 			break;
++		case R300_TX_FORMAT_DXT1:
++			track->textures[i].cpp = 1;
++			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
++			break;
++		case R300_TX_FORMAT_ATI2N:
++			if (p->rdev->family < CHIP_R420) {
++				DRM_ERROR("Invalid texture format %u\n",
++					  (idx_value & 0x1F));
++				return -EINVAL;
++			}
++			/* The same rules apply as for DXT3/5. */
++			/* Pass through. */
++		case R300_TX_FORMAT_DXT3:
++		case R300_TX_FORMAT_DXT5:
++			track->textures[i].cpp = 1;
++			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
++			break;
+ 		default:
+ 			DRM_ERROR("Invalid texture format %u\n",
+ 				  (idx_value & 0x1F));
+@@ -932,6 +975,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 			track->textures[i].width_11 = tmp;
+ 			tmp = ((idx_value >> 16) & 1) << 11;
+ 			track->textures[i].height_11 = tmp;
++
++			/* ATI1N */
++			if (idx_value & (1 << 14)) {
++				/* The same rules apply as for DXT1. */
++				track->textures[i].compress_format =
++					R100_TRACK_COMP_DXT1;
++			}
++		} else if (idx_value & (1 << 14)) {
++			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
++			return -EINVAL;
+ 		}
+ 		break;
+ 	case 0x4480:
+@@ -973,6 +1026,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
+ 		}
+ 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+ 		break;
++	case 0x4e0c:
++		/* RB3D_COLOR_CHANNEL_MASK */
++		track->color_channel_mask = idx_value;
++		break;
++	case 0x4d1c:
++		/* ZB_BW_CNTL */
++		track->fastfill = !!(idx_value & (1 << 2));
++		break;
++	case 0x4e04:
++		/* RB3D_BLENDCNTL */
++		track->blend_read_enable = !!(idx_value & (1 << 2));
++		break;
+ 	case 0x4be8:
+ 		/* valid register only on RV530 */
+ 		if (p->rdev->family == CHIP_RV530)
+@@ -1181,6 +1246,9 @@ static int r300_startup(struct radeon_device *rdev)
+ {
+ 	int r;
+ 
++	/* set common regs */
++	r100_set_common_regs(rdev);
++	/* program mc */
+ 	r300_mc_program(rdev);
+ 	/* Resume clock */
+ 	r300_clock_startup(rdev);
+@@ -1193,14 +1261,20 @@ static int r300_startup(struct radeon_device *rdev)
+ 		if (r)
+ 			return r;
+ 	}
++
++	if (rdev->family == CHIP_R300 ||
++	    rdev->family == CHIP_R350 ||
++	    rdev->family == CHIP_RV350)
++		r100_enable_bm(rdev);
++
+ 	if (rdev->flags & RADEON_IS_PCI) {
+ 		r = r100_pci_gart_enable(rdev);
+ 		if (r)
+ 			return r;
+ 	}
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	r100_irq_set(rdev);
++	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+@@ -1237,6 +1311,8 @@ int r300_resume(struct radeon_device *rdev)
+ 	radeon_combios_asic_init(rdev->ddev);
+ 	/* Resume clock after posting */
+ 	r300_clock_startup(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return r300_startup(rdev);
+ }
+ 
+@@ -1254,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
+ 
+ void r300_fini(struct radeon_device *rdev)
+ {
+-	r300_suspend(rdev);
+ 	r100_cp_fini(rdev);
+ 	r100_wb_fini(rdev);
+ 	r100_ib_fini(rdev);
+@@ -1263,9 +1338,10 @@ void r300_fini(struct radeon_device *rdev)
+ 		rv370_pcie_gart_fini(rdev);
+ 	if (rdev->flags & RADEON_IS_PCI)
+ 		r100_pci_gart_fini(rdev);
++	radeon_agp_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+@@ -1303,14 +1379,14 @@ int r300_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if cards are posted or not */
+-	if (!radeon_card_posted(rdev) && rdev->bios) {
+-		DRM_INFO("GPU not posted. posting now...\n");
+-		radeon_combios_asic_init(rdev->ddev);
+-	}
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
+ 	/* Set asic errata */
+ 	r300_errata(rdev);
+ 	/* Initialize clocks */
+ 	radeon_get_clock_info(rdev->ddev);
++	/* Initialize power management */
++	radeon_pm_init(rdev);
+ 	/* Get vram informations */
+ 	r300_vram_info(rdev);
+ 	/* Initialize memory controller (also test AGP) */
+@@ -1325,7 +1401,7 @@ int r300_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
+ 	if (rdev->flags & RADEON_IS_PCIE) {
+@@ -1344,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
+ 	if (r) {
+ 		/* Something went wrong with the accel init; stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		r300_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
++		radeon_irq_kms_fini(rdev);
+ 		if (rdev->flags & RADEON_IS_PCIE)
+ 			rv370_pcie_gart_fini(rdev);
+ 		if (rdev->flags & RADEON_IS_PCI)
+ 			r100_pci_gart_fini(rdev);
+-		radeon_irq_kms_fini(rdev);
++		radeon_agp_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+index cb2e470..34bffa0 100644
+--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
++++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
+@@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
+ 	int sz;
+ 	int addr;
+ 	int type;
+-	int clamp;
++	int isclamp;
+ 	int stride;
+ 	RING_LOCALS;
+ 
+@@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
+ 	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
+ 
+ 	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
+-	clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
++	isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+ 
+ 	addr |= (type << 16);
+-	addr |= (clamp << 17);
++	addr |= (isclamp << 17);
+ 
+ 	stride = type ? 4 : 6;
+ 
+diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
+index 4b7afef..1735a2b 100644
+--- a/drivers/gpu/drm/radeon/r300_reg.h
++++ b/drivers/gpu/drm/radeon/r300_reg.h
+@@ -900,6 +900,7 @@
+ #	define R300_TX_FORMAT_FL_I32		    0x1B
+ #	define R300_TX_FORMAT_FL_I32A32		    0x1C
+ #	define R300_TX_FORMAT_FL_R32G32B32A32	    0x1D
++#	define R300_TX_FORMAT_ATI2N		    0x1F
+ 	/* alpha modes, convenience mostly */
+ 	/* if you have alpha, pick constant appropriate to the
+ 	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
+diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
+index 1cefdbc..d937324 100644
+--- a/drivers/gpu/drm/radeon/r420.c
++++ b/drivers/gpu/drm/radeon/r420.c
+@@ -30,7 +30,15 @@
+ #include "radeon_reg.h"
+ #include "radeon.h"
+ #include "atom.h"
++#include "r100d.h"
+ #include "r420d.h"
++#include "r420_reg_safe.h"
++
++static void r420_set_reg_safe(struct radeon_device *rdev)
++{
++	rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
++	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
++}
+ 
+ int r420_mc_init(struct radeon_device *rdev)
+ {
+@@ -42,9 +50,7 @@ int r420_mc_init(struct radeon_device *rdev)
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		r = radeon_agp_init(rdev);
+ 		if (r) {
+-			printk(KERN_WARNING "[drm] Disabling AGP\n");
+-			rdev->flags &= ~RADEON_IS_AGP;
+-			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
++			radeon_agp_disable(rdev);
+ 		} else {
+ 			rdev->mc.gtt_location = rdev->mc.agp_base;
+ 		}
+@@ -165,10 +171,41 @@ static void r420_clock_resume(struct radeon_device *rdev)
+ 	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
+ }
+ 
++static void r420_cp_errata_init(struct radeon_device *rdev)
++{
++	/* RV410 and R420 can lock up if CP DMA to host memory happens
++	 * while the 2D engine is busy.
++	 *
++	 * The proper workaround is to queue a RESYNC at the beginning
++	 * of the CP init, apparently.
++	 */
++	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
++	radeon_ring_lock(rdev, 8);
++	radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
++	radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
++	radeon_ring_write(rdev, 0xDEADBEEF);
++	radeon_ring_unlock_commit(rdev);
++}
++
++static void r420_cp_errata_fini(struct radeon_device *rdev)
++{
++	/* Catch the RESYNC we dispatched all the way back,
++	 * at the very beginning of the CP init.
++	 */
++	radeon_ring_lock(rdev, 8);
++	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
++	radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
++	radeon_ring_unlock_commit(rdev);
++	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
++}
++
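The two errata hooks above only make sense as a pair around the CP lifecycle; r420_startup() and r420_suspend() later in this file call them in the order sketched here (call-order sketch only, not standalone code):

    /* bring-up */
    r = r100_cp_init(rdev, 1024 * 1024);
    if (r)
        return r;
    r420_cp_errata_init(rdev);   /* queue the RESYNC right after CP init */

    /* ... normal operation ... */

    /* teardown */
    r420_cp_errata_fini(rdev);   /* retire the RESYNC, free the scratch reg */
    r100_cp_disable(rdev);
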
+ static int r420_startup(struct radeon_device *rdev)
+ {
+ 	int r;
+ 
++	/* set common regs */
++	r100_set_common_regs(rdev);
++	/* program mc */
+ 	r300_mc_program(rdev);
+ 	/* Resume clock */
+ 	r420_clock_resume(rdev);
+@@ -186,14 +223,15 @@ static int r420_startup(struct radeon_device *rdev)
+ 	}
+ 	r420_pipes_init(rdev);
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	r100_irq_set(rdev);
++	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+ 		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
+ 		return r;
+ 	}
++	r420_cp_errata_init(rdev);
+ 	r = r100_wb_init(rdev);
+ 	if (r) {
+ 		dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
+@@ -229,12 +267,14 @@ int r420_resume(struct radeon_device *rdev)
+ 	}
+ 	/* Resume clock after posting */
+ 	r420_clock_resume(rdev);
+-
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return r420_startup(rdev);
+ }
+ 
+ int r420_suspend(struct radeon_device *rdev)
+ {
++	r420_cp_errata_fini(rdev);
+ 	r100_cp_disable(rdev);
+ 	r100_wb_disable(rdev);
+ 	r100_irq_disable(rdev);
+@@ -258,7 +298,7 @@ void r420_fini(struct radeon_device *rdev)
+ 	radeon_agp_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	if (rdev->is_atom_bios) {
+ 		radeon_atombios_fini(rdev);
+ 	} else {
+@@ -301,14 +341,9 @@ int r420_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if cards are posted or not */
+-	if (!radeon_card_posted(rdev) && rdev->bios) {
+-		DRM_INFO("GPU not posted. posting now...\n");
+-		if (rdev->is_atom_bios) {
+-			atom_asic_init(rdev->mode_info.atom_context);
+-		} else {
+-			radeon_combios_asic_init(rdev->ddev);
+-		}
+-	}
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
++
+ 	/* Initialize clocks */
+ 	radeon_get_clock_info(rdev->ddev);
+ 	/* Initialize power management */
+@@ -331,10 +366,13 @@ int r420_init(struct radeon_device *rdev)
+ 		return r;
+ 	}
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r) {
+ 		return r;
+ 	}
++	if (rdev->family == CHIP_R420)
++		r100_enable_bm(rdev);
++
+ 	if (rdev->flags & RADEON_IS_PCIE) {
+ 		r = rv370_pcie_gart_init(rdev);
+ 		if (r)
+@@ -345,22 +383,21 @@ int r420_init(struct radeon_device *rdev)
+ 		if (r)
+ 			return r;
+ 	}
+-	r300_set_reg_safe(rdev);
++	r420_set_reg_safe(rdev);
+ 	rdev->accel_working = true;
+ 	r = r420_startup(rdev);
+ 	if (r) {
+ 		/* Something went wrong with the accel init; stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		r420_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
++		radeon_irq_kms_fini(rdev);
+ 		if (rdev->flags & RADEON_IS_PCIE)
+ 			rv370_pcie_gart_fini(rdev);
+ 		if (rdev->flags & RADEON_IS_PCI)
+ 			r100_pci_gart_fini(rdev);
+ 		radeon_agp_fini(rdev);
+-		radeon_irq_kms_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
+index 7baa739..74ad89b 100644
+--- a/drivers/gpu/drm/radeon/r500_reg.h
++++ b/drivers/gpu/drm/radeon/r500_reg.h
+@@ -716,6 +716,8 @@
+ 
+ #define AVIVO_DVOA_BIT_DEPTH_CONTROL			0x7988
+ 
++#define AVIVO_DC_GPIO_HPD_A                 0x7e94
++
+ #define AVIVO_GPIO_0                        0x7e30
+ #define AVIVO_GPIO_1                        0x7e40
+ #define AVIVO_GPIO_2                        0x7e50
+diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
+index f743518..ddf5731 100644
+--- a/drivers/gpu/drm/radeon/r520.c
++++ b/drivers/gpu/drm/radeon/r520.c
+@@ -185,8 +185,8 @@ static int r520_startup(struct radeon_device *rdev)
+ 			return r;
+ 	}
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	rs600_irq_set(rdev);
++	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+@@ -221,6 +221,8 @@ int r520_resume(struct radeon_device *rdev)
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 	/* Resume clock after posting */
+ 	rv515_clock_startup(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return r520_startup(rdev);
+ }
+ 
+@@ -254,6 +256,9 @@ int r520_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if cards are posted or not */
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
++
+ 	if (!radeon_card_posted(rdev) && rdev->bios) {
+ 		DRM_INFO("GPU not posted. posting now...\n");
+ 		atom_asic_init(rdev->mode_info.atom_context);
+@@ -277,7 +282,7 @@ int r520_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
+ 	r = rv370_pcie_gart_init(rdev);
+@@ -289,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
+ 	if (r) {
+ 		/* Something went wrong with the accel init; stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		rv515_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
++		radeon_irq_kms_fini(rdev);
+ 		rv370_pcie_gart_fini(rdev);
+ 		radeon_agp_fini(rdev);
+-		radeon_irq_kms_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 278f646..2ffcf5a 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -38,8 +38,10 @@
+ 
+ #define PFP_UCODE_SIZE 576
+ #define PM4_UCODE_SIZE 1792
++#define RLC_UCODE_SIZE 768
+ #define R700_PFP_UCODE_SIZE 848
+ #define R700_PM4_UCODE_SIZE 1360
++#define R700_RLC_UCODE_SIZE 1024
+ 
+ /* Firmware Names */
+ MODULE_FIRMWARE("radeon/R600_pfp.bin");
+@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
+ MODULE_FIRMWARE("radeon/RV730_me.bin");
+ MODULE_FIRMWARE("radeon/RV710_pfp.bin");
+ MODULE_FIRMWARE("radeon/RV710_me.bin");
++MODULE_FIRMWARE("radeon/R600_rlc.bin");
++MODULE_FIRMWARE("radeon/R700_rlc.bin");
+ 
+ int r600_debugfs_mc_info_init(struct radeon_device *rdev);
+ 
+@@ -70,6 +74,282 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
+ void r600_gpu_init(struct radeon_device *rdev);
+ void r600_fini(struct radeon_device *rdev);
+ 
++/* hpd for digital panel detect/disconnect */
++bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
++{
++	bool connected = false;
++
++	if (ASIC_IS_DCE3(rdev)) {
++		switch (hpd) {
++		case RADEON_HPD_1:
++			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
++				connected = true;
++			break;
++		case RADEON_HPD_2:
++			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
++				connected = true;
++			break;
++		case RADEON_HPD_3:
++			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
++				connected = true;
++			break;
++		case RADEON_HPD_4:
++			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
++				connected = true;
++			break;
++			/* DCE 3.2 */
++		case RADEON_HPD_5:
++			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
++				connected = true;
++			break;
++		case RADEON_HPD_6:
++			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
++				connected = true;
++			break;
++		default:
++			break;
++		}
++	} else {
++		switch (hpd) {
++		case RADEON_HPD_1:
++			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
++				connected = true;
++			break;
++		case RADEON_HPD_2:
++			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
++				connected = true;
++			break;
++		case RADEON_HPD_3:
++			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
++				connected = true;
++			break;
++		default:
++			break;
++		}
++	}
++	return connected;
++}
++
++void r600_hpd_set_polarity(struct radeon_device *rdev,
++			   enum radeon_hpd_id hpd)
++{
++	u32 tmp;
++	bool connected = r600_hpd_sense(rdev, hpd);
++
++	if (ASIC_IS_DCE3(rdev)) {
++		switch (hpd) {
++		case RADEON_HPD_1:
++			tmp = RREG32(DC_HPD1_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HPDx_INT_POLARITY;
++			else
++				tmp |= DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD1_INT_CONTROL, tmp);
++			break;
++		case RADEON_HPD_2:
++			tmp = RREG32(DC_HPD2_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HPDx_INT_POLARITY;
++			else
++				tmp |= DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD2_INT_CONTROL, tmp);
++			break;
++		case RADEON_HPD_3:
++			tmp = RREG32(DC_HPD3_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HPDx_INT_POLARITY;
++			else
++				tmp |= DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD3_INT_CONTROL, tmp);
++			break;
++		case RADEON_HPD_4:
++			tmp = RREG32(DC_HPD4_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HPDx_INT_POLARITY;
++			else
++				tmp |= DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD4_INT_CONTROL, tmp);
++			break;
++		case RADEON_HPD_5:
++			tmp = RREG32(DC_HPD5_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HPDx_INT_POLARITY;
++			else
++				tmp |= DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD5_INT_CONTROL, tmp);
++			break;
++			/* DCE 3.2 */
++		case RADEON_HPD_6:
++			tmp = RREG32(DC_HPD6_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HPDx_INT_POLARITY;
++			else
++				tmp |= DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD6_INT_CONTROL, tmp);
++			break;
++		default:
++			break;
++		}
++	} else {
++		switch (hpd) {
++		case RADEON_HPD_1:
++			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
++			else
++				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
++			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
++			break;
++		case RADEON_HPD_2:
++			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
++			else
++				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
++			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
++			break;
++		case RADEON_HPD_3:
++			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
++			if (connected)
++				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
++			else
++				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
++			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
++			break;
++		default:
++			break;
++		}
++	}
++}
++
++void r600_hpd_init(struct radeon_device *rdev)
++{
++	struct drm_device *dev = rdev->ddev;
++	struct drm_connector *connector;
++
++	if (ASIC_IS_DCE3(rdev)) {
++		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
++		if (ASIC_IS_DCE32(rdev))
++			tmp |= DC_HPDx_EN;
++
++		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++			switch (radeon_connector->hpd.hpd) {
++			case RADEON_HPD_1:
++				WREG32(DC_HPD1_CONTROL, tmp);
++				rdev->irq.hpd[0] = true;
++				break;
++			case RADEON_HPD_2:
++				WREG32(DC_HPD2_CONTROL, tmp);
++				rdev->irq.hpd[1] = true;
++				break;
++			case RADEON_HPD_3:
++				WREG32(DC_HPD3_CONTROL, tmp);
++				rdev->irq.hpd[2] = true;
++				break;
++			case RADEON_HPD_4:
++				WREG32(DC_HPD4_CONTROL, tmp);
++				rdev->irq.hpd[3] = true;
++				break;
++				/* DCE 3.2 */
++			case RADEON_HPD_5:
++				WREG32(DC_HPD5_CONTROL, tmp);
++				rdev->irq.hpd[4] = true;
++				break;
++			case RADEON_HPD_6:
++				WREG32(DC_HPD6_CONTROL, tmp);
++				rdev->irq.hpd[5] = true;
++				break;
++			default:
++				break;
++			}
++		}
++	} else {
++		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++			switch (radeon_connector->hpd.hpd) {
++			case RADEON_HPD_1:
++				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
++				rdev->irq.hpd[0] = true;
++				break;
++			case RADEON_HPD_2:
++				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
++				rdev->irq.hpd[1] = true;
++				break;
++			case RADEON_HPD_3:
++				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
++				rdev->irq.hpd[2] = true;
++				break;
++			default:
++				break;
++			}
++		}
++	}
++	if (rdev->irq.installed)
++		r600_irq_set(rdev);
++}
++
++void r600_hpd_fini(struct radeon_device *rdev)
++{
++	struct drm_device *dev = rdev->ddev;
++	struct drm_connector *connector;
++
++	if (ASIC_IS_DCE3(rdev)) {
++		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++			switch (radeon_connector->hpd.hpd) {
++			case RADEON_HPD_1:
++				WREG32(DC_HPD1_CONTROL, 0);
++				rdev->irq.hpd[0] = false;
++				break;
++			case RADEON_HPD_2:
++				WREG32(DC_HPD2_CONTROL, 0);
++				rdev->irq.hpd[1] = false;
++				break;
++			case RADEON_HPD_3:
++				WREG32(DC_HPD3_CONTROL, 0);
++				rdev->irq.hpd[2] = false;
++				break;
++			case RADEON_HPD_4:
++				WREG32(DC_HPD4_CONTROL, 0);
++				rdev->irq.hpd[3] = false;
++				break;
++				/* DCE 3.2 */
++			case RADEON_HPD_5:
++				WREG32(DC_HPD5_CONTROL, 0);
++				rdev->irq.hpd[4] = false;
++				break;
++			case RADEON_HPD_6:
++				WREG32(DC_HPD6_CONTROL, 0);
++				rdev->irq.hpd[5] = false;
++				break;
++			default:
++				break;
++			}
++		}
++	} else {
++		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++			switch (radeon_connector->hpd.hpd) {
++			case RADEON_HPD_1:
++				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
++				rdev->irq.hpd[0] = false;
++				break;
++			case RADEON_HPD_2:
++				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
++				rdev->irq.hpd[1] = false;
++				break;
++			case RADEON_HPD_3:
++				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
++				rdev->irq.hpd[2] = false;
++				break;
++			default:
++				break;
++			}
++		}
++	}
++}
++
+ /*
+  * R600 PCIE GART
+  */
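
r600_hpd_set_polarity() above is one read-modify-write toggle repeated
per pad: read the pad's INT_CONTROL register, flip the polarity bit
according to the current sense state, and write it back, so the next
state change (plug or unplug) raises an interrupt. Condensed into a
sketch using a hypothetical per-pad register table (the real code spells
out every case because the DCE3 and pre-DCE3 register maps differ):

	/* Sketch only; assumes RADEON_HPD_1 == 0 and a DCE3.2 part. */
	static const u32 hpd_int_control[] = {
		DC_HPD1_INT_CONTROL, DC_HPD2_INT_CONTROL, DC_HPD3_INT_CONTROL,
		DC_HPD4_INT_CONTROL, DC_HPD5_INT_CONTROL, DC_HPD6_INT_CONTROL,
	};

	static void example_set_polarity(struct radeon_device *rdev,
					 enum radeon_hpd_id hpd)
	{
		u32 tmp = RREG32(hpd_int_control[hpd]);

		if (r600_hpd_sense(rdev, hpd))
			tmp &= ~DC_HPDx_INT_POLARITY;	/* connected: fire on unplug */
		else
			tmp |= DC_HPDx_INT_POLARITY;	/* disconnected: fire on plug */
		WREG32(hpd_int_control[hpd], tmp);
	}
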
+@@ -180,7 +460,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
+ void r600_pcie_gart_disable(struct radeon_device *rdev)
+ {
+ 	u32 tmp;
+-	int i;
++	int i, r;
+ 
+ 	/* Disable all tables */
+ 	for (i = 0; i < 7; i++)
+@@ -208,8 +488,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
+ 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
+ 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+ 	if (rdev->gart.table.vram.robj) {
+-		radeon_object_kunmap(rdev->gart.table.vram.robj);
+-		radeon_object_unpin(rdev->gart.table.vram.robj);
++		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
++		if (likely(r == 0)) {
++			radeon_bo_kunmap(rdev->gart.table.vram.robj);
++			radeon_bo_unpin(rdev->gart.table.vram.robj);
++			radeon_bo_unreserve(rdev->gart.table.vram.robj);
++		}
+ 	}
+ }
+ 
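
The hunk above is the typical radeon_object_* to radeon_bo_* conversion
in this patch: the new API requires the buffer object to be reserved
before it may be unmapped or unpinned, and the reservation itself can
fail. The teardown side, reduced to a sketch:

	/* Sketch: kunmap/unpin only under a successful reservation. */
	static void example_bo_teardown(struct radeon_bo *robj)
	{
		int r;

		if (robj == NULL)
			return;
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
	}
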
+@@ -340,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
+ 	fixed20_12 a;
+ 	u32 tmp;
+ 	int chansize, numchan;
+-	int r;
+ 
+ 	/* Get VRAM informations */
+ 	rdev->mc.vram_is_ddr = true;
+@@ -383,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
+ 		rdev->mc.real_vram_size = rdev->mc.aper_size;
+ 
+ 	if (rdev->flags & RADEON_IS_AGP) {
+-		r = radeon_agp_init(rdev);
+-		if (r)
+-			return r;
+ 		/* gtt_size is setup by radeon_agp_init */
+ 		rdev->mc.gtt_location = rdev->mc.agp_base;
+ 		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
+@@ -394,11 +674,11 @@ int r600_mc_init(struct radeon_device *rdev)
+ 		 * AGP so that GPU can catch out of VRAM/AGP access
+ 		 */
+ 		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
+-			/* Enought place before */
++			/* Enough place before */
+ 			rdev->mc.vram_location = rdev->mc.gtt_location -
+ 							rdev->mc.mc_vram_size;
+ 		} else if (tmp > rdev->mc.mc_vram_size) {
+-			/* Enought place after */
++			/* Enough place after */
+ 			rdev->mc.vram_location = rdev->mc.gtt_location +
+ 							rdev->mc.gtt_size;
+ 		} else {
+@@ -443,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev)
+ 	a.full = rfixed_const(100);
+ 	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
+ 	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
++
++	if (rdev->flags & RADEON_IS_IGP)
++		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
++
+ 	return 0;
+ }
+ 
+@@ -1101,7 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+ 	(void)RREG32(PCIE_PORT_DATA);
+ }
+ 
+-
+ /*
+  * CP & Ring
+  */
+@@ -1110,11 +1393,12 @@ void r600_cp_stop(struct radeon_device *rdev)
+ 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ }
+ 
+-int r600_cp_init_microcode(struct radeon_device *rdev)
++int r600_init_microcode(struct radeon_device *rdev)
+ {
+ 	struct platform_device *pdev;
+ 	const char *chip_name;
+-	size_t pfp_req_size, me_req_size;
++	const char *rlc_chip_name;
++	size_t pfp_req_size, me_req_size, rlc_req_size;
+ 	char fw_name[30];
+ 	int err;
+ 
+@@ -1128,30 +1412,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
+ 	}
+ 
+ 	switch (rdev->family) {
+-	case CHIP_R600: chip_name = "R600"; break;
+-	case CHIP_RV610: chip_name = "RV610"; break;
+-	case CHIP_RV630: chip_name = "RV630"; break;
+-	case CHIP_RV620: chip_name = "RV620"; break;
+-	case CHIP_RV635: chip_name = "RV635"; break;
+-	case CHIP_RV670: chip_name = "RV670"; break;
++	case CHIP_R600:
++		chip_name = "R600";
++		rlc_chip_name = "R600";
++		break;
++	case CHIP_RV610:
++		chip_name = "RV610";
++		rlc_chip_name = "R600";
++		break;
++	case CHIP_RV630:
++		chip_name = "RV630";
++		rlc_chip_name = "R600";
++		break;
++	case CHIP_RV620:
++		chip_name = "RV620";
++		rlc_chip_name = "R600";
++		break;
++	case CHIP_RV635:
++		chip_name = "RV635";
++		rlc_chip_name = "R600";
++		break;
++	case CHIP_RV670:
++		chip_name = "RV670";
++		rlc_chip_name = "R600";
++		break;
+ 	case CHIP_RS780:
+-	case CHIP_RS880: chip_name = "RS780"; break;
+-	case CHIP_RV770: chip_name = "RV770"; break;
++	case CHIP_RS880:
++		chip_name = "RS780";
++		rlc_chip_name = "R600";
++		break;
++	case CHIP_RV770:
++		chip_name = "RV770";
++		rlc_chip_name = "R700";
++		break;
+ 	case CHIP_RV730:
+-	case CHIP_RV740: chip_name = "RV730"; break;
+-	case CHIP_RV710: chip_name = "RV710"; break;
++	case CHIP_RV740:
++		chip_name = "RV730";
++		rlc_chip_name = "R700";
++		break;
++	case CHIP_RV710:
++		chip_name = "RV710";
++		rlc_chip_name = "R700";
++		break;
+ 	default: BUG();
+ 	}
+ 
+ 	if (rdev->family >= CHIP_RV770) {
+ 		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
+ 		me_req_size = R700_PM4_UCODE_SIZE * 4;
++		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
+ 	} else {
+ 		pfp_req_size = PFP_UCODE_SIZE * 4;
+ 		me_req_size = PM4_UCODE_SIZE * 12;
++		rlc_req_size = RLC_UCODE_SIZE * 4;
+ 	}
+ 
+-	DRM_INFO("Loading %s CP Microcode\n", chip_name);
++	DRM_INFO("Loading %s Microcode\n", chip_name);
+ 
+ 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ 	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+@@ -1175,6 +1491,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
+ 		       rdev->me_fw->size, fw_name);
+ 		err = -EINVAL;
+ 	}
++
++	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
++	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
++	if (err)
++		goto out;
++	if (rdev->rlc_fw->size != rlc_req_size) {
++		printk(KERN_ERR
++		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
++		       rdev->rlc_fw->size, fw_name);
++		err = -EINVAL;
++	}
++
+ out:
+ 	platform_device_unregister(pdev);
+ 
+@@ -1187,6 +1515,8 @@ out:
+ 		rdev->pfp_fw = NULL;
+ 		release_firmware(rdev->me_fw);
+ 		rdev->me_fw = NULL;
++		release_firmware(rdev->rlc_fw);
++		rdev->rlc_fw = NULL;
+ 	}
+ 	return err;
+ }
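
The renamed loader above follows the usual request_firmware() pattern
for each blob: build the file name, fetch it, validate the size against
the expected ucode length, and release every blob on any failure. The
new RLC leg, isolated into a sketch (pdev is the dummy platform device
the real function registers; the sizes come from the #defines added at
the top of the file):

	/* Sketch: fetch one firmware blob and sanity-check its length. */
	static int example_load_rlc(struct radeon_device *rdev,
				    struct platform_device *pdev,
				    const char *rlc_chip_name)
	{
		char fw_name[30];
		size_t rlc_req_size;
		int err;

		rlc_req_size = (rdev->family >= CHIP_RV770 ?
				R700_RLC_UCODE_SIZE : RLC_UCODE_SIZE) * 4;
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin",
			 rlc_chip_name);
		err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
		if (err)
			return err;
		if (rdev->rlc_fw->size != rlc_req_size) {
			release_firmware(rdev->rlc_fw);
			rdev->rlc_fw = NULL;
			return -EINVAL;
		}
		return 0;
	}
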
+@@ -1324,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
+ 	rdev->cp.align_mask = 16 - 1;
+ }
+ 
++void r600_cp_fini(struct radeon_device *rdev)
++{
++	r600_cp_stop(rdev);
++	radeon_ring_fini(rdev);
++}
++
+ 
+ /*
+  * GPU scratch registers helpers function.
+@@ -1381,10 +1717,16 @@ int r600_ring_test(struct radeon_device *rdev)
+ 
+ void r600_wb_disable(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	WREG32(SCRATCH_UMSK, 0);
+ 	if (rdev->wb.wb_obj) {
+-		radeon_object_kunmap(rdev->wb.wb_obj);
+-		radeon_object_unpin(rdev->wb.wb_obj);
++		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
++		if (unlikely(r != 0))
++			return;
++		radeon_bo_kunmap(rdev->wb.wb_obj);
++		radeon_bo_unpin(rdev->wb.wb_obj);
++		radeon_bo_unreserve(rdev->wb.wb_obj);
+ 	}
+ }
+ 
+@@ -1392,7 +1734,7 @@ void r600_wb_fini(struct radeon_device *rdev)
+ {
+ 	r600_wb_disable(rdev);
+ 	if (rdev->wb.wb_obj) {
+-		radeon_object_unref(&rdev->wb.wb_obj);
++		radeon_bo_unref(&rdev->wb.wb_obj);
+ 		rdev->wb.wb = NULL;
+ 		rdev->wb.wb_obj = NULL;
+ 	}
+@@ -1403,22 +1745,29 @@ int r600_wb_enable(struct radeon_device *rdev)
+ 	int r;
+ 
+ 	if (rdev->wb.wb_obj == NULL) {
+-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+-				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
++		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
++				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
+ 		if (r) {
+-			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
++			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
++			return r;
++		}
++		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
++		if (unlikely(r != 0)) {
++			r600_wb_fini(rdev);
+ 			return r;
+ 		}
+-		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
++		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ 				&rdev->wb.gpu_addr);
+ 		if (r) {
+-			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
++			radeon_bo_unreserve(rdev->wb.wb_obj);
++			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+ 			r600_wb_fini(rdev);
+ 			return r;
+ 		}
+-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
++		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
++		radeon_bo_unreserve(rdev->wb.wb_obj);
+ 		if (r) {
+-			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
++			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+ 			r600_wb_fini(rdev);
+ 			return r;
+ 		}
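
r600_wb_enable() above shows the setup-side ordering that goes with the
new bo API: create, then reserve, then pin and kmap under the
reservation, then unreserve; each failure unwinds through r600_wb_fini().
The sequence with the unwinding trimmed, as a sketch:

	/* Sketch: create -> reserve -> pin -> kmap -> unreserve. */
	static int example_bo_setup(struct radeon_device *rdev,
				    struct radeon_bo **robj, void **cpu_ptr,
				    u64 *gpu_addr)
	{
		int r;

		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, robj);
		if (r)
			return r;
		r = radeon_bo_reserve(*robj, false);
		if (r)
			return r;
		r = radeon_bo_pin(*robj, RADEON_GEM_DOMAIN_GTT, gpu_addr);
		if (!r)
			r = radeon_bo_kmap(*robj, cpu_ptr);
		radeon_bo_unreserve(*robj);
		return r;
	}
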
+@@ -1433,41 +1782,36 @@ int r600_wb_enable(struct radeon_device *rdev)
+ void r600_fence_ring_emit(struct radeon_device *rdev,
+ 			  struct radeon_fence *fence)
+ {
++	/* Also consider EVENT_WRITE_EOP.  it handles the interrupts + timestamps + events */
+ 	/* Emit fence sequence & fire IRQ */
+ 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ 	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ 	radeon_ring_write(rdev, fence->seq);
+-}
+-
+-int r600_copy_dma(struct radeon_device *rdev,
+-		  uint64_t src_offset,
+-		  uint64_t dst_offset,
+-		  unsigned num_pages,
+-		  struct radeon_fence *fence)
+-{
+-	/* FIXME: implement */
+-	return 0;
++	radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
++	radeon_ring_write(rdev, 1);
++	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
++	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
++	radeon_ring_write(rdev, RB_INT_STAT);
+ }
+ 
+ int r600_copy_blit(struct radeon_device *rdev,
+ 		   uint64_t src_offset, uint64_t dst_offset,
+ 		   unsigned num_pages, struct radeon_fence *fence)
+ {
+-	r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
++	int r;
++
++	mutex_lock(&rdev->r600_blit.mutex);
++	rdev->r600_blit.vb_ib = NULL;
++	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
++	if (r) {
++		if (rdev->r600_blit.vb_ib)
++			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
++		mutex_unlock(&rdev->r600_blit.mutex);
++		return r;
++	}
+ 	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+ 	r600_blit_done_copy(rdev, fence);
+-	return 0;
+-}
+-
+-int r600_irq_process(struct radeon_device *rdev)
+-{
+-	/* FIXME: implement */
+-	return 0;
+-}
+-
+-int r600_irq_set(struct radeon_device *rdev)
+-{
+-	/* FIXME: implement */
++	mutex_unlock(&rdev->r600_blit.mutex);
+ 	return 0;
+ }
+ 
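
The r600_copy_blit() rewrite above hides a subtle ownership rule:
r600_blit_prepare_copy() allocates the vertex-buffer IB first and can
still fail afterwards (at radeon_ring_lock(), per the r600_blit_kms.c
hunk further down), so on error the caller can be left holding a
half-built IB that must be freed before the mutex is dropped. Annotated:

	/* Sketch: NULL vb_ib first so a later failure can tell whether
	 * the prepare step got as far as allocating the IB. */
	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
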
+@@ -1506,6 +1850,14 @@ int r600_startup(struct radeon_device *rdev)
+ {
+ 	int r;
+ 
++	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++		r = r600_init_microcode(rdev);
++		if (r) {
++			DRM_ERROR("Failed to load firmware!\n");
++			return r;
++		}
++	}
++
+ 	r600_mc_program(rdev);
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		r600_agp_enable(rdev);
+@@ -1515,13 +1867,33 @@ int r600_startup(struct radeon_device *rdev)
+ 			return r;
+ 	}
+ 	r600_gpu_init(rdev);
+-
+-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+-			      &rdev->r600_blit.shader_gpu_addr);
++	r = r600_blit_init(rdev);
+ 	if (r) {
+-		DRM_ERROR("failed to pin blit object %d\n", r);
++		r600_blit_fini(rdev);
++		rdev->asic->copy = NULL;
++		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
++	}
++	/* pin copy shader into vram */
++	if (rdev->r600_blit.shader_obj) {
++		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
++		if (unlikely(r != 0))
++			return r;
++		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
++				&rdev->r600_blit.shader_gpu_addr);
++		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
++		if (r) {
++			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
++			return r;
++		}
++	}
++	/* Enable IRQ */
++	r = r600_irq_init(rdev);
++	if (r) {
++		DRM_ERROR("radeon: IH init failed (%d).\n", r);
++		radeon_irq_kms_fini(rdev);
+ 		return r;
+ 	}
++	r600_irq_set(rdev);
+ 
+ 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ 	if (r)
+@@ -1578,18 +1950,35 @@ int r600_resume(struct radeon_device *rdev)
+ 		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ 		return r;
+ 	}
++
++	r = r600_audio_init(rdev);
++	if (r) {
++		DRM_ERROR("radeon: audio resume failed\n");
++		return r;
++	}
++
+ 	return r;
+ }
+ 
+ int r600_suspend(struct radeon_device *rdev)
+ {
++	int r;
++
++	r600_audio_fini(rdev);
+ 	/* FIXME: we should wait for ring to be empty */
+ 	r600_cp_stop(rdev);
+ 	rdev->cp.ready = false;
++	r600_irq_suspend(rdev);
+ 	r600_wb_disable(rdev);
+ 	r600_pcie_gart_disable(rdev);
+ 	/* unpin shaders bo */
+-	radeon_object_unpin(rdev->r600_blit.shader_obj);
++	if (rdev->r600_blit.shader_obj) {
++		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
++		if (!r) {
++			radeon_bo_unpin(rdev->r600_blit.shader_obj);
++			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
++		}
++	}
+ 	return 0;
+ }
+ 
+@@ -1627,7 +2016,11 @@ int r600_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Post card if necessary */
+-	if (!r600_card_posted(rdev) && rdev->bios) {
++	if (!r600_card_posted(rdev)) {
++		if (!rdev->bios) {
++			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
++			return -EINVAL;
++		}
+ 		DRM_INFO("GPU not posted. posting now...\n");
+ 		atom_asic_init(rdev->mode_info.atom_context);
+ 	}
+@@ -1646,73 +2039,78 @@ int r600_init(struct radeon_device *rdev)
+ 	r = radeon_fence_driver_init(rdev);
+ 	if (r)
+ 		return r;
++	if (rdev->flags & RADEON_IS_AGP) {
++		r = radeon_agp_init(rdev);
++		if (r)
++			radeon_agp_disable(rdev);
++	}
+ 	r = r600_mc_init(rdev);
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
++	if (r)
++		return r;
++
++	r = radeon_irq_kms_init(rdev);
+ 	if (r)
+ 		return r;
++
+ 	rdev->cp.ring_obj = NULL;
+ 	r600_ring_init(rdev, 1024 * 1024);
+ 
+-	if (!rdev->me_fw || !rdev->pfp_fw) {
+-		r = r600_cp_init_microcode(rdev);
+-		if (r) {
+-			DRM_ERROR("Failed to load firmware!\n");
+-			return r;
+-		}
+-	}
++	rdev->ih.ring_obj = NULL;
++	r600_ih_ring_init(rdev, 64 * 1024);
+ 
+ 	r = r600_pcie_gart_init(rdev);
+ 	if (r)
+ 		return r;
+ 
+ 	rdev->accel_working = true;
+-	r = r600_blit_init(rdev);
+-	if (r) {
+-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
+-		return r;
+-	}
+-
+ 	r = r600_startup(rdev);
+ 	if (r) {
+-		r600_suspend(rdev);
++		dev_err(rdev->dev, "disabling GPU acceleration\n");
++		r600_cp_fini(rdev);
+ 		r600_wb_fini(rdev);
+-		radeon_ring_fini(rdev);
++		r600_irq_fini(rdev);
++		radeon_irq_kms_fini(rdev);
+ 		r600_pcie_gart_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+ 	if (rdev->accel_working) {
+ 		r = radeon_ib_pool_init(rdev);
+ 		if (r) {
+-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+-			rdev->accel_working = false;
+-		}
+-		r = r600_ib_test(rdev);
+-		if (r) {
+-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
++			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ 			rdev->accel_working = false;
++		} else {
++			r = r600_ib_test(rdev);
++			if (r) {
++				dev_err(rdev->dev, "IB test failed (%d).\n", r);
++				rdev->accel_working = false;
++			}
+ 		}
+ 	}
++
++	r = r600_audio_init(rdev);
++	if (r)
++		return r; /* TODO error handling */
+ 	return 0;
+ }
+ 
+ void r600_fini(struct radeon_device *rdev)
+ {
+-	/* Suspend operations */
+-	r600_suspend(rdev);
+-
++	r600_audio_fini(rdev);
+ 	r600_blit_fini(rdev);
+-	radeon_ring_fini(rdev);
++	r600_cp_fini(rdev);
+ 	r600_wb_fini(rdev);
++	r600_irq_fini(rdev);
++	radeon_irq_kms_fini(rdev);
+ 	r600_pcie_gart_fini(rdev);
++	radeon_agp_fini(rdev);
+ 	radeon_gem_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+ 	radeon_clocks_fini(rdev);
+-	if (rdev->flags & RADEON_IS_AGP)
+-		radeon_agp_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+@@ -1798,8 +2196,668 @@ int r600_ib_test(struct radeon_device *rdev)
+ 	return r;
+ }
+ 
++/*
++ * Interrupts
++ *
++ * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works much
++ * the same as the CP ring buffer, but in reverse.  Rather than the CPU
++ * writing to the ring and the GPU consuming, the GPU writes to the ring
++ * and the host consumes.  As the host irq handler processes interrupts, it
++ * increments the rptr.  When the rptr catches up with the wptr, all the
++ * current interrupts have been processed.
++ */
++
++void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
++{
++	u32 rb_bufsz;
++
++	/* Align ring size */
++	rb_bufsz = drm_order(ring_size / 4);
++	ring_size = (1 << rb_bufsz) * 4;
++	rdev->ih.ring_size = ring_size;
++	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
++	rdev->ih.rptr = 0;
++}
++
++static int r600_ih_ring_alloc(struct radeon_device *rdev)
++{
++	int r;
++
++	/* Allocate ring buffer */
++	if (rdev->ih.ring_obj == NULL) {
++		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
++				     true,
++				     RADEON_GEM_DOMAIN_GTT,
++				     &rdev->ih.ring_obj);
++		if (r) {
++			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
++			return r;
++		}
++		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
++		if (unlikely(r != 0))
++			return r;
++		r = radeon_bo_pin(rdev->ih.ring_obj,
++				  RADEON_GEM_DOMAIN_GTT,
++				  &rdev->ih.gpu_addr);
++		if (r) {
++			radeon_bo_unreserve(rdev->ih.ring_obj);
++			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
++			return r;
++		}
++		r = radeon_bo_kmap(rdev->ih.ring_obj,
++				   (void **)&rdev->ih.ring);
++		radeon_bo_unreserve(rdev->ih.ring_obj);
++		if (r) {
++			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
++			return r;
++		}
++	}
++	return 0;
++}
++
++static void r600_ih_ring_fini(struct radeon_device *rdev)
++{
++	int r;
++	if (rdev->ih.ring_obj) {
++		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
++		if (likely(r == 0)) {
++			radeon_bo_kunmap(rdev->ih.ring_obj);
++			radeon_bo_unpin(rdev->ih.ring_obj);
++			radeon_bo_unreserve(rdev->ih.ring_obj);
++		}
++		radeon_bo_unref(&rdev->ih.ring_obj);
++		rdev->ih.ring = NULL;
++		rdev->ih.ring_obj = NULL;
++	}
++}
++
++static void r600_rlc_stop(struct radeon_device *rdev)
++{
++
++	if (rdev->family >= CHIP_RV770) {
++		/* r7xx asics need to soft reset RLC before halting */
++		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
++		RREG32(SRBM_SOFT_RESET);
++		udelay(15000);
++		WREG32(SRBM_SOFT_RESET, 0);
++		RREG32(SRBM_SOFT_RESET);
++	}
++
++	WREG32(RLC_CNTL, 0);
++}
++
++static void r600_rlc_start(struct radeon_device *rdev)
++{
++	WREG32(RLC_CNTL, RLC_ENABLE);
++}
++
++static int r600_rlc_init(struct radeon_device *rdev)
++{
++	u32 i;
++	const __be32 *fw_data;
++
++	if (!rdev->rlc_fw)
++		return -EINVAL;
++
++	r600_rlc_stop(rdev);
++
++	WREG32(RLC_HB_BASE, 0);
++	WREG32(RLC_HB_CNTL, 0);
++	WREG32(RLC_HB_RPTR, 0);
++	WREG32(RLC_HB_WPTR, 0);
++	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
++	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
++	WREG32(RLC_MC_CNTL, 0);
++	WREG32(RLC_UCODE_CNTL, 0);
++
++	fw_data = (const __be32 *)rdev->rlc_fw->data;
++	if (rdev->family >= CHIP_RV770) {
++		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
++			WREG32(RLC_UCODE_ADDR, i);
++			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
++		}
++	} else {
++		for (i = 0; i < RLC_UCODE_SIZE; i++) {
++			WREG32(RLC_UCODE_ADDR, i);
++			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
++		}
++	}
++	WREG32(RLC_UCODE_ADDR, 0);
++
++	r600_rlc_start(rdev);
++
++	return 0;
++}
++
++static void r600_enable_interrupts(struct radeon_device *rdev)
++{
++	u32 ih_cntl = RREG32(IH_CNTL);
++	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
++
++	ih_cntl |= ENABLE_INTR;
++	ih_rb_cntl |= IH_RB_ENABLE;
++	WREG32(IH_CNTL, ih_cntl);
++	WREG32(IH_RB_CNTL, ih_rb_cntl);
++	rdev->ih.enabled = true;
++}
+ 
++static void r600_disable_interrupts(struct radeon_device *rdev)
++{
++	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
++	u32 ih_cntl = RREG32(IH_CNTL);
++
++	ih_rb_cntl &= ~IH_RB_ENABLE;
++	ih_cntl &= ~ENABLE_INTR;
++	WREG32(IH_RB_CNTL, ih_rb_cntl);
++	WREG32(IH_CNTL, ih_cntl);
++	/* set rptr, wptr to 0 */
++	WREG32(IH_RB_RPTR, 0);
++	WREG32(IH_RB_WPTR, 0);
++	rdev->ih.enabled = false;
++	rdev->ih.wptr = 0;
++	rdev->ih.rptr = 0;
++}
+ 
++static void r600_disable_interrupt_state(struct radeon_device *rdev)
++{
++	u32 tmp;
++
++	WREG32(CP_INT_CNTL, 0);
++	WREG32(GRBM_INT_CNTL, 0);
++	WREG32(DxMODE_INT_MASK, 0);
++	if (ASIC_IS_DCE3(rdev)) {
++		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
++		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
++		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++		WREG32(DC_HPD1_INT_CONTROL, tmp);
++		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++		WREG32(DC_HPD2_INT_CONTROL, tmp);
++		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++		WREG32(DC_HPD3_INT_CONTROL, tmp);
++		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++		WREG32(DC_HPD4_INT_CONTROL, tmp);
++		if (ASIC_IS_DCE32(rdev)) {
++			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD5_INT_CONTROL, tmp);
++			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
++			WREG32(DC_HPD6_INT_CONTROL, tmp);
++		}
++	} else {
++		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
++		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
++		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
++		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
++		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
++		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
++		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
++		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
++	}
++}
++
++int r600_irq_init(struct radeon_device *rdev)
++{
++	int ret = 0;
++	int rb_bufsz;
++	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
++
++	/* allocate ring */
++	ret = r600_ih_ring_alloc(rdev);
++	if (ret)
++		return ret;
++
++	/* disable irqs */
++	r600_disable_interrupts(rdev);
++
++	/* init rlc */
++	ret = r600_rlc_init(rdev);
++	if (ret) {
++		r600_ih_ring_fini(rdev);
++		return ret;
++	}
++
++	/* setup interrupt control */
++	/* set dummy read address to ring address */
++	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
++	interrupt_cntl = RREG32(INTERRUPT_CNTL);
++	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
++	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
++	 */
++	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
++	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
++	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
++	WREG32(INTERRUPT_CNTL, interrupt_cntl);
++
++	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
++	rb_bufsz = drm_order(rdev->ih.ring_size / 4);
++
++	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
++		      IH_WPTR_OVERFLOW_CLEAR |
++		      (rb_bufsz << 1));
++	/* WPTR writeback, not yet */
++	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
++	WREG32(IH_RB_WPTR_ADDR_LO, 0);
++	WREG32(IH_RB_WPTR_ADDR_HI, 0);
++
++	WREG32(IH_RB_CNTL, ih_rb_cntl);
++
++	/* set rptr, wptr to 0 */
++	WREG32(IH_RB_RPTR, 0);
++	WREG32(IH_RB_WPTR, 0);
++
++	/* Default settings for IH_CNTL (disabled at first) */
++	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
++	/* RPTR_REARM only works if msi's are enabled */
++	if (rdev->msi_enabled)
++		ih_cntl |= RPTR_REARM;
++
++#ifdef __BIG_ENDIAN
++	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
++#endif
++	WREG32(IH_CNTL, ih_cntl);
++
++	/* force the active interrupt state to all disabled */
++	r600_disable_interrupt_state(rdev);
++
++	/* enable irqs */
++	r600_enable_interrupts(rdev);
++
++	return ret;
++}
++
++void r600_irq_suspend(struct radeon_device *rdev)
++{
++	r600_disable_interrupts(rdev);
++	r600_rlc_stop(rdev);
++}
++
++void r600_irq_fini(struct radeon_device *rdev)
++{
++	r600_irq_suspend(rdev);
++	r600_ih_ring_fini(rdev);
++}
++
++int r600_irq_set(struct radeon_device *rdev)
++{
++	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
++	u32 mode_int = 0;
++	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
++
++	if (!rdev->irq.installed) {
++		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
++		return -EINVAL;
++	}
++	/* don't enable anything if the ih is disabled */
++	if (!rdev->ih.enabled) {
++		r600_disable_interrupts(rdev);
++		/* force the active interrupt state to all disabled */
++		r600_disable_interrupt_state(rdev);
++		return 0;
++	}
++
++	if (ASIC_IS_DCE3(rdev)) {
++		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
++		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
++		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
++		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
++		if (ASIC_IS_DCE32(rdev)) {
++			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
++			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
++		}
++	} else {
++		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
++		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
++		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
++	}
++
++	if (rdev->irq.sw_int) {
++		DRM_DEBUG("r600_irq_set: sw int\n");
++		cp_int_cntl |= RB_INT_ENABLE;
++	}
++	if (rdev->irq.crtc_vblank_int[0]) {
++		DRM_DEBUG("r600_irq_set: vblank 0\n");
++		mode_int |= D1MODE_VBLANK_INT_MASK;
++	}
++	if (rdev->irq.crtc_vblank_int[1]) {
++		DRM_DEBUG("r600_irq_set: vblank 1\n");
++		mode_int |= D2MODE_VBLANK_INT_MASK;
++	}
++	if (rdev->irq.hpd[0]) {
++		DRM_DEBUG("r600_irq_set: hpd 1\n");
++		hpd1 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[1]) {
++		DRM_DEBUG("r600_irq_set: hpd 2\n");
++		hpd2 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[2]) {
++		DRM_DEBUG("r600_irq_set: hpd 3\n");
++		hpd3 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[3]) {
++		DRM_DEBUG("r600_irq_set: hpd 4\n");
++		hpd4 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[4]) {
++		DRM_DEBUG("r600_irq_set: hpd 5\n");
++		hpd5 |= DC_HPDx_INT_EN;
++	}
++	if (rdev->irq.hpd[5]) {
++		DRM_DEBUG("r600_irq_set: hpd 6\n");
++		hpd6 |= DC_HPDx_INT_EN;
++	}
++
++	WREG32(CP_INT_CNTL, cp_int_cntl);
++	WREG32(DxMODE_INT_MASK, mode_int);
++	if (ASIC_IS_DCE3(rdev)) {
++		WREG32(DC_HPD1_INT_CONTROL, hpd1);
++		WREG32(DC_HPD2_INT_CONTROL, hpd2);
++		WREG32(DC_HPD3_INT_CONTROL, hpd3);
++		WREG32(DC_HPD4_INT_CONTROL, hpd4);
++		if (ASIC_IS_DCE32(rdev)) {
++			WREG32(DC_HPD5_INT_CONTROL, hpd5);
++			WREG32(DC_HPD6_INT_CONTROL, hpd6);
++		}
++	} else {
++		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
++		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
++		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
++	}
++
++	return 0;
++}
++
++static inline void r600_irq_ack(struct radeon_device *rdev,
++				u32 *disp_int,
++				u32 *disp_int_cont,
++				u32 *disp_int_cont2)
++{
++	u32 tmp;
++
++	if (ASIC_IS_DCE3(rdev)) {
++		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
++		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
++		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
++	} else {
++		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
++		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
++		*disp_int_cont2 = 0;
++	}
++
++	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
++		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
++	if (*disp_int & LB_D1_VLINE_INTERRUPT)
++		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
++	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
++		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
++	if (*disp_int & LB_D2_VLINE_INTERRUPT)
++		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
++	if (*disp_int & DC_HPD1_INTERRUPT) {
++		if (ASIC_IS_DCE3(rdev)) {
++			tmp = RREG32(DC_HPD1_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HPD1_INT_CONTROL, tmp);
++		} else {
++			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
++		}
++	}
++	if (*disp_int & DC_HPD2_INTERRUPT) {
++		if (ASIC_IS_DCE3(rdev)) {
++			tmp = RREG32(DC_HPD2_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HPD2_INT_CONTROL, tmp);
++		} else {
++			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
++		}
++	}
++	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
++		if (ASIC_IS_DCE3(rdev)) {
++			tmp = RREG32(DC_HPD3_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HPD3_INT_CONTROL, tmp);
++		} else {
++			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
++		}
++	}
++	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
++		tmp = RREG32(DC_HPD4_INT_CONTROL);
++		tmp |= DC_HPDx_INT_ACK;
++		WREG32(DC_HPD4_INT_CONTROL, tmp);
++	}
++	if (ASIC_IS_DCE32(rdev)) {
++		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
++			tmp = RREG32(DC_HPD5_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HPD5_INT_CONTROL, tmp);
++		}
++		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
++			tmp = RREG32(DC_HPD6_INT_CONTROL);
++			tmp |= DC_HPDx_INT_ACK;
++			WREG32(DC_HPD6_INT_CONTROL, tmp);
++		}
++	}
++}
++
++void r600_irq_disable(struct radeon_device *rdev)
++{
++	u32 disp_int, disp_int_cont, disp_int_cont2;
++
++	r600_disable_interrupts(rdev);
++	/* Wait and acknowledge irq */
++	mdelay(1);
++	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
++	r600_disable_interrupt_state(rdev);
++}
++
++static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
++{
++	u32 wptr, tmp;
++
++	/* XXX use writeback */
++	wptr = RREG32(IH_RB_WPTR);
++
++	if (wptr & RB_OVERFLOW) {
++		/* When a ring buffer overflow happens, start parsing interrupts
++		 * from the last vector not yet overwritten (wptr + 16).
++		 * Hopefully this allows us to catch up.
++		 */
++		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
++			wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
++		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
++		tmp = RREG32(IH_RB_CNTL);
++		tmp |= IH_WPTR_OVERFLOW_CLEAR;
++		WREG32(IH_RB_CNTL, tmp);
++	}
++	return (wptr & rdev->ih.ptr_mask);
++}
++
++/* r600 IV Ring
++ * Each IV ring entry is 128 bits:
++ * [7:0]    - interrupt source id
++ * [31:8]   - reserved
++ * [59:32]  - interrupt source data
++ * [127:60]  - reserved
++ *
++ * The basic interrupt vector entries
++ * are decoded as follows:
++ * src_id  src_data  description
++ *      1         0  D1 Vblank
++ *      1         1  D1 Vline
++ *      5         0  D2 Vblank
++ *      5         1  D2 Vline
++ *     19         0  FP Hot plug detection A
++ *     19         1  FP Hot plug detection B
++ *     19         2  DAC A auto-detection
++ *     19         3  DAC B auto-detection
++ *    176         -  CP_INT RB
++ *    177         -  CP_INT IB1
++ *    178         -  CP_INT IB2
++ *    181         -  EOP Interrupt
++ *    233         -  GUI Idle
++ *
++ * Note, these are based on r600 and may need to be
++ * adjusted or added to on newer asics
++ */
++
++int r600_irq_process(struct radeon_device *rdev)
++{
++	u32 wptr = r600_get_ih_wptr(rdev);
++	u32 rptr = rdev->ih.rptr;
++	u32 src_id, src_data;
++	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
++	unsigned long flags;
++	bool queue_hotplug = false;
++
++	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
++	if (!rdev->ih.enabled)
++		return IRQ_NONE;
++
++	spin_lock_irqsave(&rdev->ih.lock, flags);
++
++	if (rptr == wptr) {
++		spin_unlock_irqrestore(&rdev->ih.lock, flags);
++		return IRQ_NONE;
++	}
++	if (rdev->shutdown) {
++		spin_unlock_irqrestore(&rdev->ih.lock, flags);
++		return IRQ_NONE;
++	}
++
++restart_ih:
++	/* display interrupts */
++	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
++
++	rdev->ih.wptr = wptr;
++	while (rptr != wptr) {
++		/* wptr/rptr are in bytes! */
++		ring_index = rptr / 4;
++		src_id =  rdev->ih.ring[ring_index] & 0xff;
++		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
++
++		switch (src_id) {
++		case 1: /* D1 vblank/vline */
++			switch (src_data) {
++			case 0: /* D1 vblank */
++				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
++					drm_handle_vblank(rdev->ddev, 0);
++					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D1 vblank\n");
++				}
++				break;
++			case 1: /* D1 vline */
++				if (disp_int & LB_D1_VLINE_INTERRUPT) {
++					disp_int &= ~LB_D1_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D1 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 5: /* D2 vblank/vline */
++			switch (src_data) {
++			case 0: /* D2 vblank */
++				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
++					drm_handle_vblank(rdev->ddev, 1);
++					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
++					DRM_DEBUG("IH: D2 vblank\n");
++				}
++				break;
++			case 1: /* D2 vline */
++				if (disp_int & LB_D2_VLINE_INTERRUPT) {
++					disp_int &= ~LB_D2_VLINE_INTERRUPT;
++					DRM_DEBUG("IH: D2 vline\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 19: /* HPD/DAC hotplug */
++			switch (src_data) {
++			case 0:
++				if (disp_int & DC_HPD1_INTERRUPT) {
++					disp_int &= ~DC_HPD1_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD1\n");
++				}
++				break;
++			case 1:
++				if (disp_int & DC_HPD2_INTERRUPT) {
++					disp_int &= ~DC_HPD2_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD2\n");
++				}
++				break;
++			case 4:
++				if (disp_int_cont & DC_HPD3_INTERRUPT) {
++					disp_int_cont &= ~DC_HPD3_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD3\n");
++				}
++				break;
++			case 5:
++				if (disp_int_cont & DC_HPD4_INTERRUPT) {
++					disp_int_cont &= ~DC_HPD4_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD4\n");
++				}
++				break;
++			case 10:
++				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
++					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD5\n");
++				}
++				break;
++			case 12:
++				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
++					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
++					queue_hotplug = true;
++					DRM_DEBUG("IH: HPD6\n");
++				}
++				break;
++			default:
++				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++				break;
++			}
++			break;
++		case 176: /* CP_INT in ring buffer */
++		case 177: /* CP_INT in IB1 */
++		case 178: /* CP_INT in IB2 */
++			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
++			radeon_fence_process(rdev);
++			break;
++		case 181: /* CP EOP event */
++			DRM_DEBUG("IH: CP EOP\n");
++			break;
++		default:
++			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
++			break;
++		}
++
++		/* wptr/rptr are in bytes! */
++		rptr += 16;
++		rptr &= rdev->ih.ptr_mask;
++	}
++	/* make sure wptr hasn't changed while processing */
++	wptr = r600_get_ih_wptr(rdev);
++	if (wptr != rdev->ih.wptr)
++		goto restart_ih;
++	if (queue_hotplug)
++		queue_work(rdev->wq, &rdev->hotplug_work);
++	rdev->ih.rptr = rptr;
++	WREG32(IH_RB_RPTR, rdev->ih.rptr);
++	spin_unlock_irqrestore(&rdev->ih.lock, flags);
++	return IRQ_HANDLED;
++}
+ 
+ /*
+  * Debugfs info
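
The IV-ring comment block above fully determines the consumer loop in
r600_irq_process(): entries are 16 bytes, the rptr chases the wptr, and
the wptr has to be re-read after draining in case more entries landed in
the meantime. The skeleton, with locking and dispatch stripped out:

	/* Sketch: drain the IH ring.  Each entry is 128 bits (16 bytes);
	 * ih.ring is viewed as u32, so one entry spans four slots. */
	u32 rptr = rdev->ih.rptr;
	u32 wptr = r600_get_ih_wptr(rdev);

	while (rptr != wptr) {
		u32 ring_index = rptr / 4;
		u32 src_id   = rdev->ih.ring[ring_index] & 0xff;
		u32 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		/* ... dispatch on (src_id, src_data) ... */
		rptr = (rptr + 16) & rdev->ih.ptr_mask;
	}
	/* More entries may have arrived while draining; the real code
	 * jumps back to restart_ih if the wptr moved. */
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rptr);
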
+@@ -1811,21 +2869,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
+ 	struct drm_info_node *node = (struct drm_info_node *) m->private;
+ 	struct drm_device *dev = node->minor->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+-	uint32_t rdp, wdp;
+ 	unsigned count, i, j;
+ 
+ 	radeon_ring_free_size(rdev);
+-	rdp = RREG32(CP_RB_RPTR);
+-	wdp = RREG32(CP_RB_WPTR);
+-	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
++	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
+ 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
+-	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
+-	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
++	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
++	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
++	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
++	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
+ 	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+ 	seq_printf(m, "%u dwords in ring\n", count);
++	i = rdev->cp.rptr;
+ 	for (j = 0; j <= count; j++) {
+-		i = (rdp + j) & rdev->cp.ptr_mask;
+ 		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
++		i = (i + 1) & rdev->cp.ptr_mask;
+ 	}
+ 	return 0;
+ }
+@@ -1855,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
+ 	return 0;
+ #endif
+ }
++
++/**
++ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
++ * @rdev: radeon device structure
++ * @bo: buffer object struct which userspace is waiting for idle
++ *
++ * Some R6XX/R7XX chips don't seem to take into account an HDP flush
++ * performed through the ring buffer, which leads to rendering corruption
++ * (see http://bugzilla.kernel.org/show_bug.cgi?id=15186). To avoid this,
++ * we perform the HDP flush directly by writing the register through MMIO.
++ */
++void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
++{
++	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
++}
+diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
+new file mode 100644
+index 0000000..0dcb690
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/r600_audio.c
+@@ -0,0 +1,266 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Christian König.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Christian König
++ */
++#include "drmP.h"
++#include "radeon.h"
++#include "radeon_reg.h"
++#include "atom.h"
++
++#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
++
++/*
++ * check if the chipset is supported
++ */
++static int r600_audio_chipset_supported(struct radeon_device *rdev)
++{
++	return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
++		|| rdev->family == CHIP_RS600
++		|| rdev->family == CHIP_RS690
++		|| rdev->family == CHIP_RS740;
++}
++
++/*
++ * current number of channels
++ */
++static int r600_audio_channels(struct radeon_device *rdev)
++{
++	return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
++}
++
++/*
++ * current bits per sample
++ */
++static int r600_audio_bits_per_sample(struct radeon_device *rdev)
++{
++	uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
++	switch (value) {
++	case 0x0: return  8;
++	case 0x1: return 16;
++	case 0x2: return 20;
++	case 0x3: return 24;
++	case 0x4: return 32;
++	}
++
++	DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
++
++	return 16;
++}
++
++/*
++ * current sampling rate in Hz
++ */
++static int r600_audio_rate(struct radeon_device *rdev)
++{
++	uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
++	uint32_t result;
++
++	if (value & 0x4000)
++		result = 44100;
++	else
++		result = 48000;
++
++	result *= ((value >> 11) & 0x7) + 1;
++	result /= ((value >> 8) & 0x7) + 1;
++
++	return result;
++}
++
++/*
++ * iec 60958 status bits
++ */
++static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
++{
++	return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
++}
++
++/*
++ * iec 60958 category code
++ */
++static uint8_t r600_audio_category_code(struct radeon_device *rdev)
++{
++	return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
++}
++
++/*
++ * update all hdmi interfaces with current audio parameters
++ */
++static void r600_audio_update_hdmi(unsigned long param)
++{
++	struct radeon_device *rdev = (struct radeon_device *)param;
++	struct drm_device *dev = rdev->ddev;
++
++	int channels = r600_audio_channels(rdev);
++	int rate = r600_audio_rate(rdev);
++	int bps = r600_audio_bits_per_sample(rdev);
++	uint8_t status_bits = r600_audio_status_bits(rdev);
++	uint8_t category_code = r600_audio_category_code(rdev);
++
++	struct drm_encoder *encoder;
++	int changes = 0;
++
++	changes |= channels != rdev->audio_channels;
++	changes |= rate != rdev->audio_rate;
++	changes |= bps != rdev->audio_bits_per_sample;
++	changes |= status_bits != rdev->audio_status_bits;
++	changes |= category_code != rdev->audio_category_code;
++
++	if (changes) {
++		rdev->audio_channels = channels;
++		rdev->audio_rate = rate;
++		rdev->audio_bits_per_sample = bps;
++		rdev->audio_status_bits = status_bits;
++		rdev->audio_category_code = category_code;
++	}
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		if (changes || r600_hdmi_buffer_status_changed(encoder))
++			r600_hdmi_update_audio_settings(
++				encoder, channels,
++				rate, bps, status_bits,
++				category_code);
++	}
++
++	mod_timer(&rdev->audio_timer,
++		jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
++}
++
++/*
++ * initialize the audio vars and register the update timer
++ */
++int r600_audio_init(struct radeon_device *rdev)
++{
++	if (!r600_audio_chipset_supported(rdev))
++		return 0;
++
++	DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
++	WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
++
++	rdev->audio_channels = -1;
++	rdev->audio_rate = -1;
++	rdev->audio_bits_per_sample = -1;
++	rdev->audio_status_bits = 0;
++	rdev->audio_category_code = 0;
++
++	setup_timer(
++		&rdev->audio_timer,
++		r600_audio_update_hdmi,
++		(unsigned long)rdev);
++
++	mod_timer(&rdev->audio_timer, jiffies + 1);
++
++	return 0;
++}
++
++/*
++ * determine how the encoders and the audio interface are wired together
++ */
++int r600_audio_tmds_index(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct drm_encoder *other;
++
++	switch (radeon_encoder->encoder_id) {
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++		return 0;
++
++	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
++		/* special case: check if a TMDS1 is present */
++		list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
++			if (to_radeon_encoder(other)->encoder_id ==
++				ENCODER_OBJECT_ID_INTERNAL_TMDS1)
++				return 1;
++		}
++		return 0;
++
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++		return 1;
++
++	default:
++		DRM_ERROR("Unsupported encoder type 0x%02X\n",
++			  radeon_encoder->encoder_id);
++		return -1;
++	}
++}
++
++/*
++ * attach the audio codec to the clock source of the encoder
++ */
++void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	int base_rate = 48000;
++
++	switch (radeon_encoder->encoder_id) {
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
++	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
++		WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
++		break;
++
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++		WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
++		break;
++
++	default:
++		DRM_ERROR("Unsupported encoder type 0x%02X\n",
++			  radeon_encoder->encoder_id);
++		return;
++	}
++
++	switch (r600_audio_tmds_index(encoder)) {
++	case 0:
++		WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
++		WREG32(R600_AUDIO_PLL1_DIV, clock*100);
++		WREG32(R600_AUDIO_CLK_SRCSEL, 0);
++		break;
++
++	case 1:
++		WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
++		WREG32(R600_AUDIO_PLL2_DIV, clock*100);
++		WREG32(R600_AUDIO_CLK_SRCSEL, 1);
++		break;
++	}
++}
++
++/*
++ * release the audio timer
++ * TODO: How to do this correctly on SMP systems?
++ */
++void r600_audio_fini(struct radeon_device *rdev)
++{
++	if (!r600_audio_chipset_supported(rdev))
++		return;
++
++	del_timer(&rdev->audio_timer);
++	WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
++}
+diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
+index dbf716e..446b765 100644
+--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
++++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
+@@ -449,6 +449,7 @@ int r600_blit_init(struct radeon_device *rdev)
+ 	u32 packet2s[16];
+ 	int num_packet2s = 0;
+ 
++	mutex_init(&rdev->r600_blit.mutex);
+ 	rdev->r600_blit.state_offset = 0;
+ 
+ 	if (rdev->family >= CHIP_RV770)
+@@ -473,9 +474,8 @@ int r600_blit_init(struct radeon_device *rdev)
+ 	obj_size += r6xx_ps_size * 4;
+ 	obj_size = ALIGN(obj_size, 256);
+ 
+-	r = radeon_object_create(rdev, NULL, obj_size,
+-				 true, RADEON_GEM_DOMAIN_VRAM,
+-				 false, &rdev->r600_blit.shader_obj);
++	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
++				&rdev->r600_blit.shader_obj);
+ 	if (r) {
+ 		DRM_ERROR("r600 failed to allocate shader\n");
+ 		return r;
+@@ -485,12 +485,14 @@ int r600_blit_init(struct radeon_device *rdev)
+ 		  obj_size,
+ 		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
+ 
+-	r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
++	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
++	if (unlikely(r != 0))
++		return r;
++	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
+ 	if (r) {
+ 		DRM_ERROR("failed to map blit object %d\n", r);
+ 		return r;
+ 	}
+-
+ 	if (rdev->family >= CHIP_RV770)
+ 		memcpy_toio(ptr + rdev->r600_blit.state_offset,
+ 			    r7xx_default_state, rdev->r600_blit.state_len * 4);
+@@ -500,19 +502,28 @@ int r600_blit_init(struct radeon_device *rdev)
+ 	if (num_packet2s)
+ 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+ 			    packet2s, num_packet2s * 4);
+-
+-
+ 	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
+ 	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
+-
+-	radeon_object_kunmap(rdev->r600_blit.shader_obj);
++	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
++	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ 	return 0;
+ }
+ 
+ void r600_blit_fini(struct radeon_device *rdev)
+ {
+-	radeon_object_unpin(rdev->r600_blit.shader_obj);
+-	radeon_object_unref(&rdev->r600_blit.shader_obj);
++	int r;
++
++	if (rdev->r600_blit.shader_obj == NULL)
++		return;
++	/* If we can't reserve the bo, unref should be enough to destroy
++	 * it when it becomes idle.
++	 */
++	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
++	if (!r) {
++		radeon_bo_unpin(rdev->r600_blit.shader_obj);
++		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
++	}
++	radeon_bo_unref(&rdev->r600_blit.shader_obj);
+ }
+ 
+ int r600_vb_ib_get(struct radeon_device *rdev)
+@@ -532,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
+ void r600_vb_ib_put(struct radeon_device *rdev)
+ {
+ 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
+-	mutex_unlock(&rdev->ib_pool.mutex);
+ 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+ }
+ 
+@@ -547,7 +555,8 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+ 	int dwords_per_loop = 76, num_loops;
+ 
+ 	r = r600_vb_ib_get(rdev);
+-	WARN_ON(r);
++	if (r)
++		return r;
+ 
+ 	/* set_render_target emits 2 extra dwords on rv6xx */
+ 	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
+@@ -569,11 +578,12 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
+ 	ring_size = num_loops * dwords_per_loop;
+ 	/* set default  + shaders */
+ 	ring_size += 40; /* shaders + def state */
+-	ring_size += 3; /* fence emit for VB IB */
++	ring_size += 7; /* fence emit for VB IB */
+ 	ring_size += 5; /* done copy */
+-	ring_size += 3; /* fence emit for done copy */
++	ring_size += 7; /* fence emit for done copy */
+ 	r = radeon_ring_lock(rdev, ring_size);
+-	WARN_ON(r);
++	if (r)
++		return r;
+ 
+ 	set_default_state(rdev); /* 14 */
+ 	set_shaders(rdev); /* 26 */
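
The dword budget above encodes a cross-file dependency worth spelling
out: "fence emit" grew from 3 to 7 dwords because r600_fence_ring_emit()
(see the r600.c hunk earlier) now writes 3 dwords for the scratch-reg
fence sequence, plus 2 for the HDP coherency flush, plus 2 for the CP
interrupt packet0. Reserving the worst case with radeon_ring_lock() up
front means the emit path cannot run out of ring space halfway through.
The budget as a sketch:

	/* Sketch: worst-case ring reservation, constants mirroring
	 * r600_blit_prepare_copy() above. */
	static unsigned example_blit_ring_budget(int num_loops,
						 int dwords_per_loop)
	{
		unsigned ring_size = num_loops * dwords_per_loop;

		ring_size += 40;	/* default state + shaders */
		ring_size += 7;		/* fence emit for the VB IB */
		ring_size += 5;		/* done copy */
		ring_size += 7;		/* fence emit for done copy */
		return ring_size;
	}
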
+diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
+index 6d5a711..75bcf35 100644
+--- a/drivers/gpu/drm/radeon/r600_cp.c
++++ b/drivers/gpu/drm/radeon/r600_cp.c
+@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
+ 
+ 	gb_tiling_config |= R600_BANK_SWAPS(1);
+ 
+-	backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+-							dev_priv->r600_max_backends,
+-							(0xff << dev_priv->r600_max_backends) & 0xff);
++	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
++		backend_map = 0x28;
++	else
++		backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
++								dev_priv->r600_max_backends,
++								(0xff << dev_priv->r600_max_backends) & 0xff);
+ 	gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+ 
+ 	cc_gc_shader_pipe_config =
+diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
+index 0d82076..e4c45ec 100644
+--- a/drivers/gpu/drm/radeon/r600_cs.c
++++ b/drivers/gpu/drm/radeon/r600_cs.c
+@@ -36,6 +36,10 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
+ typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
+ static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+ 
++struct r600_cs_track {
++	u32	cb_color0_base_last;
++};
++
+ /**
+  * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
+  * @parser:	parser structure holding parsing context.
+@@ -170,13 +174,35 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
+ 			  idx, relocs_chunk->length_dw);
+ 		return -EINVAL;
+ 	}
+-	*cs_reloc = &p->relocs[0];
++	*cs_reloc = p->relocs;
+ 	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
+ 	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ 	return 0;
+ }
+ 
+ /**
++ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
++ * @parser:		parser structure holding parsing context.
++ *
++ * Check whether the next packet is a relocation packet3 (NOP);
++ * returns 1 if it is, 0 otherwise.
++ **/
++static inline int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
++{
++	struct radeon_cs_packet p3reloc;
++	int r;
++
++	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
++	if (r) {
++		return 0;
++	}
++	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
++		return 0;
++	}
++	return 1;
++}
++
++/**
+  * r600_cs_packet_next_vline() - parse userspace VLINE packet
+  * @parser:		parser structure holding parsing context.
+  *
+@@ -337,6 +363,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 				struct radeon_cs_packet *pkt)
+ {
+ 	struct radeon_cs_reloc *reloc;
++	struct r600_cs_track *track;
+ 	volatile u32 *ib;
+ 	unsigned idx;
+ 	unsigned i;
+@@ -344,6 +371,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 	int r;
+ 	u32 idx_value;
+ 
++	track = (struct r600_cs_track *)p->track;
+ 	ib = p->ib->ptr;
+ 	idx = pkt->idx + 1;
+ 	idx_value = radeon_get_ib_value(p, idx);
+@@ -503,9 +531,60 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ 		for (i = 0; i < pkt->count; i++) {
+ 			reg = start_reg + (4 * i);
+ 			switch (reg) {
++			/* These registers were added late, and there is
++			 * userspace which provides a relocation for them but
++			 * sets a 0 offset. To avoid breaking that old
++			 * userspace we detect the case and point the address
++			 * at the last CB_COLOR0_BASE; if userspace hasn't set
++			 * CB_COLOR0_BASE before writing one of these registers
++			 * we report an error. Old userspace always sets
++			 * CB_COLOR0_BASE before any of these.
++			 */
++			case R_0280E0_CB_COLOR0_FRAG:
++			case R_0280E4_CB_COLOR1_FRAG:
++			case R_0280E8_CB_COLOR2_FRAG:
++			case R_0280EC_CB_COLOR3_FRAG:
++			case R_0280F0_CB_COLOR4_FRAG:
++			case R_0280F4_CB_COLOR5_FRAG:
++			case R_0280F8_CB_COLOR6_FRAG:
++			case R_0280FC_CB_COLOR7_FRAG:
++			case R_0280C0_CB_COLOR0_TILE:
++			case R_0280C4_CB_COLOR1_TILE:
++			case R_0280C8_CB_COLOR2_TILE:
++			case R_0280CC_CB_COLOR3_TILE:
++			case R_0280D0_CB_COLOR4_TILE:
++			case R_0280D4_CB_COLOR5_TILE:
++			case R_0280D8_CB_COLOR6_TILE:
++			case R_0280DC_CB_COLOR7_TILE:
++				if (!r600_cs_packet_next_is_pkt3_nop(p)) {
++					if (!track->cb_color0_base_last) {
++					dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
++						return -EINVAL;
++					}
++					ib[idx+1+i] = track->cb_color0_base_last;
++					printk_once(KERN_WARNING "radeon: You have old & broken userspace; "
++						"please consider updating mesa & xf86-video-ati\n");
++				} else {
++					r = r600_cs_packet_next_reloc(p, &reloc);
++					if (r) {
++						dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
++						return -EINVAL;
++					}
++					ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++				}
++				break;
+ 			case DB_DEPTH_BASE:
+ 			case DB_HTILE_DATA_BASE:
+ 			case CB_COLOR0_BASE:
++				r = r600_cs_packet_next_reloc(p, &reloc);
++				if (r) {
++					DRM_ERROR("bad SET_CONTEXT_REG "
++							"0x%04X\n", reg);
++					return -EINVAL;
++				}
++				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
++				track->cb_color0_base_last = ib[idx+1+i];
++				break;
+ 			case CB_COLOR1_BASE:
+ 			case CB_COLOR2_BASE:
+ 			case CB_COLOR3_BASE:
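(The ">> 8" in both branches above reflects that the CB color base registers hold addresses in 256-byte units -- the *_BASE_256B macros added to r600d.h later in this patch point the same way. A hypothetical helper makes the conversion explicit; the name is an illustration, not driver code.)

#include <stdint.h>

/* CB_COLORn_* base registers take the GPU address in 256-byte units,
 * hence the shift by 8 when patching the indirect buffer. */
static uint32_t cb_base_256b(uint64_t gpu_offset)
{
	return (uint32_t)((gpu_offset >> 8) & 0xffffffff);
}
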
+@@ -678,8 +757,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
+ int r600_cs_parse(struct radeon_cs_parser *p)
+ {
+ 	struct radeon_cs_packet pkt;
++	struct r600_cs_track *track;
+ 	int r;
+ 
++	track = kzalloc(sizeof(*track), GFP_KERNEL);
++	if (track == NULL)
++		return -ENOMEM;
++	p->track = track;
+ 	do {
+ 		r = r600_cs_packet_parse(p, &pkt, p->idx);
+ 		if (r) {
+@@ -717,7 +799,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+ 	if (p->chunk_relocs_idx == -1) {
+ 		return 0;
+ 	}
+-	p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
++	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ 	if (p->relocs == NULL) {
+ 		return -ENOMEM;
+ 	}
+@@ -757,6 +839,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
+ 	/* initialize parser */
+ 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ 	parser.filp = filp;
++	parser.dev = &dev->pdev->dev;
+ 	parser.rdev = NULL;
+ 	parser.family = family;
+ 	parser.ib = &fake_ib;
+diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
+new file mode 100644
+index 0000000..fcc949d
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -0,0 +1,507 @@
++/*
++ * Copyright 2008 Advanced Micro Devices, Inc.
++ * Copyright 2008 Red Hat Inc.
++ * Copyright 2009 Christian König.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors: Christian König
++ */
++#include "drmP.h"
++#include "radeon_drm.h"
++#include "radeon.h"
++#include "atom.h"
++
++/*
++ * HDMI color format
++ */
++enum r600_hdmi_color_format {
++	RGB = 0,
++	YCC_422 = 1,
++	YCC_444 = 2
++};
++
++/*
++ * IEC60958 status bits
++ */
++enum r600_hdmi_iec_status_bits {
++	AUDIO_STATUS_DIG_ENABLE   = 0x01,
++	AUDIO_STATUS_V            = 0x02,
++	AUDIO_STATUS_VCFG         = 0x04,
++	AUDIO_STATUS_EMPHASIS     = 0x08,
++	AUDIO_STATUS_COPYRIGHT    = 0x10,
++	AUDIO_STATUS_NONAUDIO     = 0x20,
++	AUDIO_STATUS_PROFESSIONAL = 0x40,
++	AUDIO_STATUS_LEVEL        = 0x80
++};
++
++struct {
++	uint32_t Clock;
++
++	int N_32kHz;
++	int CTS_32kHz;
++
++	int N_44_1kHz;
++	int CTS_44_1kHz;
++
++	int N_48kHz;
++	int CTS_48kHz;
++
++} r600_hdmi_ACR[] = {
++    /*	     32kHz	  44.1kHz	48kHz    */
++    /* Clock      N     CTS      N     CTS      N     CTS */
++    {  25174,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25.20/1.001 MHz */
++    {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
++    {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
++    {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
++    {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
++    {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
++    {  74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
++    {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
++    { 148351, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
++    { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
++    {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
++};
++
++/*
++ * calculate CTS value if it's not found in the table
++ */
++static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
++{
++	if (*CTS == 0)
++		*CTS = clock*N/(128*freq)*1000;
++	DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
++		  N, *CTS, freq);
++}
++
++/*
++ * update the N and CTS parameters for a given pixel clock rate
++ */
++static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
++	int CTS;
++	int N;
++	int i;
++
++	for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
++
++	CTS = r600_hdmi_ACR[i].CTS_32kHz;
++	N = r600_hdmi_ACR[i].N_32kHz;
++	r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
++	WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
++	WREG32(offset+R600_HDMI_32kHz_N, N);
++
++	CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
++	N = r600_hdmi_ACR[i].N_44_1kHz;
++	r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
++	WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
++	WREG32(offset+R600_HDMI_44_1kHz_N, N);
++
++	CTS = r600_hdmi_ACR[i].CTS_48kHz;
++	N = r600_hdmi_ACR[i].N_48kHz;
++	r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
++	WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
++	WREG32(offset+R600_HDMI_48kHz_N, N);
++}
++
++/*
++ * calculate the crc for a given info frame
++ */
++static void r600_hdmi_infoframe_checksum(uint8_t packetType,
++					 uint8_t versionNumber,
++					 uint8_t length,
++					 uint8_t *frame)
++{
++	int i;
++	frame[0] = packetType + versionNumber + length;
++	for (i = 1; i <= length; i++)
++		frame[0] += frame[i];
++	frame[0] = 0x100 - frame[0];
++}
++
++/*
++ * build a HDMI Video Info Frame
++ */
++static void r600_hdmi_videoinfoframe(
++	struct drm_encoder *encoder,
++	enum r600_hdmi_color_format color_format,
++	int active_information_present,
++	uint8_t active_format_aspect_ratio,
++	uint8_t scan_information,
++	uint8_t colorimetry,
++	uint8_t ex_colorimetry,
++	uint8_t quantization,
++	int ITC,
++	uint8_t picture_aspect_ratio,
++	uint8_t video_format_identification,
++	uint8_t pixel_repetition,
++	uint8_t non_uniform_picture_scaling,
++	uint8_t bar_info_data_valid,
++	uint16_t top_bar,
++	uint16_t bottom_bar,
++	uint16_t left_bar,
++	uint16_t right_bar
++)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
++
++	uint8_t frame[14];
++
++	frame[0x0] = 0;
++	frame[0x1] =
++		(scan_information & 0x3) |
++		((bar_info_data_valid & 0x3) << 2) |
++		((active_information_present & 0x1) << 4) |
++		((color_format & 0x3) << 5);
++	frame[0x2] =
++		(active_format_aspect_ratio & 0xF) |
++		((picture_aspect_ratio & 0x3) << 4) |
++		((colorimetry & 0x3) << 6);
++	frame[0x3] =
++		(non_uniform_picture_scaling & 0x3) |
++		((quantization & 0x3) << 2) |
++		((ex_colorimetry & 0x7) << 4) |
++		((ITC & 0x1) << 7);
++	frame[0x4] = (video_format_identification & 0x7F);
++	frame[0x5] = (pixel_repetition & 0xF);
++	frame[0x6] = (top_bar & 0xFF);
++	frame[0x7] = (top_bar >> 8);
++	frame[0x8] = (bottom_bar & 0xFF);
++	frame[0x9] = (bottom_bar >> 8);
++	frame[0xA] = (left_bar & 0xFF);
++	frame[0xB] = (left_bar >> 8);
++	frame[0xC] = (right_bar & 0xFF);
++	frame[0xD] = (right_bar >> 8);
++
++	r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
++
++	WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
++		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
++	WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
++		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
++	WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
++		frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
++	WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
++		frame[0xC] | (frame[0xD] << 8));
++}
++
++/*
++ * build an Audio Info Frame
++ */
++static void r600_hdmi_audioinfoframe(
++	struct drm_encoder *encoder,
++	uint8_t channel_count,
++	uint8_t coding_type,
++	uint8_t sample_size,
++	uint8_t sample_frequency,
++	uint8_t format,
++	uint8_t channel_allocation,
++	uint8_t level_shift,
++	int downmix_inhibit
++)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
++
++	uint8_t frame[11];
++
++	frame[0x0] = 0;
++	frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
++	frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
++	frame[0x3] = format;
++	frame[0x4] = channel_allocation;
++	frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
++	frame[0x6] = 0;
++	frame[0x7] = 0;
++	frame[0x8] = 0;
++	frame[0x9] = 0;
++	frame[0xA] = 0;
++
++	r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
++
++	WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
++		frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
++	WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
++		frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
++}
++
++/*
++ * test if audio buffer is filled enough to start playing
++ */
++static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
++
++	return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
++}
++
++/*
++ * have buffer status changed since last call?
++ */
++int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
++{
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	int status, result;
++
++	if (!radeon_encoder->hdmi_offset)
++		return 0;
++
++	status = r600_hdmi_is_audio_buffer_filled(encoder);
++	result = radeon_encoder->hdmi_buffer_status != status;
++	radeon_encoder->hdmi_buffer_status = status;
++
++	return result;
++}
++
++/*
++ * write the audio workaround status to the hardware
++ */
++void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	uint32_t offset = radeon_encoder->hdmi_offset;
++
++	if (!offset)
++		return;
++
++	if (r600_hdmi_is_audio_buffer_filled(encoder)) {
++		/* disable audio workaround and start delivering audio frames */
++		WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
++
++	} else if (radeon_encoder->hdmi_audio_workaround) {
++		/* enable audio workaround and start delivering audio frames */
++		WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
++
++	} else {
++		/* disable audio workaround and stop delivering audio frames */
++		WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
++	}
++}
++
++
++/*
++ * update the info frames with the data from the current display mode
++ */
++void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
++
++	if (!offset)
++		return;
++
++	r600_audio_set_clock(encoder, mode->clock);
++
++	WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
++	WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
++	WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
++
++	r600_hdmi_update_ACR(encoder, mode->clock);
++
++	WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
++
++	WREG32(offset+R600_HDMI_VERSION, 0x202);
++
++	r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
++		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
++
++	/* it's unknown what these bits do exactly, but they're quite useful for debugging */
++	WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
++	WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
++	WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
++	WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
++
++	r600_hdmi_audio_workaround(encoder);
++
++	/* audio packets per line; does anyone know how to calculate this? */
++	WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
++
++	/* update? reset? don't really know */
++	WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
++}
++
++/*
++ * update settings with current parameters from audio engine
++ */
++void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
++				     int channels,
++				     int rate,
++				     int bps,
++				     uint8_t status_bits,
++				     uint8_t category_code)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
++
++	uint32_t iec;
++
++	if (!offset)
++		return;
++
++	DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
++		 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
++		channels, rate, bps);
++	DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
++		  (int)status_bits, (int)category_code);
++
++	iec = 0;
++	if (status_bits & AUDIO_STATUS_PROFESSIONAL)
++		iec |= 1 << 0;
++	if (status_bits & AUDIO_STATUS_NONAUDIO)
++		iec |= 1 << 1;
++	if (status_bits & AUDIO_STATUS_COPYRIGHT)
++		iec |= 1 << 2;
++	if (status_bits & AUDIO_STATUS_EMPHASIS)
++		iec |= 1 << 3;
++
++	iec |= category_code << 8;
++
++	switch (rate) {
++	case  32000: iec |= 0x3 << 24; break;
++	case  44100: iec |= 0x0 << 24; break;
++	case  88200: iec |= 0x8 << 24; break;
++	case 176400: iec |= 0xc << 24; break;
++	case  48000: iec |= 0x2 << 24; break;
++	case  96000: iec |= 0xa << 24; break;
++	case 192000: iec |= 0xe << 24; break;
++	}
++
++	WREG32(offset+R600_HDMI_IEC60958_1, iec);
++
++	iec = 0;
++	switch (bps) {
++	case 16: iec |= 0x2; break;
++	case 20: iec |= 0x3; break;
++	case 24: iec |= 0xb; break;
++	}
++	if (status_bits & AUDIO_STATUS_V)
++		iec |= 0x5 << 16;
++
++	WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
++
++	/* 0x021 or 0x031 sets the audio frame length */
++	WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
++	r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
++
++	r600_hdmi_audio_workaround(encoder);
++
++	/* update? reset? don't really know */
++	WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
++}
++
++/*
++ * enable/disable the HDMI engine
++ */
++void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
++
++	if (!offset)
++		return;
++
++	DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
++
++	/* some versions of atombios ignore the enable HDMI flag,
++	 * so enabling/disabling HDMI was moved here for TMDS1+2 */
++	switch (radeon_encoder->encoder_id) {
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
++		WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
++		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
++		break;
++
++	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
++		WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
++		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
++		break;
++
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++		/* This part is doubtful in my opinion */
++		WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
++		break;
++
++	default:
++		DRM_ERROR("unknown HDMI output type\n");
++		break;
++	}
++}
++
++/*
++ * determine at which register offset the HDMI encoder sits
++ */
++void r600_hdmi_init(struct drm_encoder *encoder)
++{
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++
++	switch (radeon_encoder->encoder_id) {
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++		radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
++		break;
++
++	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
++		switch (r600_audio_tmds_index(encoder)) {
++		case 0:
++			radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
++			break;
++		case 1:
++			radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
++			break;
++		default:
++			radeon_encoder->hdmi_offset = 0;
++			break;
++		}
++		break;
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++		radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
++		break;
++
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++		radeon_encoder->hdmi_offset = R600_HDMI_DIG;
++		break;
++
++	default:
++		radeon_encoder->hdmi_offset = 0;
++		break;
++	}
++
++	DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
++		  radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
++
++	/* TODO: make this configurable */
++	radeon_encoder->hdmi_audio_workaround = 0;
++}
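(A quick numeric check of the ACR fallback in the file above, under the assumption that mode->clock is in kHz as elsewhere in DRM: for a 25,200 kHz pixel clock at 32 kHz audio with N = 4096, the exact CTS is 25,200,000 * 4096 / (128 * 32,000) = 25,200, i.e. the table entry. Note the in-kernel expression clock*N/(128*freq)*1000 divides before the final multiply, so at this clock it would truncate to 25,000 -- the lookup table carrying the common clocks keeps the computed fallback out of that path. A stand-alone version that multiplies first:)

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: ACR CTS with 64-bit multiply-first ordering to
 * avoid the truncation discussed above. */
static uint32_t acr_cts(uint64_t clock_khz, uint64_t n, uint64_t freq_hz)
{
	return (uint32_t)(clock_khz * 1000 * n / (128 * freq_hz));
}

int main(void)
{
	printf("CTS = %u\n", (unsigned)acr_cts(25200, 4096, 32000)); /* 25200 */
	return 0;
}
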
+diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
+index e2d1f5f..d0e28ff 100644
+--- a/drivers/gpu/drm/radeon/r600_reg.h
++++ b/drivers/gpu/drm/radeon/r600_reg.h
+@@ -110,5 +110,79 @@
+ #define R600_BIOS_6_SCRATCH               0x173c
+ #define R600_BIOS_7_SCRATCH               0x1740
+ 
++/* Audio: these registers were reverse engineered,
++ * so the chance is high that the naming is wrong.
++ * R6xx+ ??? */
++
++/* Audio clocks */
++#define R600_AUDIO_PLL1_MUL               0x0514
++#define R600_AUDIO_PLL1_DIV               0x0518
++#define R600_AUDIO_PLL2_MUL               0x0524
++#define R600_AUDIO_PLL2_DIV               0x0528
++#define R600_AUDIO_CLK_SRCSEL             0x0534
++
++/* Audio general */
++#define R600_AUDIO_ENABLE                 0x7300
++#define R600_AUDIO_TIMING                 0x7344
++
++/* Audio params */
++#define R600_AUDIO_VENDOR_ID              0x7380
++#define R600_AUDIO_REVISION_ID            0x7384
++#define R600_AUDIO_ROOT_NODE_COUNT        0x7388
++#define R600_AUDIO_NID1_NODE_COUNT        0x738c
++#define R600_AUDIO_NID1_TYPE              0x7390
++#define R600_AUDIO_SUPPORTED_SIZE_RATE    0x7394
++#define R600_AUDIO_SUPPORTED_CODEC        0x7398
++#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
++#define R600_AUDIO_NID2_CAPS              0x73a0
++#define R600_AUDIO_NID3_CAPS              0x73a4
++#define R600_AUDIO_NID3_PIN_CAPS          0x73a8
++
++/* Audio conn list */
++#define R600_AUDIO_CONN_LIST_LEN          0x73ac
++#define R600_AUDIO_CONN_LIST              0x73b0
++
++/* Audio verbs */
++#define R600_AUDIO_RATE_BPS_CHANNEL       0x73c0
++#define R600_AUDIO_PLAYING                0x73c4
++#define R600_AUDIO_IMPLEMENTATION_ID      0x73c8
++#define R600_AUDIO_CONFIG_DEFAULT         0x73cc
++#define R600_AUDIO_PIN_SENSE              0x73d0
++#define R600_AUDIO_PIN_WIDGET_CNTL        0x73d4
++#define R600_AUDIO_STATUS_BITS            0x73d8
++
++/* HDMI base register addresses */
++#define R600_HDMI_TMDS1                   0x7400
++#define R600_HDMI_TMDS2                   0x7700
++#define R600_HDMI_DIG                     0x7800
++
++/* HDMI registers */
++#define R600_HDMI_ENABLE           0x00
++#define R600_HDMI_STATUS           0x04
++#define R600_HDMI_CNTL             0x08
++#define R600_HDMI_UNKNOWN_0        0x0C
++#define R600_HDMI_AUDIOCNTL        0x10
++#define R600_HDMI_VIDEOCNTL        0x14
++#define R600_HDMI_VERSION          0x18
++#define R600_HDMI_UNKNOWN_1        0x28
++#define R600_HDMI_VIDEOINFOFRAME_0 0x54
++#define R600_HDMI_VIDEOINFOFRAME_1 0x58
++#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
++#define R600_HDMI_VIDEOINFOFRAME_3 0x60
++#define R600_HDMI_32kHz_CTS        0xac
++#define R600_HDMI_32kHz_N          0xb0
++#define R600_HDMI_44_1kHz_CTS      0xb4
++#define R600_HDMI_44_1kHz_N        0xb8
++#define R600_HDMI_48kHz_CTS        0xbc
++#define R600_HDMI_48kHz_N          0xc0
++#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
++#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
++#define R600_HDMI_IEC60958_1       0xd4
++#define R600_HDMI_IEC60958_2       0xd8
++#define R600_HDMI_UNKNOWN_2        0xdc
++#define R600_HDMI_AUDIO_DEBUG_0    0xe0
++#define R600_HDMI_AUDIO_DEBUG_1    0xe4
++#define R600_HDMI_AUDIO_DEBUG_2    0xe8
++#define R600_HDMI_AUDIO_DEBUG_3    0xec
+ 
+ #endif
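(The register map above is per-encoder: each HDMI block -- TMDS1, TMDS2, DIG -- is a copy of the same small register file at a different base, so a register is addressed as base plus offset. A hypothetical helper, using only constants from the hunk:)

#include <stdint.h>

#define R600_HDMI_TMDS1   0x7400
#define R600_HDMI_STATUS  0x04

/* Status register of the first TMDS HDMI block: 0x7400 + 0x04 = 0x7404. */
static uint32_t hdmi_status_reg(uint32_t block_base)
{
	return block_base + R600_HDMI_STATUS;
}
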
+diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
+index 27ab428..3048088 100644
+--- a/drivers/gpu/drm/radeon/r600d.h
++++ b/drivers/gpu/drm/radeon/r600d.h
+@@ -456,7 +456,215 @@
+ #define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
+ #define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
+ 
+-
++#define IH_RB_CNTL                                        0x3e00
++#       define IH_RB_ENABLE                               (1 << 0)
++#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
++#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
++#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
++#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
++#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
++#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
++#define IH_RB_BASE                                        0x3e04
++#define IH_RB_RPTR                                        0x3e08
++#define IH_RB_WPTR                                        0x3e0c
++#       define RB_OVERFLOW                                (1 << 0)
++#       define WPTR_OFFSET_MASK                           0x3fffc
++#define IH_RB_WPTR_ADDR_HI                                0x3e10
++#define IH_RB_WPTR_ADDR_LO                                0x3e14
++#define IH_CNTL                                           0x3e18
++#       define ENABLE_INTR                                (1 << 0)
++#       define IH_MC_SWAP(x)                              ((x) << 2)
++#       define IH_MC_SWAP_NONE                            0
++#       define IH_MC_SWAP_16BIT                           1
++#       define IH_MC_SWAP_32BIT                           2
++#       define IH_MC_SWAP_64BIT                           3
++#       define RPTR_REARM                                 (1 << 4)
++#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
++#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
++
++#define RLC_CNTL                                          0x3f00
++#       define RLC_ENABLE                                 (1 << 0)
++#define RLC_HB_BASE                                       0x3f10
++#define RLC_HB_CNTL                                       0x3f0c
++#define RLC_HB_RPTR                                       0x3f20
++#define RLC_HB_WPTR                                       0x3f1c
++#define RLC_HB_WPTR_LSB_ADDR                              0x3f14
++#define RLC_HB_WPTR_MSB_ADDR                              0x3f18
++#define RLC_MC_CNTL                                       0x3f44
++#define RLC_UCODE_CNTL                                    0x3f48
++#define RLC_UCODE_ADDR                                    0x3f2c
++#define RLC_UCODE_DATA                                    0x3f30
++
++#define SRBM_SOFT_RESET                                   0xe60
++#       define SOFT_RESET_RLC                             (1 << 13)
++
++#define CP_INT_CNTL                                       0xc124
++#       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
++#       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
++#       define SCRATCH_INT_ENABLE                         (1 << 25)
++#       define TIME_STAMP_INT_ENABLE                      (1 << 26)
++#       define IB2_INT_ENABLE                             (1 << 29)
++#       define IB1_INT_ENABLE                             (1 << 30)
++#       define RB_INT_ENABLE                              (1 << 31)
++#define CP_INT_STATUS                                     0xc128
++#       define SCRATCH_INT_STAT                           (1 << 25)
++#       define TIME_STAMP_INT_STAT                        (1 << 26)
++#       define IB2_INT_STAT                               (1 << 29)
++#       define IB1_INT_STAT                               (1 << 30)
++#       define RB_INT_STAT                                (1 << 31)
++
++#define GRBM_INT_CNTL                                     0x8060
++#       define RDERR_INT_ENABLE                           (1 << 0)
++#       define WAIT_COUNT_TIMEOUT_INT_ENABLE              (1 << 1)
++#       define GUI_IDLE_INT_ENABLE                        (1 << 19)
++
++#define INTERRUPT_CNTL                                    0x5468
++#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
++#       define IH_DUMMY_RD_EN                             (1 << 1)
++#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
++#       define GEN_IH_INT_EN                              (1 << 8)
++#define INTERRUPT_CNTL2                                   0x546c
++
++#define D1MODE_VBLANK_STATUS                              0x6534
++#define D2MODE_VBLANK_STATUS                              0x6d34
++#       define DxMODE_VBLANK_OCCURRED                     (1 << 0)
++#       define DxMODE_VBLANK_ACK                          (1 << 4)
++#       define DxMODE_VBLANK_STAT                         (1 << 12)
++#       define DxMODE_VBLANK_INTERRUPT                    (1 << 16)
++#       define DxMODE_VBLANK_INTERRUPT_TYPE               (1 << 17)
++#define D1MODE_VLINE_STATUS                               0x653c
++#define D2MODE_VLINE_STATUS                               0x6d3c
++#       define DxMODE_VLINE_OCCURRED                      (1 << 0)
++#       define DxMODE_VLINE_ACK                           (1 << 4)
++#       define DxMODE_VLINE_STAT                          (1 << 12)
++#       define DxMODE_VLINE_INTERRUPT                     (1 << 16)
++#       define DxMODE_VLINE_INTERRUPT_TYPE                (1 << 17)
++#define DxMODE_INT_MASK                                   0x6540
++#       define D1MODE_VBLANK_INT_MASK                     (1 << 0)
++#       define D1MODE_VLINE_INT_MASK                      (1 << 4)
++#       define D2MODE_VBLANK_INT_MASK                     (1 << 8)
++#       define D2MODE_VLINE_INT_MASK                      (1 << 12)
++#define DCE3_DISP_INTERRUPT_STATUS                        0x7ddc
++#       define DC_HPD1_INTERRUPT                          (1 << 18)
++#       define DC_HPD2_INTERRUPT                          (1 << 19)
++#define DISP_INTERRUPT_STATUS                             0x7edc
++#       define LB_D1_VLINE_INTERRUPT                      (1 << 2)
++#       define LB_D2_VLINE_INTERRUPT                      (1 << 3)
++#       define LB_D1_VBLANK_INTERRUPT                     (1 << 4)
++#       define LB_D2_VBLANK_INTERRUPT                     (1 << 5)
++#       define DACA_AUTODETECT_INTERRUPT                  (1 << 16)
++#       define DACB_AUTODETECT_INTERRUPT                  (1 << 17)
++#       define DC_HOT_PLUG_DETECT1_INTERRUPT              (1 << 18)
++#       define DC_HOT_PLUG_DETECT2_INTERRUPT              (1 << 19)
++#       define DC_I2C_SW_DONE_INTERRUPT                   (1 << 20)
++#       define DC_I2C_HW_DONE_INTERRUPT                   (1 << 21)
++#define DISP_INTERRUPT_STATUS_CONTINUE                    0x7ee8
++#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE               0x7de8
++#       define DC_HPD4_INTERRUPT                          (1 << 14)
++#       define DC_HPD4_RX_INTERRUPT                       (1 << 15)
++#       define DC_HPD3_INTERRUPT                          (1 << 28)
++#       define DC_HPD1_RX_INTERRUPT                       (1 << 29)
++#       define DC_HPD2_RX_INTERRUPT                       (1 << 30)
++#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2              0x7dec
++#       define DC_HPD3_RX_INTERRUPT                       (1 << 0)
++#       define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 1)
++#       define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 2)
++#       define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 3)
++#       define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 4)
++#       define AUX1_SW_DONE_INTERRUPT                     (1 << 5)
++#       define AUX1_LS_DONE_INTERRUPT                     (1 << 6)
++#       define AUX2_SW_DONE_INTERRUPT                     (1 << 7)
++#       define AUX2_LS_DONE_INTERRUPT                     (1 << 8)
++#       define AUX3_SW_DONE_INTERRUPT                     (1 << 9)
++#       define AUX3_LS_DONE_INTERRUPT                     (1 << 10)
++#       define AUX4_SW_DONE_INTERRUPT                     (1 << 11)
++#       define AUX4_LS_DONE_INTERRUPT                     (1 << 12)
++#       define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 13)
++#       define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 14)
++/* DCE 3.2 */
++#       define AUX5_SW_DONE_INTERRUPT                     (1 << 15)
++#       define AUX5_LS_DONE_INTERRUPT                     (1 << 16)
++#       define AUX6_SW_DONE_INTERRUPT                     (1 << 17)
++#       define AUX6_LS_DONE_INTERRUPT                     (1 << 18)
++#       define DC_HPD5_INTERRUPT                          (1 << 19)
++#       define DC_HPD5_RX_INTERRUPT                       (1 << 20)
++#       define DC_HPD6_INTERRUPT                          (1 << 21)
++#       define DC_HPD6_RX_INTERRUPT                       (1 << 22)
++
++#define DACA_AUTO_DETECT_CONTROL                          0x7828
++#define DACB_AUTO_DETECT_CONTROL                          0x7a28
++#define DCE3_DACA_AUTO_DETECT_CONTROL                     0x7028
++#define DCE3_DACB_AUTO_DETECT_CONTROL                     0x7128
++#       define DACx_AUTODETECT_MODE(x)                    ((x) << 0)
++#       define DACx_AUTODETECT_MODE_NONE                  0
++#       define DACx_AUTODETECT_MODE_CONNECT               1
++#       define DACx_AUTODETECT_MODE_DISCONNECT            2
++#       define DACx_AUTODETECT_FRAME_TIME_COUNTER(x)      ((x) << 8)
++/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
++#       define DACx_AUTODETECT_CHECK_MASK(x)              ((x) << 16)
++
++#define DCE3_DACA_AUTODETECT_INT_CONTROL                  0x7038
++#define DCE3_DACB_AUTODETECT_INT_CONTROL                  0x7138
++#define DACA_AUTODETECT_INT_CONTROL                       0x7838
++#define DACB_AUTODETECT_INT_CONTROL                       0x7a38
++#       define DACx_AUTODETECT_ACK                        (1 << 0)
++#       define DACx_AUTODETECT_INT_ENABLE                 (1 << 16)
++
++#define DC_HOT_PLUG_DETECT1_CONTROL                       0x7d00
++#define DC_HOT_PLUG_DETECT2_CONTROL                       0x7d10
++#define DC_HOT_PLUG_DETECT3_CONTROL                       0x7d24
++#       define DC_HOT_PLUG_DETECTx_EN                     (1 << 0)
++
++#define DC_HOT_PLUG_DETECT1_INT_STATUS                    0x7d04
++#define DC_HOT_PLUG_DETECT2_INT_STATUS                    0x7d14
++#define DC_HOT_PLUG_DETECT3_INT_STATUS                    0x7d28
++#       define DC_HOT_PLUG_DETECTx_INT_STATUS             (1 << 0)
++#       define DC_HOT_PLUG_DETECTx_SENSE                  (1 << 1)
++
++/* DCE 3.0 */
++#define DC_HPD1_INT_STATUS                                0x7d00
++#define DC_HPD2_INT_STATUS                                0x7d0c
++#define DC_HPD3_INT_STATUS                                0x7d18
++#define DC_HPD4_INT_STATUS                                0x7d24
++/* DCE 3.2 */
++#define DC_HPD5_INT_STATUS                                0x7dc0
++#define DC_HPD6_INT_STATUS                                0x7df4
++#       define DC_HPDx_INT_STATUS                         (1 << 0)
++#       define DC_HPDx_SENSE                              (1 << 1)
++#       define DC_HPDx_RX_INT_STATUS                      (1 << 8)
++
++#define DC_HOT_PLUG_DETECT1_INT_CONTROL                   0x7d08
++#define DC_HOT_PLUG_DETECT2_INT_CONTROL                   0x7d18
++#define DC_HOT_PLUG_DETECT3_INT_CONTROL                   0x7d2c
++#       define DC_HOT_PLUG_DETECTx_INT_ACK                (1 << 0)
++#       define DC_HOT_PLUG_DETECTx_INT_POLARITY           (1 << 8)
++#       define DC_HOT_PLUG_DETECTx_INT_EN                 (1 << 16)
++/* DCE 3.0 */
++#define DC_HPD1_INT_CONTROL                               0x7d04
++#define DC_HPD2_INT_CONTROL                               0x7d10
++#define DC_HPD3_INT_CONTROL                               0x7d1c
++#define DC_HPD4_INT_CONTROL                               0x7d28
++/* DCE 3.2 */
++#define DC_HPD5_INT_CONTROL                               0x7dc4
++#define DC_HPD6_INT_CONTROL                               0x7df8
++#       define DC_HPDx_INT_ACK                            (1 << 0)
++#       define DC_HPDx_INT_POLARITY                       (1 << 8)
++#       define DC_HPDx_INT_EN                             (1 << 16)
++#       define DC_HPDx_RX_INT_ACK                         (1 << 20)
++#       define DC_HPDx_RX_INT_EN                          (1 << 24)
++
++/* DCE 3.0 */
++#define DC_HPD1_CONTROL                                   0x7d08
++#define DC_HPD2_CONTROL                                   0x7d14
++#define DC_HPD3_CONTROL                                   0x7d20
++#define DC_HPD4_CONTROL                                   0x7d2c
++/* DCE 3.2 */
++#define DC_HPD5_CONTROL                                   0x7dc8
++#define DC_HPD6_CONTROL                                   0x7dfc
++#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
++#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
++/* DCE 3.2 */
++#       define DC_HPDx_EN                                 (1 << 28)
+ 
+ /*
+  * PM4
+@@ -500,7 +708,6 @@
+ #define	PACKET3_WAIT_REG_MEM				0x3C
+ #define	PACKET3_MEM_WRITE				0x3D
+ #define	PACKET3_INDIRECT_BUFFER				0x32
+-#define	PACKET3_CP_INTERRUPT				0x40
+ #define	PACKET3_SURFACE_SYNC				0x43
+ #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
+ #              define PACKET3_TC_ACTION_ENA        (1 << 23)
+@@ -674,4 +881,30 @@
+ #define		S_000E60_SOFT_RESET_TSC(x)		(((x) & 1) << 16)
+ #define		S_000E60_SOFT_RESET_VMC(x)		(((x) & 1) << 17)
+ 
++#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL		0x5480
++
++#define R_0280E0_CB_COLOR0_FRAG                      0x0280E0
++#define   S_0280E0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
++#define   G_0280E0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
++#define   C_0280E0_BASE_256B                           0x00000000
++#define R_0280E4_CB_COLOR1_FRAG                      0x0280E4
++#define R_0280E8_CB_COLOR2_FRAG                      0x0280E8
++#define R_0280EC_CB_COLOR3_FRAG                      0x0280EC
++#define R_0280F0_CB_COLOR4_FRAG                      0x0280F0
++#define R_0280F4_CB_COLOR5_FRAG                      0x0280F4
++#define R_0280F8_CB_COLOR6_FRAG                      0x0280F8
++#define R_0280FC_CB_COLOR7_FRAG                      0x0280FC
++#define R_0280C0_CB_COLOR0_TILE                      0x0280C0
++#define   S_0280C0_BASE_256B(x)                        (((x) & 0xFFFFFFFF) << 0)
++#define   G_0280C0_BASE_256B(x)                        (((x) >> 0) & 0xFFFFFFFF)
++#define   C_0280C0_BASE_256B                           0x00000000
++#define R_0280C4_CB_COLOR1_TILE                      0x0280C4
++#define R_0280C8_CB_COLOR2_TILE                      0x0280C8
++#define R_0280CC_CB_COLOR3_TILE                      0x0280CC
++#define R_0280D0_CB_COLOR4_TILE                      0x0280D0
++#define R_0280D4_CB_COLOR5_TILE                      0x0280D4
++#define R_0280D8_CB_COLOR6_TILE                      0x0280D8
++#define R_0280DC_CB_COLOR7_TILE                      0x0280DC
++
++
+ #endif
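(The S_/G_/C_ triplets above follow the driver's usual field-macro convention: S_ shifts a value into the field, G_ extracts it, and C_ is the register mask with the field cleared -- all-zero here because BASE_256B spans the whole register. A read-modify-write then composes as in this sketch; the helper name is made up.)

#include <stdint.h>

#define S_0280E0_BASE_256B(x)  (((x) & 0xFFFFFFFF) << 0)
#define C_0280E0_BASE_256B     0x00000000

/* Illustrative read-modify-write: clear the field with C_, merge the
 * new value with S_. */
static uint32_t set_cb_color0_frag(uint32_t reg, uint32_t base_256b)
{
	return (reg & C_0280E0_BASE_256B) | S_0280E0_BASE_256B(base_256b);
}
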
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 224506a..c0356bb 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -28,8 +28,6 @@
+ #ifndef __RADEON_H__
+ #define __RADEON_H__
+ 
+-#include "radeon_object.h"
+-
+ /* TODO: Here are things that needs to be done :
+  *	- surface allocator & initializer : (bit like scratch reg) should
+  *	  initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
+@@ -67,6 +65,11 @@
+ #include <linux/list.h>
+ #include <linux/kref.h>
+ 
++#include <ttm/ttm_bo_api.h>
++#include <ttm/ttm_bo_driver.h>
++#include <ttm/ttm_placement.h>
++#include <ttm/ttm_module.h>
++
+ #include "radeon_family.h"
+ #include "radeon_mode.h"
+ #include "radeon_reg.h"
+@@ -85,12 +88,15 @@ extern int radeon_benchmarking;
+ extern int radeon_testing;
+ extern int radeon_connector_table;
+ extern int radeon_tv;
++extern int radeon_new_pll;
++extern int radeon_audio;
+ 
+ /*
+  * Copy from radeon_drv.h so we don't have to include both and have conflicting
+  * symbol;
+  */
+ #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
++/* RADEON_IB_POOL_SIZE must be a power of 2 */
+ #define RADEON_IB_POOL_SIZE		16
+ #define RADEON_DEBUGFS_MAX_NUM_FILES	32
+ #define RADEONFB_CONN_LIMIT		4
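(The new power-of-2 requirement on RADEON_IB_POOL_SIZE pairs with the head_id field that replaces the allocation bitmap further down -- presumably so the pool can be scanned from a wrapping index with a cheap mask rather than a modulo. A sketch of that presumed scheme:)

#define IB_POOL_SIZE 16	/* must be a power of 2 */

/* Wrap an index into the pool without a division. */
static unsigned ib_pool_next(unsigned idx)
{
	return (idx + 1) & (IB_POOL_SIZE - 1);
}
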
+@@ -157,6 +163,7 @@ struct radeon_fence_driver {
+ 	struct list_head		created;
+ 	struct list_head		emited;
+ 	struct list_head		signaled;
++	bool				initialized;
+ };
+ 
+ struct radeon_fence {
+@@ -186,76 +193,63 @@ void radeon_fence_unref(struct radeon_fence **fence);
+  * Tiling registers
+  */
+ struct radeon_surface_reg {
+-	struct radeon_object *robj;
++	struct radeon_bo *bo;
+ };
+ 
+ #define RADEON_GEM_MAX_SURFACES 8
+ 
+ /*
+- * Radeon buffer.
++ * TTM.
+  */
+-struct radeon_object;
++struct radeon_mman {
++	struct ttm_bo_global_ref        bo_global_ref;
++	struct ttm_global_reference	mem_global_ref;
++	struct ttm_bo_device		bdev;
++	bool				mem_global_referenced;
++	bool				initialized;
++};
+ 
+-struct radeon_object_list {
++struct radeon_bo {
++	/* Protected by gem.mutex */
++	struct list_head		list;
++	/* Protected by tbo.reserved */
++	u32				placements[3];
++	struct ttm_placement		placement;
++	struct ttm_buffer_object	tbo;
++	struct ttm_bo_kmap_obj		kmap;
++	unsigned			pin_count;
++	void				*kptr;
++	u32				tiling_flags;
++	u32				pitch;
++	int				surface_reg;
++	/* Constant after initialization */
++	struct radeon_device		*rdev;
++	struct drm_gem_object		*gobj;
++};
++
++struct radeon_bo_list {
+ 	struct list_head	list;
+-	struct radeon_object	*robj;
++	struct radeon_bo	*bo;
+ 	uint64_t		gpu_offset;
+ 	unsigned		rdomain;
+ 	unsigned		wdomain;
+-	uint32_t                tiling_flags;
++	u32			tiling_flags;
+ };
+ 
+-int radeon_object_init(struct radeon_device *rdev);
+-void radeon_object_fini(struct radeon_device *rdev);
+-int radeon_object_create(struct radeon_device *rdev,
+-			 struct drm_gem_object *gobj,
+-			 unsigned long size,
+-			 bool kernel,
+-			 uint32_t domain,
+-			 bool interruptible,
+-			 struct radeon_object **robj_ptr);
+-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
+-void radeon_object_kunmap(struct radeon_object *robj);
+-void radeon_object_unref(struct radeon_object **robj);
+-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
+-		      uint64_t *gpu_addr);
+-void radeon_object_unpin(struct radeon_object *robj);
+-int radeon_object_wait(struct radeon_object *robj);
+-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
+-int radeon_object_evict_vram(struct radeon_device *rdev);
+-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
+-void radeon_object_force_delete(struct radeon_device *rdev);
+-void radeon_object_list_add_object(struct radeon_object_list *lobj,
+-				   struct list_head *head);
+-int radeon_object_list_validate(struct list_head *head, void *fence);
+-void radeon_object_list_unvalidate(struct list_head *head);
+-void radeon_object_list_clean(struct list_head *head);
+-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+-			     struct vm_area_struct *vma);
+-unsigned long radeon_object_size(struct radeon_object *robj);
+-void radeon_object_clear_surface_reg(struct radeon_object *robj);
+-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
+-			       bool force_drop);
+-void radeon_object_set_tiling_flags(struct radeon_object *robj,
+-				    uint32_t tiling_flags, uint32_t pitch);
+-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
+-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+-			   struct ttm_mem_reg *mem);
+-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+ /*
+  * GEM objects.
+  */
+ struct radeon_gem {
++	struct mutex		mutex;
+ 	struct list_head	objects;
+ };
+ 
+ int radeon_gem_init(struct radeon_device *rdev);
+ void radeon_gem_fini(struct radeon_device *rdev);
+ int radeon_gem_object_create(struct radeon_device *rdev, int size,
+-			     int alignment, int initial_domain,
+-			     bool discardable, bool kernel,
+-			     bool interruptible,
+-			     struct drm_gem_object **obj);
++				int alignment, int initial_domain,
++				bool discardable, bool kernel,
++				struct drm_gem_object **obj);
+ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ 			  uint64_t *gpu_addr);
+ void radeon_gem_object_unpin(struct drm_gem_object *obj);
+@@ -271,7 +265,7 @@ struct radeon_gart_table_ram {
+ };
+ 
+ struct radeon_gart_table_vram {
+-	struct radeon_object		*robj;
++	struct radeon_bo		*robj;
+ 	volatile uint32_t		*ptr;
+ };
+ 
+@@ -326,10 +320,12 @@ struct radeon_mc {
+ 	u64			real_vram_size;
+ 	int			vram_mtrr;
+ 	bool			vram_is_ddr;
++	bool                    igp_sideport_enabled;
+ };
+ 
+ int radeon_mc_setup(struct radeon_device *rdev);
+-
++bool radeon_combios_sideport_present(struct radeon_device *rdev);
++bool radeon_atombios_sideport_present(struct radeon_device *rdev);
+ 
+ /*
+  * GPU scratch registers structures, functions & helpers
+@@ -352,22 +348,28 @@ struct radeon_irq {
+ 	bool		sw_int;
+ 	/* FIXME: use a define max crtc rather than hardcode it */
+ 	bool		crtc_vblank_int[2];
++	/* FIXME: use defines for max hpd/dacs */
++	bool            hpd[6];
++	spinlock_t sw_lock;
++	int sw_refcount;
+ };
+ 
+ int radeon_irq_kms_init(struct radeon_device *rdev);
+ void radeon_irq_kms_fini(struct radeon_device *rdev);
+-
++void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
++void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+ 
+ /*
+  * CP & ring.
+  */
+ struct radeon_ib {
+ 	struct list_head	list;
+-	unsigned long		idx;
++	unsigned		idx;
+ 	uint64_t		gpu_addr;
+ 	struct radeon_fence	*fence;
+-	uint32_t	*ptr;
++	uint32_t		*ptr;
+ 	uint32_t		length_dw;
++	bool			free;
+ };
+ 
+ /*
+@@ -376,15 +378,14 @@ struct radeon_ib {
+  */
+ struct radeon_ib_pool {
+ 	struct mutex		mutex;
+-	struct radeon_object	*robj;
+-	struct list_head	scheduled_ibs;
++	struct radeon_bo	*robj;
+ 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
+ 	bool			ready;
+-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
++	unsigned		head_id;
+ };
+ 
+ struct radeon_cp {
+-	struct radeon_object	*ring_obj;
++	struct radeon_bo	*ring_obj;
+ 	volatile uint32_t	*ring;
+ 	unsigned		rptr;
+ 	unsigned		wptr;
+@@ -399,8 +400,25 @@ struct radeon_cp {
+ 	bool			ready;
+ };
+ 
++/*
++ * R6xx+ IH ring
++ */
++struct r600_ih {
++	struct radeon_bo	*ring_obj;
++	volatile uint32_t	*ring;
++	unsigned		rptr;
++	unsigned		wptr;
++	unsigned		wptr_old;
++	unsigned		ring_size;
++	uint64_t		gpu_addr;
++	uint32_t		ptr_mask;
++	spinlock_t              lock;
++	bool                    enabled;
++};
++
+ struct r600_blit {
+-	struct radeon_object	*shader_obj;
++	struct mutex		mutex;
++	struct radeon_bo	*shader_obj;
+ 	u64 shader_gpu_addr;
+ 	u32 vs_offset, ps_offset;
+ 	u32 state_offset;
+@@ -430,8 +448,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
+  */
+ struct radeon_cs_reloc {
+ 	struct drm_gem_object		*gobj;
+-	struct radeon_object		*robj;
+-	struct radeon_object_list	lobj;
++	struct radeon_bo		*robj;
++	struct radeon_bo_list		lobj;
+ 	uint32_t			handle;
+ 	uint32_t			flags;
+ };
+@@ -448,6 +466,7 @@ struct radeon_cs_chunk {
+ };
+ 
+ struct radeon_cs_parser {
++	struct device		*dev;
+ 	struct radeon_device	*rdev;
+ 	struct drm_file		*filp;
+ 	/* chunks */
+@@ -527,7 +546,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
+  * Writeback
+  */
+ struct radeon_wb {
+-	struct radeon_object	*wb_obj;
++	struct radeon_bo	*wb_obj;
+ 	volatile uint32_t	*wb;
+ 	uint64_t		gpu_addr;
+ };
+@@ -639,6 +658,17 @@ struct radeon_asic {
+ 			       uint32_t offset, uint32_t obj_size);
+ 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
+ 	void (*bandwidth_update)(struct radeon_device *rdev);
++	void (*hpd_init)(struct radeon_device *rdev);
++	void (*hpd_fini)(struct radeon_device *rdev);
++	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
++	void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
++	/* ioctl hw specific callback. Some hw might want to perform special
++	 * operation on specific ioctl. For instance on wait idle some hw
++	 * might want to perform and HDP flush through MMIO as it seems that
++	 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
++	 * through ring.
++	 */
++	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ };
+ 
+ /*
+@@ -647,11 +677,14 @@ struct radeon_asic {
+ struct r100_asic {
+ 	const unsigned	*reg_safe_bm;
+ 	unsigned	reg_safe_bm_size;
++	u32		hdp_cntl;
+ };
+ 
+ struct r300_asic {
+ 	const unsigned	*reg_safe_bm;
+ 	unsigned	reg_safe_bm_size;
++	u32		resync_scratch;
++	u32		hdp_cntl;
+ };
+ 
+ struct r600_asic {
+@@ -751,9 +784,9 @@ struct radeon_device {
+ 	uint8_t				*bios;
+ 	bool				is_atom_bios;
+ 	uint16_t			bios_header_start;
+-	struct radeon_object		*stollen_vga_memory;
++	struct radeon_bo		*stollen_vga_memory;
+ 	struct fb_info			*fbdev_info;
+-	struct radeon_object		*fbdev_robj;
++	struct radeon_bo		*fbdev_rbo;
+ 	struct radeon_framebuffer	*fbdev_rfb;
+ 	/* Register mmio */
+ 	resource_size_t			rmmio_base;
+@@ -791,8 +824,20 @@ struct radeon_device {
+ 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
+ 	const struct firmware *me_fw;	/* all family ME firmware */
+ 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
++	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
+ 	struct r600_blit r600_blit;
+ 	int msi_enabled; /* msi enabled */
++	struct r600_ih ih; /* r6/700 interrupt ring */
++	struct workqueue_struct *wq;
++	struct work_struct hotplug_work;
++
++	/* audio stuff */
++	struct timer_list	audio_timer;
++	int			audio_channels;
++	int			audio_rate;
++	int			audio_bits_per_sample;
++	uint8_t			audio_status_bits;
++	uint8_t			audio_category_code;
+ };
+ 
+ int radeon_device_init(struct radeon_device *rdev,
+@@ -811,7 +856,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
+ 
+ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+ {
+-	if (reg < 0x10000)
++	if (reg < rdev->rmmio_size)
+ 		return readl(((void __iomem *)rdev->rmmio) + reg);
+ 	else {
+ 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+@@ -821,7 +866,7 @@ static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
+ 
+ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+ {
+-	if (reg < 0x10000)
++	if (reg < rdev->rmmio_size)
+ 		writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ 	else {
+ 		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+@@ -829,6 +874,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
+ 	}
+ }
+ 
++/*
++ * Cast helper
++ */
++#define to_radeon_fence(p) ((struct radeon_fence *)(p))
+ 
+ /*
+  * Registers read & write functions.
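(The rmmio_size test above distinguishes direct and indexed access: registers inside the mapped aperture are touched through the mapping, anything beyond it goes through the RADEON_MM_INDEX/RADEON_MM_DATA pair. The MM_DATA read is an assumption here -- the hunk only shows the MM_INDEX write. A user-space style sketch of the pattern, with made-up offsets:)

#include <stddef.h>
#include <stdint.h>

#define MM_INDEX 0x0000	/* hypothetical index/data pair */
#define MM_DATA  0x0004

static uint32_t mm_rreg(volatile uint32_t *mmio, size_t mmio_size, uint32_t reg)
{
	if (reg < mmio_size)
		return mmio[reg / 4];	/* direct, inside the aperture */
	mmio[MM_INDEX / 4] = reg;	/* select the register... */
	return mmio[MM_DATA / 4];	/* ...and read it back */
}
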
+@@ -965,18 +1014,25 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+ #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
+ #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+ #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
+-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
++#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+ #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
+ #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
+ #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
+ #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
+ #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
++#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
++#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
++#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
++#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
+ 
+ /* Common functions */
++/* AGP */
++extern void radeon_agp_disable(struct radeon_device *rdev);
+ extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+ extern int radeon_modeset_init(struct radeon_device *rdev);
+ extern void radeon_modeset_fini(struct radeon_device *rdev);
+ extern bool radeon_card_posted(struct radeon_device *rdev);
++extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
+ extern int radeon_clocks_init(struct radeon_device *rdev);
+ extern void radeon_clocks_fini(struct radeon_device *rdev);
+ extern void radeon_scratch_init(struct radeon_device *rdev);
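(For orientation, the radeon_hpd_* macros above follow the driver's asic-dispatch idiom: per-chip callbacks live in a function-pointer table and the macros forward to it. A simplified stand-alone rendering, where the types and names are stand-ins rather than the driver's:)

#include <stdbool.h>

struct dev_ctx;

enum hpd_id { HPD_1, HPD_2, HPD_3 };

struct asic_ops {
	void (*hpd_init)(struct dev_ctx *dev);
	bool (*hpd_sense)(struct dev_ctx *dev, enum hpd_id hpd);
};

struct dev_ctx {
	const struct asic_ops *asic;	/* chosen once per chip family */
};

/* mirrors: #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense(...) */
#define dev_hpd_sense(dev, hpd) ((dev)->asic->hpd_sense((dev), (hpd)))
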
+@@ -984,6 +1040,8 @@ extern void radeon_surface_init(struct radeon_device *rdev);
+ extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
+ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+ extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
++extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
++extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+ 
+ /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
+ struct r100_mc_save {
+@@ -1021,7 +1079,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
+ extern void r100_vga_render_disable(struct radeon_device *rdev);
+ extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+ 						struct radeon_cs_packet *pkt,
+-						struct radeon_object *robj);
++						struct radeon_bo *robj);
+ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ 				struct radeon_cs_packet *pkt,
+ 				const unsigned *auth, unsigned n,
+@@ -1029,6 +1087,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ 				struct radeon_cs_packet *pkt,
+ 				unsigned idx);
++extern void r100_enable_bm(struct radeon_device *rdev);
++extern void r100_set_common_regs(struct radeon_device *rdev);
+ 
+ /* rv200,rv250,rv280 */
+ extern void r200_set_safe_registers(struct radeon_device *rdev);
+@@ -1091,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
+ extern void r600_cp_stop(struct radeon_device *rdev);
+ extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+ extern int r600_cp_resume(struct radeon_device *rdev);
++extern void r600_cp_fini(struct radeon_device *rdev);
+ extern int r600_count_pipe_bits(uint32_t val);
+ extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
+ extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
+@@ -1104,7 +1165,30 @@ extern void r600_wb_disable(struct radeon_device *rdev);
+ extern void r600_scratch_init(struct radeon_device *rdev);
+ extern int r600_blit_init(struct radeon_device *rdev);
+ extern void r600_blit_fini(struct radeon_device *rdev);
+-extern int r600_cp_init_microcode(struct radeon_device *rdev);
++extern int r600_init_microcode(struct radeon_device *rdev);
+ extern int r600_gpu_reset(struct radeon_device *rdev);
++/* r600 irq */
++extern int r600_irq_init(struct radeon_device *rdev);
++extern void r600_irq_fini(struct radeon_device *rdev);
++extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
++extern int r600_irq_set(struct radeon_device *rdev);
++extern void r600_irq_suspend(struct radeon_device *rdev);
++/* r600 audio */
++extern int r600_audio_init(struct radeon_device *rdev);
++extern int r600_audio_tmds_index(struct drm_encoder *encoder);
++extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
++extern void r600_audio_fini(struct radeon_device *rdev);
++extern void r600_hdmi_init(struct drm_encoder *encoder);
++extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
++extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
++extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
++extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
++					    int channels,
++					    int rate,
++					    int bps,
++					    uint8_t status_bits,
++					    uint8_t category_code);
++
++#include "radeon_object.h"
+ 
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
+index 54bf49a..c0681a5 100644
+--- a/drivers/gpu/drm/radeon/radeon_agp.c
++++ b/drivers/gpu/drm/radeon/radeon_agp.c
+@@ -144,9 +144,19 @@ int radeon_agp_init(struct radeon_device *rdev)
+ 
+ 	ret = drm_agp_info(rdev->ddev, &info);
+ 	if (ret) {
++		drm_agp_release(rdev->ddev);
+ 		DRM_ERROR("Unable to get AGP info: %d\n", ret);
+ 		return ret;
+ 	}
++
++	if (rdev->ddev->agp->agp_info.aper_size < 32) {
++		drm_agp_release(rdev->ddev);
++		dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
++			"need at least 32M, disabling AGP\n",
++			rdev->ddev->agp->agp_info.aper_size);
++		return -EINVAL;
++	}
++
+ 	mode.mode = info.mode;
+ 	agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode;
+ 	is_v3 = !!(agp_status & RADEON_AGPv3_MODE);
+@@ -221,6 +231,7 @@ int radeon_agp_init(struct radeon_device *rdev)
+ 	ret = drm_agp_enable(rdev->ddev, mode);
+ 	if (ret) {
+ 		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
++		drm_agp_release(rdev->ddev);
+ 		return ret;
+ 	}
+ 
+@@ -252,10 +263,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
+ void radeon_agp_fini(struct radeon_device *rdev)
+ {
+ #if __OS_HAS_AGP
+-	if (rdev->flags & RADEON_IS_AGP) {
+-		if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+-			drm_agp_release(rdev->ddev);
+-		}
++	if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
++		drm_agp_release(rdev->ddev);
+ 	}
+ #endif
+ }
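
[Aside, not part of the patch] The radeon_agp.c hunks above plug an acquire/release leak: once the AGP bridge has been acquired, every later failure path (the info query, an undersized aperture, a failed enable) now calls drm_agp_release() before returning. A minimal stubbed sketch of the idiom — the helpers below are stand-ins, not the real DRM calls:

    #include <stdio.h>

    static int acquired;

    static int  agp_acquire(void)    { acquired = 1; return 0; }
    static void agp_release(void)    { acquired = 0; }
    static int  agp_probe_info(void) { return -1; }  /* pretend the query fails */

    static int agp_init(void)
    {
        int r = agp_acquire();
        if (r)
            return r;

        r = agp_probe_info();
        if (r) {
            agp_release();  /* the fix: do not leak the acquire on error */
            return r;
        }
        return 0;
    }

    int main(void)
    {
        printf("init=%d acquired=%d\n", agp_init(), acquired);  /* init=-1 acquired=0 */
        return 0;
    }
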
+diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
+index c18fbee..05ee1ae 100644
+--- a/drivers/gpu/drm/radeon/radeon_asic.h
++++ b/drivers/gpu/drm/radeon/radeon_asic.h
+@@ -33,6 +33,7 @@
+  */
+ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
+ void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
++uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
+ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
+ 
+ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
+@@ -76,6 +77,11 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+ void r100_bandwidth_update(struct radeon_device *rdev);
+ void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+ int r100_ring_test(struct radeon_device *rdev);
++void r100_hpd_init(struct radeon_device *rdev);
++void r100_hpd_fini(struct radeon_device *rdev);
++bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
++void r100_hpd_set_polarity(struct radeon_device *rdev,
++			   enum radeon_hpd_id hpd);
+ 
+ static struct radeon_asic r100_asic = {
+ 	.init = &r100_init,
+@@ -100,13 +106,18 @@ static struct radeon_asic r100_asic = {
+ 	.copy = &r100_copy_blit,
+ 	.get_engine_clock = &radeon_legacy_get_engine_clock,
+ 	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = NULL,
++	.get_memory_clock = &radeon_legacy_get_memory_clock,
+ 	.set_memory_clock = NULL,
+ 	.set_pcie_lanes = NULL,
+ 	.set_clock_gating = &radeon_legacy_set_clock_gating,
+ 	.set_surface_reg = r100_set_surface_reg,
+ 	.clear_surface_reg = r100_clear_surface_reg,
+ 	.bandwidth_update = &r100_bandwidth_update,
++	.hpd_init = &r100_hpd_init,
++	.hpd_fini = &r100_hpd_fini,
++	.hpd_sense = &r100_hpd_sense,
++	.hpd_set_polarity = &r100_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ 
+@@ -155,13 +166,18 @@ static struct radeon_asic r300_asic = {
+ 	.copy = &r100_copy_blit,
+ 	.get_engine_clock = &radeon_legacy_get_engine_clock,
+ 	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = NULL,
++	.get_memory_clock = &radeon_legacy_get_memory_clock,
+ 	.set_memory_clock = NULL,
+ 	.set_pcie_lanes = &rv370_set_pcie_lanes,
+ 	.set_clock_gating = &radeon_legacy_set_clock_gating,
+ 	.set_surface_reg = r100_set_surface_reg,
+ 	.clear_surface_reg = r100_clear_surface_reg,
+ 	.bandwidth_update = &r100_bandwidth_update,
++	.hpd_init = &r100_hpd_init,
++	.hpd_fini = &r100_hpd_fini,
++	.hpd_sense = &r100_hpd_sense,
++	.hpd_set_polarity = &r100_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ /*
+@@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = {
+ 	.set_surface_reg = r100_set_surface_reg,
+ 	.clear_surface_reg = r100_clear_surface_reg,
+ 	.bandwidth_update = &r100_bandwidth_update,
++	.hpd_init = &r100_hpd_init,
++	.hpd_fini = &r100_hpd_fini,
++	.hpd_sense = &r100_hpd_sense,
++	.hpd_set_polarity = &r100_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ 
+@@ -238,13 +259,18 @@ static struct radeon_asic rs400_asic = {
+ 	.copy = &r100_copy_blit,
+ 	.get_engine_clock = &radeon_legacy_get_engine_clock,
+ 	.set_engine_clock = &radeon_legacy_set_engine_clock,
+-	.get_memory_clock = NULL,
++	.get_memory_clock = &radeon_legacy_get_memory_clock,
+ 	.set_memory_clock = NULL,
+ 	.set_pcie_lanes = NULL,
+ 	.set_clock_gating = &radeon_legacy_set_clock_gating,
+ 	.set_surface_reg = r100_set_surface_reg,
+ 	.clear_surface_reg = r100_clear_surface_reg,
+ 	.bandwidth_update = &r100_bandwidth_update,
++	.hpd_init = &r100_hpd_init,
++	.hpd_fini = &r100_hpd_fini,
++	.hpd_sense = &r100_hpd_sense,
++	.hpd_set_polarity = &r100_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ 
+@@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
+ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
+ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+ void rs600_bandwidth_update(struct radeon_device *rdev);
++void rs600_hpd_init(struct radeon_device *rdev);
++void rs600_hpd_fini(struct radeon_device *rdev);
++bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
++void rs600_hpd_set_polarity(struct radeon_device *rdev,
++			    enum radeon_hpd_id hpd);
++
+ static struct radeon_asic rs600_asic = {
+ 	.init = &rs600_init,
+ 	.fini = &rs600_fini,
+@@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = {
+ 	.set_pcie_lanes = NULL,
+ 	.set_clock_gating = &radeon_atom_set_clock_gating,
+ 	.bandwidth_update = &rs600_bandwidth_update,
++	.hpd_init = &rs600_hpd_init,
++	.hpd_fini = &rs600_hpd_fini,
++	.hpd_sense = &rs600_hpd_sense,
++	.hpd_set_polarity = &rs600_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ 
+@@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = {
+ 	.set_surface_reg = r100_set_surface_reg,
+ 	.clear_surface_reg = r100_clear_surface_reg,
+ 	.bandwidth_update = &rs690_bandwidth_update,
++	.hpd_init = &rs600_hpd_init,
++	.hpd_fini = &rs600_hpd_fini,
++	.hpd_sense = &rs600_hpd_sense,
++	.hpd_set_polarity = &rs600_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ 
+@@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = {
+ 	.set_surface_reg = r100_set_surface_reg,
+ 	.clear_surface_reg = r100_clear_surface_reg,
+ 	.bandwidth_update = &rv515_bandwidth_update,
++	.hpd_init = &rs600_hpd_init,
++	.hpd_fini = &rs600_hpd_fini,
++	.hpd_sense = &rs600_hpd_sense,
++	.hpd_set_polarity = &rs600_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ 
+@@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = {
+ 	.set_surface_reg = r100_set_surface_reg,
+ 	.clear_surface_reg = r100_clear_surface_reg,
+ 	.bandwidth_update = &rv515_bandwidth_update,
++	.hpd_init = &rs600_hpd_init,
++	.hpd_fini = &rs600_hpd_fini,
++	.hpd_sense = &rs600_hpd_sense,
++	.hpd_set_polarity = &rs600_hpd_set_polarity,
++	.ioctl_wait_idle = NULL,
+ };
+ 
+ /*
+@@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
+ int r600_copy_blit(struct radeon_device *rdev,
+ 		   uint64_t src_offset, uint64_t dst_offset,
+ 		   unsigned num_pages, struct radeon_fence *fence);
++void r600_hpd_init(struct radeon_device *rdev);
++void r600_hpd_fini(struct radeon_device *rdev);
++bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
++void r600_hpd_set_polarity(struct radeon_device *rdev,
++			   enum radeon_hpd_id hpd);
++extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+ 
+ static struct radeon_asic r600_asic = {
+ 	.init = &r600_init,
+@@ -470,6 +528,7 @@ static struct radeon_asic r600_asic = {
+ 	.ring_ib_execute = &r600_ring_ib_execute,
+ 	.irq_set = &r600_irq_set,
+ 	.irq_process = &r600_irq_process,
++	.get_vblank_counter = &rs600_get_vblank_counter,
+ 	.fence_ring_emit = &r600_fence_ring_emit,
+ 	.cs_parse = &r600_cs_parse,
+ 	.copy_blit = &r600_copy_blit,
+@@ -484,6 +543,11 @@ static struct radeon_asic r600_asic = {
+ 	.set_surface_reg = r600_set_surface_reg,
+ 	.clear_surface_reg = r600_clear_surface_reg,
+ 	.bandwidth_update = &rv515_bandwidth_update,
++	.hpd_init = &r600_hpd_init,
++	.hpd_fini = &r600_hpd_fini,
++	.hpd_sense = &r600_hpd_sense,
++	.hpd_set_polarity = &r600_hpd_set_polarity,
++	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ };
+ 
+ /*
+@@ -509,6 +573,7 @@ static struct radeon_asic rv770_asic = {
+ 	.ring_ib_execute = &r600_ring_ib_execute,
+ 	.irq_set = &r600_irq_set,
+ 	.irq_process = &r600_irq_process,
++	.get_vblank_counter = &rs600_get_vblank_counter,
+ 	.fence_ring_emit = &r600_fence_ring_emit,
+ 	.cs_parse = &r600_cs_parse,
+ 	.copy_blit = &r600_copy_blit,
+@@ -523,6 +588,11 @@ static struct radeon_asic rv770_asic = {
+ 	.set_surface_reg = r600_set_surface_reg,
+ 	.clear_surface_reg = r600_clear_surface_reg,
+ 	.bandwidth_update = &rv515_bandwidth_update,
++	.hpd_init = &r600_hpd_init,
++	.hpd_fini = &r600_hpd_fini,
++	.hpd_sense = &r600_hpd_sense,
++	.hpd_set_polarity = &r600_hpd_set_polarity,
++	.ioctl_wait_idle = r600_ioctl_wait_idle,
+ };
+ 
+ #endif
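
[Aside, not part of the patch] The radeon_asic.h hunks above wire per-family hot-plug detect (HPD) callbacks into each ASIC's function-pointer table, so common code can poll connector presence through the radeon_hpd_sense() macro without knowing the chip family. A self-contained sketch of that vtable-plus-macro dispatch, using illustrative names rather than the driver's real types:

    #include <stdbool.h>
    #include <stdio.h>

    enum hpd_id { HPD_1, HPD_2, HPD_NONE };

    struct dev;                     /* opaque stand-in for the device */

    struct asic_ops {
        bool (*hpd_sense)(struct dev *d, enum hpd_id hpd);
    };

    /* Common code dispatches through the table, mirroring
     * radeon_hpd_sense(rdev, hpd) in the patch. */
    #define dev_hpd_sense(d, ops, hpd) ((ops)->hpd_sense((d), (hpd)))

    static bool r100_like_sense(struct dev *d, enum hpd_id hpd)
    {
        (void)d;
        return hpd == HPD_1;        /* pretend pin 1 reads "connected" */
    }

    static const struct asic_ops r100_like = { .hpd_sense = r100_like_sense };

    int main(void)
    {
        printf("%d\n", dev_hpd_sense(NULL, &r100_like, HPD_1));  /* prints 1 */
        return 0;
    }
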
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 969502a..4d88315 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 			  int connector_type,
+ 			  struct radeon_i2c_bus_rec *i2c_bus,
+ 			  bool linkb, uint32_t igp_lane_info,
+-			  uint16_t connector_object_id);
++			  uint16_t connector_object_id,
++			  struct radeon_hpd *hpd);
+ 
+ /* from radeon_legacy_encoder.c */
+ extern void
+@@ -60,16 +61,16 @@ union atom_supported_devices {
+ 	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
+ };
+ 
+-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
+-							   *dev, uint8_t id)
++static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
++							       uint8_t id)
+ {
+-	struct radeon_device *rdev = dev->dev_private;
+ 	struct atom_context *ctx = rdev->mode_info.atom_context;
+-	ATOM_GPIO_I2C_ASSIGMENT gpio;
++	ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ 	struct radeon_i2c_bus_rec i2c;
+ 	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ 	struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ 	uint16_t data_offset;
++	int i;
+ 
+ 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+ 	i2c.valid = false;
+@@ -78,34 +79,122 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
+ 
+ 	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+ 
+-	gpio = i2c_info->asGPIO_Info[id];
+-
+-	i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
+-	i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
+-	i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
+-	i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
+-	i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
+-	i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
+-	i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
+-	i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
+-	i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
+-	i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
+-	i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
+-	i2c.put_data_mask = (1 << gpio.ucDataEnShift);
+-	i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
+-	i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
+-	i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
+-	i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
+-	i2c.valid = true;
++
++	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
++		gpio = &i2c_info->asGPIO_Info[i];
++
++		if (gpio->sucI2cId.ucAccess == id) {
++			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
++			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
++			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
++			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
++			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
++			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
++			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
++			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
++			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
++			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
++			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
++			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
++			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
++			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
++			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
++			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
++
++			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
++				i2c.hw_capable = true;
++			else
++				i2c.hw_capable = false;
++
++			if (gpio->sucI2cId.ucAccess == 0xa0)
++				i2c.mm_i2c = true;
++			else
++				i2c.mm_i2c = false;
++
++			i2c.i2c_id = gpio->sucI2cId.ucAccess;
++
++			i2c.valid = true;
++			break;
++		}
++	}
+ 
+ 	return i2c;
+ }
+ 
++static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
++							u8 id)
++{
++	struct atom_context *ctx = rdev->mode_info.atom_context;
++	struct radeon_gpio_rec gpio;
++	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
++	struct _ATOM_GPIO_PIN_LUT *gpio_info;
++	ATOM_GPIO_PIN_ASSIGNMENT *pin;
++	u16 data_offset, size;
++	int i, num_indices;
++
++	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
++	gpio.valid = false;
++
++	atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
++
++	gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
++
++	num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
++
++	for (i = 0; i < num_indices; i++) {
++		pin = &gpio_info->asGPIO_Pin[i];
++		if (id == pin->ucGPIO_ID) {
++			gpio.id = pin->ucGPIO_ID;
++			gpio.reg = pin->usGpioPin_AIndex * 4;
++			gpio.mask = (1 << pin->ucGpioPinBitShift);
++			gpio.valid = true;
++			break;
++		}
++	}
++
++	return gpio;
++}
++
++static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
++							    struct radeon_gpio_rec *gpio)
++{
++	struct radeon_hpd hpd;
++	hpd.gpio = *gpio;
++	if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
++		switch(gpio->mask) {
++		case (1 << 0):
++			hpd.hpd = RADEON_HPD_1;
++			break;
++		case (1 << 8):
++			hpd.hpd = RADEON_HPD_2;
++			break;
++		case (1 << 16):
++			hpd.hpd = RADEON_HPD_3;
++			break;
++		case (1 << 24):
++			hpd.hpd = RADEON_HPD_4;
++			break;
++		case (1 << 26):
++			hpd.hpd = RADEON_HPD_5;
++			break;
++		case (1 << 28):
++			hpd.hpd = RADEON_HPD_6;
++			break;
++		default:
++			hpd.hpd = RADEON_HPD_NONE;
++			break;
++		}
++	} else
++		hpd.hpd = RADEON_HPD_NONE;
++	return hpd;
++}
++
+ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 				     uint32_t supported_device,
+ 				     int *connector_type,
+ 				     struct radeon_i2c_bus_rec *i2c_bus,
+-				     uint16_t *line_mux)
++				     uint16_t *line_mux,
++				     struct radeon_hpd *hpd)
+ {
+ 
+ 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
+@@ -117,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 			*connector_type = DRM_MODE_CONNECTOR_DVID;
+ 	}
+ 
++	/* Asrock RS600 board lists the DVI port as HDMI */
++	if ((dev->pdev->device == 0x7941) &&
++	    (dev->pdev->subsystem_vendor == 0x1849) &&
++	    (dev->pdev->subsystem_device == 0x7941)) {
++		if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
++		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
++			*connector_type = DRM_MODE_CONNECTOR_DVID;
++	}
++
+ 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
+ 	if ((dev->pdev->device == 0x7941) &&
+ 	    (dev->pdev->subsystem_vendor == 0x147b) &&
+@@ -143,6 +241,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 			return false;
+ 	}
+ 
++	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
++	if ((dev->pdev->device == 0x7142) &&
++	    (dev->pdev->subsystem_vendor == 0x1458) &&
++	    (dev->pdev->subsystem_device == 0x2134)) {
++		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
++			return false;
++	}
++
++
+ 	/* Funky macbooks */
+ 	if ((dev->pdev->device == 0x71C5) &&
+ 	    (dev->pdev->subsystem_vendor == 0x106b) &&
+@@ -180,6 +287,24 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 		}
+ 	}
+ 
++	/* Acer laptop reports DVI-D as DVI-I */
++	if ((dev->pdev->device == 0x95c4) &&
++	    (dev->pdev->subsystem_vendor == 0x1025) &&
++	    (dev->pdev->subsystem_device == 0x013c)) {
++		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
++		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
++			*connector_type = DRM_MODE_CONNECTOR_DVID;
++	}
++
++	/* XFX Pine Group device rv730 reports no VGA DDC lines
++	 * even though they are wired up to record 0x93
++	 */
++	if ((dev->pdev->device == 0x9498) &&
++	    (dev->pdev->subsystem_vendor == 0x1682) &&
++	    (dev->pdev->subsystem_device == 0x2452)) {
++		struct radeon_device *rdev = dev->dev_private;
++		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
++	}
+ 	return true;
+ }
+ 
+@@ -239,7 +364,9 @@ const int object_connector_convert[] = {
+ 	DRM_MODE_CONNECTOR_Unknown,
+ 	DRM_MODE_CONNECTOR_Unknown,
+ 	DRM_MODE_CONNECTOR_Unknown,
+-	DRM_MODE_CONNECTOR_DisplayPort
++	DRM_MODE_CONNECTOR_DisplayPort,
++	DRM_MODE_CONNECTOR_eDP,
++	DRM_MODE_CONNECTOR_Unknown
+ };
+ 
+ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+@@ -248,16 +375,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ 	struct radeon_mode_info *mode_info = &rdev->mode_info;
+ 	struct atom_context *ctx = mode_info->atom_context;
+ 	int index = GetIndexIntoMasterTable(DATA, Object_Header);
+-	uint16_t size, data_offset;
+-	uint8_t frev, crev, line_mux = 0;
++	u16 size, data_offset;
++	u8 frev, crev;
+ 	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ 	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
+ 	ATOM_OBJECT_HEADER *obj_header;
+ 	int i, j, path_size, device_support;
+ 	int connector_type;
+-	uint16_t igp_lane_info, conn_id, connector_object_id;
++	u16 igp_lane_info, conn_id, connector_object_id;
+ 	bool linkb;
+ 	struct radeon_i2c_bus_rec ddc_bus;
++	struct radeon_gpio_rec gpio;
++	struct radeon_hpd hpd;
+ 
+ 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+ 
+@@ -284,7 +413,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
+ 		path_size += le16_to_cpu(path->usSize);
+ 		linkb = false;
+-
+ 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
+ 			uint8_t con_obj_id, con_obj_num, con_obj_type;
+ 
+@@ -385,10 +513,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ 				}
+ 			}
+ 
+-			/* look up gpio for ddc */
++			/* look up gpio for ddc, hpd */
+ 			if ((le16_to_cpu(path->usDeviceTag) &
+-			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+-			    == 0) {
++			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
+ 				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
+ 					if (le16_to_cpu(path->usConnObjectId) ==
+ 					    le16_to_cpu(con_obj->asObjects[j].
+@@ -402,21 +529,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ 								 asObjects[j].
+ 								 usRecordOffset));
+ 						ATOM_I2C_RECORD *i2c_record;
++						ATOM_HPD_INT_RECORD *hpd_record;
++						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
++						hpd.hpd = RADEON_HPD_NONE;
+ 
+ 						while (record->ucRecordType > 0
+ 						       && record->
+ 						       ucRecordType <=
+ 						       ATOM_MAX_OBJECT_RECORD_NUMBER) {
+-							switch (record->
+-								ucRecordType) {
++							switch (record->ucRecordType) {
+ 							case ATOM_I2C_RECORD_TYPE:
+ 								i2c_record =
+-								    (ATOM_I2C_RECORD
+-								     *) record;
+-								line_mux =
+-								    i2c_record->
+-								    sucI2cId.
+-								    bfI2C_LineMux;
++								    (ATOM_I2C_RECORD *)
++									record;
++								i2c_config =
++									(ATOM_I2C_ID_CONFIG_ACCESS *)
++									&i2c_record->sucI2cId;
++								ddc_bus = radeon_lookup_i2c_gpio(rdev,
++												 i2c_config->
++												 ucAccess);
++								break;
++							case ATOM_HPD_INT_RECORD_TYPE:
++								hpd_record =
++									(ATOM_HPD_INT_RECORD *)
++									record;
++								gpio = radeon_lookup_gpio(rdev,
++											  hpd_record->ucHPDIntGPIOID);
++								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
++								hpd.plugged_state = hpd_record->ucPlugged_PinState;
+ 								break;
+ 							}
+ 							record =
+@@ -429,24 +569,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ 						break;
+ 					}
+ 				}
+-			} else
+-				line_mux = 0;
+-
+-			if ((le16_to_cpu(path->usDeviceTag) ==
+-			     ATOM_DEVICE_TV1_SUPPORT)
+-			    || (le16_to_cpu(path->usDeviceTag) ==
+-				ATOM_DEVICE_TV2_SUPPORT)
+-			    || (le16_to_cpu(path->usDeviceTag) ==
+-				ATOM_DEVICE_CV_SUPPORT))
++			} else {
++				hpd.hpd = RADEON_HPD_NONE;
+ 				ddc_bus.valid = false;
+-			else
+-				ddc_bus = radeon_lookup_gpio(dev, line_mux);
++			}
+ 
+ 			conn_id = le16_to_cpu(path->usConnObjectId);
+ 
+ 			if (!radeon_atom_apply_quirks
+ 			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
+-			     &ddc_bus, &conn_id))
++			     &ddc_bus, &conn_id, &hpd))
+ 				continue;
+ 
+ 			radeon_add_atom_connector(dev,
+@@ -455,7 +587,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
+ 							      usDeviceTag),
+ 						  connector_type, &ddc_bus,
+ 						  linkb, igp_lane_info,
+-						  connector_object_id);
++						  connector_object_id,
++						  &hpd);
+ 
+ 		}
+ 	}
+@@ -510,6 +643,7 @@ struct bios_connector {
+ 	uint16_t devices;
+ 	int connector_type;
+ 	struct radeon_i2c_bus_rec ddc_bus;
++	struct radeon_hpd hpd;
+ };
+ 
+ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+@@ -525,7 +659,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 	uint16_t device_support;
+ 	uint8_t dac;
+ 	union atom_supported_devices *supported_devices;
+-	int i, j;
++	int i, j, max_device;
+ 	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
+ 
+ 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+@@ -535,7 +669,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 
+ 	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
+ 
+-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
++	if (frev > 1)
++		max_device = ATOM_MAX_SUPPORTED_DEVICE;
++	else
++		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
++
++	for (i = 0; i < max_device; i++) {
+ 		ATOM_CONNECTOR_INFO_I2C ci =
+ 		    supported_devices->info.asConnInfo[i];
+ 
+@@ -561,22 +700,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 
+ 		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
+ 
+-		if ((rdev->family == CHIP_RS690) ||
+-		    (rdev->family == CHIP_RS740)) {
+-			if ((i == ATOM_DEVICE_DFP2_INDEX)
+-			    && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
+-				bios_connectors[i].line_mux =
+-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
+-			else if ((i == ATOM_DEVICE_DFP3_INDEX)
+-				 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
+-				bios_connectors[i].line_mux =
+-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
+-			else
+-				bios_connectors[i].line_mux =
+-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+-		} else
+-			bios_connectors[i].line_mux =
+-			    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
++		bios_connectors[i].line_mux =
++			ci.sucI2cId.ucAccess;
+ 
+ 		/* give tv unique connector ids */
+ 		if (i == ATOM_DEVICE_TV1_INDEX) {
+@@ -590,8 +715,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 			bios_connectors[i].line_mux = 52;
+ 		} else
+ 			bios_connectors[i].ddc_bus =
+-			    radeon_lookup_gpio(dev,
+-					       bios_connectors[i].line_mux);
++			    radeon_lookup_i2c_gpio(rdev,
++						   bios_connectors[i].line_mux);
++
++		if ((crev > 1) && (frev > 1)) {
++			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
++			switch (isb) {
++			case 0x4:
++				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
++				break;
++			case 0xa:
++				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
++				break;
++			default:
++				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
++				break;
++			}
++		} else {
++			if (i == ATOM_DEVICE_DFP1_INDEX)
++				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
++			else if (i == ATOM_DEVICE_DFP2_INDEX)
++				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
++			else
++				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
++		}
+ 
+ 		/* Always set the connector type to VGA for CRT1/CRT2. if they are
+ 		 * shared with a DVI port, we'll pick up the DVI connector when we
+@@ -603,7 +750,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 
+ 		if (!radeon_atom_apply_quirks
+ 		    (dev, (1 << i), &bios_connectors[i].connector_type,
+-		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
++		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
++		     &bios_connectors[i].hpd))
+ 			continue;
+ 
+ 		bios_connectors[i].valid = true;
+@@ -618,41 +766,42 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 		else
+ 			radeon_add_legacy_encoder(dev,
+ 						  radeon_get_encoder_id(dev,
+-									(1 <<
+-									 i),
++									(1 << i),
+ 									dac),
+ 						  (1 << i));
+ 	}
+ 
+ 	/* combine shared connectors */
+-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
++	for (i = 0; i < max_device; i++) {
+ 		if (bios_connectors[i].valid) {
+-			for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
++			for (j = 0; j < max_device; j++) {
+ 				if (bios_connectors[j].valid && (i != j)) {
+ 					if (bios_connectors[i].line_mux ==
+ 					    bios_connectors[j].line_mux) {
+-						if (((bios_connectors[i].
+-						      devices &
+-						      (ATOM_DEVICE_DFP_SUPPORT))
+-						     && (bios_connectors[j].
+-							 devices &
+-							 (ATOM_DEVICE_CRT_SUPPORT)))
+-						    ||
+-						    ((bios_connectors[j].
+-						      devices &
+-						      (ATOM_DEVICE_DFP_SUPPORT))
+-						     && (bios_connectors[i].
+-							 devices &
+-							 (ATOM_DEVICE_CRT_SUPPORT)))) {
+-							bios_connectors[i].
+-							    devices |=
+-							    bios_connectors[j].
+-							    devices;
+-							bios_connectors[i].
+-							    connector_type =
+-							    DRM_MODE_CONNECTOR_DVII;
+-							bios_connectors[j].
+-							    valid = false;
++						/* make sure not to combine LVDS */
++						if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++							bios_connectors[i].line_mux = 53;
++							bios_connectors[i].ddc_bus.valid = false;
++							continue;
++						}
++						if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++							bios_connectors[j].line_mux = 53;
++							bios_connectors[j].ddc_bus.valid = false;
++							continue;
++						}
++						/* combine analog and digital for DVI-I */
++						if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
++						     (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
++						    ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
++						     (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
++							bios_connectors[i].devices |=
++								bios_connectors[j].devices;
++							bios_connectors[i].connector_type =
++								DRM_MODE_CONNECTOR_DVII;
++							if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
++								bios_connectors[i].hpd =
++									bios_connectors[j].hpd;
++							bios_connectors[j].valid = false;
+ 						}
+ 					}
+ 				}
+@@ -661,7 +810,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 	}
+ 
+ 	/* add the connectors */
+-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
++	for (i = 0; i < max_device; i++) {
+ 		if (bios_connectors[i].valid) {
+ 			uint16_t connector_object_id =
+ 				atombios_get_connector_object_id(dev,
+@@ -674,7 +823,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
+ 						  connector_type,
+ 						  &bios_connectors[i].ddc_bus,
+ 						  false, 0,
+-						  connector_object_id);
++						  connector_object_id,
++						  &bios_connectors[i].hpd);
+ 		}
+ 	}
+ 
+@@ -739,7 +889,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 			 * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
+ 			 * family.
+ 			 */
+-			p1pll->pll_out_min = 64800;
++			if (!radeon_new_pll)
++				p1pll->pll_out_min = 64800;
+ 		}
+ 
+ 		p1pll->pll_in_min =
+@@ -805,6 +956,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 	return false;
+ }
+ 
++union igp_info {
++	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
++};
++
++bool radeon_atombios_sideport_present(struct radeon_device *rdev)
++{
++	struct radeon_mode_info *mode_info = &rdev->mode_info;
++	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
++	union igp_info *igp_info;
++	u8 frev, crev;
++	u16 data_offset;
++
++	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
++			       &crev, &data_offset);
++
++	igp_info = (union igp_info *)(mode_info->atom_context->bios +
++				      data_offset);
++
++	if (igp_info) {
++		switch (crev) {
++		case 1:
++			if (igp_info->info.ucMemoryType & 0xf0)
++				return true;
++			break;
++		case 2:
++			if (igp_info->info_2.ucMemoryType & 0x0f)
++				return true;
++			break;
++		default:
++			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
++			break;
++		}
++	}
++	return false;
++}
++
+ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+ 				   struct radeon_encoder_int_tmds *tmds)
+ {
+@@ -869,6 +1057,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
+ 	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
+ 	uint8_t frev, crev;
+ 	struct radeon_atom_ss *ss = NULL;
++	int i;
+ 
+ 	if (id > ATOM_MAX_SS_ENTRY)
+ 		return NULL;
+@@ -886,12 +1075,18 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
+ 		if (!ss)
+ 			return NULL;
+ 
+-		ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
+-		ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
+-		ss->step = ss_info->asSS_Info[id].ucSS_Step;
+-		ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
+-		ss->range = ss_info->asSS_Info[id].ucSS_Range;
+-		ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
++		for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
++			if (ss_info->asSS_Info[i].ucSS_Id == id) {
++				ss->percentage =
++					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
++				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
++				ss->step = ss_info->asSS_Info[i].ucSS_Step;
++				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
++				ss->range = ss_info->asSS_Info[i].ucSS_Range;
++				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
++				break;
++			}
++		}
+ 	}
+ 	return ss;
+ }
+@@ -909,7 +1104,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_mode_info *mode_info = &rdev->mode_info;
+ 	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
+-	uint16_t data_offset;
++	uint16_t data_offset, misc;
+ 	union lvds_info *lvds_info;
+ 	uint8_t frev, crev;
+ 	struct radeon_encoder_atom_dig *lvds = NULL;
+@@ -948,6 +1143,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
+ 		lvds->panel_pwr_delay =
+ 		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
+ 		lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
++
++		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
++		if (misc & ATOM_VSYNC_POLARITY)
++			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
++		if (misc & ATOM_HSYNC_POLARITY)
++			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
++		if (misc & ATOM_COMPOSITESYNC)
++			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
++		if (misc & ATOM_INTERLACE)
++			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
++		if (misc & ATOM_DOUBLE_CLOCK_MODE)
++			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
++
+ 		/* set crtc values */
+ 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
+ 
+@@ -1082,6 +1290,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ 	return true;
+ }
+ 
++enum radeon_tv_std
++radeon_atombios_get_tv_info(struct radeon_device *rdev)
++{
++	struct radeon_mode_info *mode_info = &rdev->mode_info;
++	int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
++	uint16_t data_offset;
++	uint8_t frev, crev;
++	struct _ATOM_ANALOG_TV_INFO *tv_info;
++	enum radeon_tv_std tv_std = TV_STD_NTSC;
++
++	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
++
++	tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
++
++	switch (tv_info->ucTV_BootUpDefaultStandard) {
++	case ATOM_TV_NTSC:
++		tv_std = TV_STD_NTSC;
++		DRM_INFO("Default TV standard: NTSC\n");
++		break;
++	case ATOM_TV_NTSCJ:
++		tv_std = TV_STD_NTSC_J;
++		DRM_INFO("Default TV standard: NTSC-J\n");
++		break;
++	case ATOM_TV_PAL:
++		tv_std = TV_STD_PAL;
++		DRM_INFO("Default TV standard: PAL\n");
++		break;
++	case ATOM_TV_PALM:
++		tv_std = TV_STD_PAL_M;
++		DRM_INFO("Default TV standard: PAL-M\n");
++		break;
++	case ATOM_TV_PALN:
++		tv_std = TV_STD_PAL_N;
++		DRM_INFO("Default TV standard: PAL-N\n");
++		break;
++	case ATOM_TV_PALCN:
++		tv_std = TV_STD_PAL_CN;
++		DRM_INFO("Default TV standard: PAL-CN\n");
++		break;
++	case ATOM_TV_PAL60:
++		tv_std = TV_STD_PAL_60;
++		DRM_INFO("Default TV standard: PAL-60\n");
++		break;
++	case ATOM_TV_SECAM:
++		tv_std = TV_STD_SECAM;
++		DRM_INFO("Default TV standard: SECAM\n");
++		break;
++	default:
++		tv_std = TV_STD_NTSC;
++		DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
++		break;
++	}
++	return tv_std;
++}
++
+ struct radeon_encoder_tv_dac *
+ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+ {
+@@ -1117,6 +1380,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
+ 		dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
+ 		tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ 
++		tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
+ 	}
+ 	return tv_dac;
+ }
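
[Aside, not part of the patch] A recurring pattern in the radeon_atombios.c changes above is replacing direct indexing of BIOS tables (asGPIO_Info[id], asSS_Info[id]) with a linear scan for a matching id field, which copes with sparse or reordered firmware tables. A self-contained sketch of the pattern, with illustrative types rather than the AtomBIOS structures:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct gpio_entry { unsigned char id; unsigned short clk_reg; };
    struct gpio_rec   { bool valid; unsigned short clk_reg; };

    static struct gpio_rec lookup_gpio(const struct gpio_entry *tab,
                                       size_t n, unsigned char id)
    {
        struct gpio_rec rec = { .valid = false };

        /* Scan every slot for a matching id instead of trusting tab[id]
         * to exist; firmware tables may be sparse or unsorted. */
        for (size_t i = 0; i < n; i++) {
            if (tab[i].id == id) {
                rec.clk_reg = tab[i].clk_reg;
                rec.valid = true;
                break;
            }
        }
        return rec;
    }

    int main(void)
    {
        const struct gpio_entry tab[] = { { 0x93, 0x64 }, { 0x7e, 0x68 } };
        struct gpio_rec r = lookup_gpio(tab, 2, 0x7e);
        printf("valid=%d reg=0x%x\n", r.valid, r.clk_reg);  /* valid=1 reg=0x68 */
        return 0;
    }
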
+diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
+index 10bd50a..7932dc4 100644
+--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
++++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
+@@ -29,8 +29,8 @@
+ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
+ 			   unsigned sdomain, unsigned ddomain)
+ {
+-	struct radeon_object *dobj = NULL;
+-	struct radeon_object *sobj = NULL;
++	struct radeon_bo *dobj = NULL;
++	struct radeon_bo *sobj = NULL;
+ 	struct radeon_fence *fence = NULL;
+ 	uint64_t saddr, daddr;
+ 	unsigned long start_jiffies;
+@@ -41,47 +41,66 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
+ 
+ 	size = bsize;
+ 	n = 1024;
+-	r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
++	r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
+ 	if (r) {
+ 		goto out_cleanup;
+ 	}
+-	r = radeon_object_pin(sobj, sdomain, &saddr);
++	r = radeon_bo_reserve(sobj, false);
++	if (unlikely(r != 0))
++		goto out_cleanup;
++	r = radeon_bo_pin(sobj, sdomain, &saddr);
++	radeon_bo_unreserve(sobj);
+ 	if (r) {
+ 		goto out_cleanup;
+ 	}
+-	r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
++	r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
+ 	if (r) {
+ 		goto out_cleanup;
+ 	}
+-	r = radeon_object_pin(dobj, ddomain, &daddr);
++	r = radeon_bo_reserve(dobj, false);
++	if (unlikely(r != 0))
++		goto out_cleanup;
++	r = radeon_bo_pin(dobj, ddomain, &daddr);
++	radeon_bo_unreserve(dobj);
+ 	if (r) {
+ 		goto out_cleanup;
+ 	}
+-	start_jiffies = jiffies;
+-	for (i = 0; i < n; i++) {
+-		r = radeon_fence_create(rdev, &fence);
+-		if (r) {
+-			goto out_cleanup;
++
++	/* r100 doesn't have dma engine so skip the test */
++	if (rdev->asic->copy_dma) {
++
++		start_jiffies = jiffies;
++		for (i = 0; i < n; i++) {
++			r = radeon_fence_create(rdev, &fence);
++			if (r) {
++				goto out_cleanup;
++			}
++
++			r = radeon_copy_dma(rdev, saddr, daddr,
++					size / RADEON_GPU_PAGE_SIZE, fence);
++
++			if (r) {
++				goto out_cleanup;
++			}
++			r = radeon_fence_wait(fence, false);
++			if (r) {
++				goto out_cleanup;
++			}
++			radeon_fence_unref(&fence);
+ 		}
+-		r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
+-		if (r) {
+-			goto out_cleanup;
++		end_jiffies = jiffies;
++		time = end_jiffies - start_jiffies;
++		time = jiffies_to_msecs(time);
++		if (time > 0) {
++			i = ((n * size) >> 10) / time;
++			printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
++					" %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
++					n, size >> 10,
++					sdomain, ddomain, time,
++					i, i * 1000, (i * 1000) / 1024);
+ 		}
+-		r = radeon_fence_wait(fence, false);
+-		if (r) {
+-			goto out_cleanup;
+-		}
+-		radeon_fence_unref(&fence);
+-	}
+-	end_jiffies = jiffies;
+-	time = end_jiffies - start_jiffies;
+-	time = jiffies_to_msecs(time);
+-	if (time > 0) {
+-		i = ((n * size) >> 10) / time;
+-		printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
+-		       " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
+-		       sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
+ 	}
++
+ 	start_jiffies = jiffies;
+ 	for (i = 0; i < n; i++) {
+ 		r = radeon_fence_create(rdev, &fence);
+@@ -109,12 +128,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
+ 	}
+ out_cleanup:
+ 	if (sobj) {
+-		radeon_object_unpin(sobj);
+-		radeon_object_unref(&sobj);
++		r = radeon_bo_reserve(sobj, false);
++		if (likely(r == 0)) {
++			radeon_bo_unpin(sobj);
++			radeon_bo_unreserve(sobj);
++		}
++		radeon_bo_unref(&sobj);
+ 	}
+ 	if (dobj) {
+-		radeon_object_unpin(dobj);
+-		radeon_object_unref(&dobj);
++		r = radeon_bo_reserve(dobj, false);
++		if (likely(r == 0)) {
++			radeon_bo_unpin(dobj);
++			radeon_bo_unreserve(dobj);
++		}
++		radeon_bo_unref(&dobj);
+ 	}
+ 	if (fence) {
+ 		radeon_fence_unref(&fence);
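
[Aside, not part of the patch] The radeon_benchmark.c conversion above moves from the old radeon_object_* helpers to the radeon_bo_* API, whose locking rules require the buffer to be reserved around every pin and unpin. A stubbed sketch of that reserve -> pin -> unreserve idiom — hypothetical helpers, not the real TTM interface:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bo { bool reserved; uint64_t addr; };

    static int  bo_reserve(struct bo *bo)   { bo->reserved = true; return 0; }
    static void bo_unreserve(struct bo *bo) { bo->reserved = false; }

    static int bo_pin(struct bo *bo, uint64_t *gpu_addr)
    {
        if (!bo->reserved)
            return -1;              /* pinning requires the lock */
        *gpu_addr = bo->addr;
        return 0;
    }

    static int pin_buffer(struct bo *bo, uint64_t *addr)
    {
        int r = bo_reserve(bo);     /* take the object lock       */
        if (r)
            return r;
        r = bo_pin(bo, addr);       /* pin while reserved         */
        bo_unreserve(bo);           /* drop the lock on both paths */
        return r;
    }

    int main(void)
    {
        struct bo b = { .addr = 0x1000 };
        uint64_t a = 0;
        int r = pin_buffer(&b, &a);
        printf("r=%d addr=0x%llx\n", r, (unsigned long long)a);  /* r=0 addr=0x1000 */
        return 0;
    }
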
+diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
+index a813541..73c4405 100644
+--- a/drivers/gpu/drm/radeon/radeon_clocks.c
++++ b/drivers/gpu/drm/radeon/radeon_clocks.c
+@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+ 
+ 	ref_div =
+ 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
++
++	if (ref_div == 0)
++		return 0;
++
+ 	sclk = fb_div / ref_div;
+ 
+ 	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
+@@ -52,13 +56,13 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
+ 	else if (post_div == 3)
+ 		sclk >>= 2;
+ 	else if (post_div == 4)
+-		sclk >>= 4;
++		sclk >>= 3;
+ 
+ 	return sclk;
+ }
+ 
+ /* 10 khz */
+-static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
++uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+ {
+ 	struct radeon_pll *mpll = &rdev->clock.mpll;
+ 	uint32_t fb_div, ref_div, post_div, mclk;
+@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+ 
+ 	ref_div =
+ 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
++
++	if (ref_div == 0)
++		return 0;
++
+ 	mclk = fb_div / ref_div;
+ 
+ 	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
+@@ -78,7 +86,7 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
+ 	else if (post_div == 3)
+ 		mclk >>= 2;
+ 	else if (post_div == 4)
+-		mclk >>= 4;
++		mclk >>= 3;
+ 
+ 	return mclk;
+ }
+@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
+ 		ret = radeon_combios_get_clock_info(dev);
+ 
+ 	if (ret) {
+-		if (p1pll->reference_div < 2)
+-			p1pll->reference_div = 12;
++		if (p1pll->reference_div < 2) {
++			if (!ASIC_IS_AVIVO(rdev)) {
++				u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
++				if (ASIC_IS_R300(rdev))
++					p1pll->reference_div =
++						(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
++				else
++					p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
++				if (p1pll->reference_div < 2)
++					p1pll->reference_div = 12;
++			} else
++				p1pll->reference_div = 12;
++		}
+ 		if (p2pll->reference_div < 2)
+ 			p2pll->reference_div = 12;
+ 		if (rdev->family < CHIP_RS600) {
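
[Aside, not part of the patch] The radeon_clocks.c hunks above fix two readback bugs: a division by zero when the reference divider reads back as 0, and an off-by-one in the post-divider decode — an encoding of 4 means divide by 8, i.e. a right shift of 3, not 4. A small sketch with illustrative register values (clocks in the driver's 10 kHz units):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t decode_clock(uint32_t fb_div, uint32_t ref_div,
                                 uint32_t post_div)
    {
        uint32_t clk;

        if (ref_div == 0)   /* the divide-by-zero the patch now guards */
            return 0;

        clk = fb_div / ref_div;
        if (post_div == 2)
            clk >>= 1;
        else if (post_div == 3)
            clk >>= 2;
        else if (post_div == 4)
            clk >>= 3;      /* was ">>= 4" before the fix: off by one */
        return clk;
    }

    int main(void)
    {
        /* 40000 / 4 = 10000; post_div encoding 4 -> /8 = 1250 */
        printf("%u\n", decode_clock(40000, 4, 4));
        return 0;
    }
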
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 5253cbf..e7b1944 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 			    uint32_t supported_device,
+ 			    int connector_type,
+ 			    struct radeon_i2c_bus_rec *i2c_bus,
+-			    uint16_t connector_object_id);
++			    uint16_t connector_object_id,
++			    struct radeon_hpd *hpd);
+ 
+ /* from radeon_legacy_encoder.c */
+ extern void
+@@ -442,39 +443,71 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
+ 
+ }
+ 
+-struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
++static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
++						       int ddc_line)
+ {
+ 	struct radeon_i2c_bus_rec i2c;
+ 
+-	i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+-	i2c.mask_data_mask = RADEON_GPIO_EN_0;
+-	i2c.a_clk_mask = RADEON_GPIO_A_1;
+-	i2c.a_data_mask = RADEON_GPIO_A_0;
+-	i2c.put_clk_mask = RADEON_GPIO_EN_1;
+-	i2c.put_data_mask = RADEON_GPIO_EN_0;
+-	i2c.get_clk_mask = RADEON_GPIO_Y_1;
+-	i2c.get_data_mask = RADEON_GPIO_Y_0;
+-	if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
+-	    (ddc_line == RADEON_MDGPIO_EN_REG)) {
+-		i2c.mask_clk_reg = ddc_line;
+-		i2c.mask_data_reg = ddc_line;
+-		i2c.a_clk_reg = ddc_line;
+-		i2c.a_data_reg = ddc_line;
+-		i2c.put_clk_reg = ddc_line;
+-		i2c.put_data_reg = ddc_line;
+-		i2c.get_clk_reg = ddc_line + 4;
+-		i2c.get_data_reg = ddc_line + 4;
++	if (ddc_line == RADEON_GPIOPAD_MASK) {
++		i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
++		i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
++		i2c.a_clk_reg = RADEON_GPIOPAD_A;
++		i2c.a_data_reg = RADEON_GPIOPAD_A;
++		i2c.en_clk_reg = RADEON_GPIOPAD_EN;
++		i2c.en_data_reg = RADEON_GPIOPAD_EN;
++		i2c.y_clk_reg = RADEON_GPIOPAD_Y;
++		i2c.y_data_reg = RADEON_GPIOPAD_Y;
++	} else if (ddc_line == RADEON_MDGPIO_MASK) {
++		i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
++		i2c.mask_data_reg = RADEON_MDGPIO_MASK;
++		i2c.a_clk_reg = RADEON_MDGPIO_A;
++		i2c.a_data_reg = RADEON_MDGPIO_A;
++		i2c.en_clk_reg = RADEON_MDGPIO_EN;
++		i2c.en_data_reg = RADEON_MDGPIO_EN;
++		i2c.y_clk_reg = RADEON_MDGPIO_Y;
++		i2c.y_data_reg = RADEON_MDGPIO_Y;
+ 	} else {
++		i2c.mask_clk_mask = RADEON_GPIO_EN_1;
++		i2c.mask_data_mask = RADEON_GPIO_EN_0;
++		i2c.a_clk_mask = RADEON_GPIO_A_1;
++		i2c.a_data_mask = RADEON_GPIO_A_0;
++		i2c.en_clk_mask = RADEON_GPIO_EN_1;
++		i2c.en_data_mask = RADEON_GPIO_EN_0;
++		i2c.y_clk_mask = RADEON_GPIO_Y_1;
++		i2c.y_data_mask = RADEON_GPIO_Y_0;
++
+ 		i2c.mask_clk_reg = ddc_line;
+ 		i2c.mask_data_reg = ddc_line;
+ 		i2c.a_clk_reg = ddc_line;
+ 		i2c.a_data_reg = ddc_line;
+-		i2c.put_clk_reg = ddc_line;
+-		i2c.put_data_reg = ddc_line;
+-		i2c.get_clk_reg = ddc_line;
+-		i2c.get_data_reg = ddc_line;
++		i2c.en_clk_reg = ddc_line;
++		i2c.en_data_reg = ddc_line;
++		i2c.y_clk_reg = ddc_line;
++		i2c.y_data_reg = ddc_line;
+ 	}
+ 
++	if (rdev->family < CHIP_R200)
++		i2c.hw_capable = false;
++	else {
++		switch (ddc_line) {
++		case RADEON_GPIO_VGA_DDC:
++		case RADEON_GPIO_DVI_DDC:
++			i2c.hw_capable = true;
++			break;
++		case RADEON_GPIO_MONID:
++			/* hw i2c on RADEON_GPIO_MONID doesn't seem to work
++			 * reliably on some pre-r4xx hardware; not sure why.
++			 */
++			i2c.hw_capable = false;
++			break;
++		default:
++			i2c.hw_capable = false;
++			break;
++		}
++	}
++	i2c.mm_i2c = false;
++	i2c.i2c_id = 0;
++
+ 	if (ddc_line)
+ 		i2c.valid = true;
+ 	else
+@@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
+ 	uint16_t sclk, mclk;
+ 
+ 	if (rdev->bios == NULL)
+-		return NULL;
++		return false;
+ 
+ 	pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
+ 	if (pll_info) {
+@@ -562,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
+ 	return false;
+ }
+ 
++bool radeon_combios_sideport_present(struct radeon_device *rdev)
++{
++	struct drm_device *dev = rdev->ddev;
++	u16 igp_info;
++
++	igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
++
++	if (igp_info) {
++		if (RBIOS16(igp_info + 0x4))
++			return true;
++	}
++	return false;
++}
++
++static const uint32_t default_primarydac_adj[CHIP_LAST] = {
++	0x00000808,		/* r100  */
++	0x00000808,		/* rv100 */
++	0x00000808,		/* rs100 */
++	0x00000808,		/* rv200 */
++	0x00000808,		/* rs200 */
++	0x00000808,		/* r200  */
++	0x00000808,		/* rv250 */
++	0x00000000,		/* rs300 */
++	0x00000808,		/* rv280 */
++	0x00000808,		/* r300  */
++	0x00000808,		/* r350  */
++	0x00000808,		/* rv350 */
++	0x00000808,		/* rv380 */
++	0x00000808,		/* r420  */
++	0x00000808,		/* r423  */
++	0x00000808,		/* rv410 */
++	0x00000000,		/* rs400 */
++	0x00000000,		/* rs480 */
++};
++
++static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
++							  struct radeon_encoder_primary_dac *p_dac)
++{
++	p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
++	return;
++}
++
+ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ 								       radeon_encoder
+ 								       *encoder)
+@@ -571,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ 	uint16_t dac_info;
+ 	uint8_t rev, bg, dac;
+ 	struct radeon_encoder_primary_dac *p_dac = NULL;
++	int found = 0;
+ 
+-	if (rdev->bios == NULL)
++	p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
++			GFP_KERNEL);
++
++	if (!p_dac)
+ 		return NULL;
+ 
++	if (rdev->bios == NULL)
++		goto out;
++
+ 	/* check CRT table */
+ 	dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+ 	if (dac_info) {
+-		p_dac =
+-		    kzalloc(sizeof(struct radeon_encoder_primary_dac),
+-			    GFP_KERNEL);
+-
+-		if (!p_dac)
+-			return NULL;
+-
+ 		rev = RBIOS8(dac_info) & 0x3;
+ 		if (rev < 2) {
+ 			bg = RBIOS8(dac_info + 0x2) & 0xf;
+@@ -595,20 +670,26 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
+ 			dac = RBIOS8(dac_info + 0x3) & 0xf;
+ 			p_dac->ps2_pdac_adj = (bg << 8) | (dac);
+ 		}
+-
++		found = 1;
+ 	}
+ 
++out:
++	if (!found) /* fallback to defaults */
++		radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
++
+ 	return p_dac;
+ }
+ 
+-static enum radeon_tv_std
+-radeon_combios_get_tv_info(struct radeon_encoder *encoder)
++enum radeon_tv_std
++radeon_combios_get_tv_info(struct radeon_device *rdev)
+ {
+-	struct drm_device *dev = encoder->base.dev;
+-	struct radeon_device *rdev = dev->dev_private;
++	struct drm_device *dev = rdev->ddev;
+ 	uint16_t tv_info;
+ 	enum radeon_tv_std tv_std = TV_STD_NTSC;
+ 
++	if (rdev->bios == NULL)
++		return tv_std;
++
+ 	tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
+ 	if (tv_info) {
+ 		if (RBIOS8(tv_info + 6) == 'T') {
+@@ -746,7 +827,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
+ 			tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
+ 			found = 1;
+ 		}
+-		tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
++		tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
+ 	}
+ 	if (!found) {
+ 		/* then check CRT table */
+@@ -890,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+ 			 lvds->native_mode.vdisplay);
+ 
+ 		lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
+-		if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
+-			lvds->panel_vcc_delay = 2000;
++		lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
+ 
+ 		lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
+ 		lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
+@@ -993,8 +1073,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
+ 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R420  */
+ 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R423  */
+ 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RV410 */
+-	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS400 */
+-	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS480 */
++	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS400 */
++	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS480 */
+ };
+ 
+ bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+@@ -1028,7 +1108,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ 	tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
+ 
+ 	if (tmds_info) {
+-
+ 		ver = RBIOS8(tmds_info);
+ 		DRM_INFO("DFP table revision: %d\n", ver);
+ 		if (ver == 3) {
+@@ -1063,51 +1142,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+ 					  tmds->tmds_pll[i].value);
+ 			}
+ 		}
+-	} else
++	} else {
+ 		DRM_INFO("No TMDS info found in BIOS\n");
++		return false;
++	}
+ 	return true;
+ }
+ 
+-struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder)
++bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
++						struct radeon_encoder_ext_tmds *tmds)
+ {
+-	struct radeon_encoder_int_tmds *tmds = NULL;
+-	bool ret;
++	struct drm_device *dev = encoder->base.dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_i2c_bus_rec i2c_bus;
+ 
+-	tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
++	/* default for macs */
++	i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
++	tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ 
+-	if (!tmds)
+-		return NULL;
+-
+-	ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
+-	if (ret == false)
+-		radeon_legacy_get_tmds_info_from_table(encoder, tmds);
++	/* XXX some macs have duallink chips */
++	switch (rdev->mode_info.connector_table) {
++	case CT_POWERBOOK_EXTERNAL:
++	case CT_MINI_EXTERNAL:
++	default:
++		tmds->dvo_chip = DVO_SIL164;
++		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
++		break;
++	}
+ 
+-	return tmds;
++	return true;
+ }
+ 
+-void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
++bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
++						  struct radeon_encoder_ext_tmds *tmds)
+ {
+ 	struct drm_device *dev = encoder->base.dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+-	uint16_t ext_tmds_info;
+-	uint8_t ver;
++	uint16_t offset;
++	uint8_t ver, id, blocks, clk, data;
++	int i;
++	enum radeon_combios_ddc gpio;
++	struct radeon_i2c_bus_rec i2c_bus;
+ 
+ 	if (rdev->bios == NULL)
+-		return;
++		return false;
+ 
+-	ext_tmds_info =
+-	    combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+-	if (ext_tmds_info) {
+-		ver = RBIOS8(ext_tmds_info);
+-		DRM_INFO("External TMDS Table revision: %d\n", ver);
+-		// TODO
++	tmds->i2c_bus = NULL;
++	if (rdev->flags & RADEON_IS_IGP) {
++		offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
++		if (offset) {
++			ver = RBIOS8(offset);
++			DRM_INFO("GPIO Table revision: %d\n", ver);
++			blocks = RBIOS8(offset + 2);
++			for (i = 0; i < blocks; i++) {
++				id = RBIOS8(offset + 3 + (i * 5) + 0);
++				if (id == 136) {
++					clk = RBIOS8(offset + 3 + (i * 5) + 3);
++					data = RBIOS8(offset + 3 + (i * 5) + 4);
++					i2c_bus.valid = true;
++					i2c_bus.mask_clk_mask = (1 << clk);
++					i2c_bus.mask_data_mask = (1 << data);
++					i2c_bus.a_clk_mask = (1 << clk);
++					i2c_bus.a_data_mask = (1 << data);
++					i2c_bus.en_clk_mask = (1 << clk);
++					i2c_bus.en_data_mask = (1 << data);
++					i2c_bus.y_clk_mask = (1 << clk);
++					i2c_bus.y_data_mask = (1 << data);
++					i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
++					i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
++					i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
++					i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
++					i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
++					i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
++					i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
++					i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
++					tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
++					tmds->dvo_chip = DVO_SIL164;
++					tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
++					break;
++				}
++			}
++		}
++	} else {
++		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
++		if (offset) {
++			ver = RBIOS8(offset);
++			DRM_INFO("External TMDS Table revision: %d\n", ver);
++			tmds->slave_addr = RBIOS8(offset + 4 + 2);
++			tmds->slave_addr >>= 1; /* 7 bit addressing */
++			gpio = RBIOS8(offset + 4 + 3);
++			switch (gpio) {
++			case DDC_MONID:
++				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
++				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
++				break;
++			case DDC_DVI:
++				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
++				break;
++			case DDC_VGA:
++				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
++				break;
++			case DDC_CRT2:
++				/* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
++				if (rdev->family >= CHIP_R300)
++					i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
++				else
++					i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
++				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
++				break;
++			case DDC_LCD: /* MM i2c */
++				DRM_ERROR("MM i2c requires hw i2c engine\n");
++				break;
++			default:
++				DRM_ERROR("Unsupported gpio %d\n", gpio);
++				break;
++			}
++		}
+ 	}
++
++	if (!tmds->i2c_bus) {
++		DRM_INFO("No valid Ext TMDS info found in BIOS\n");
++		return false;
++	}
++
++	return true;
+ }
+ 
+ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_i2c_bus_rec ddc_i2c;
++	struct radeon_hpd hpd;
+ 
+ 	rdev->mode_info.connector_table = radeon_connector_table;
+ 	if (rdev->mode_info.connector_table == CT_NONE) {
+@@ -1168,7 +1335,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		/* these are the most common settings */
+ 		if (rdev->flags & RADEON_SINGLE_CRTC) {
+ 			/* VGA - primary dac */
+-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++			hpd.hpd = RADEON_HPD_NONE;
+ 			radeon_add_legacy_encoder(dev,
+ 						  radeon_get_encoder_id(dev,
+ 									ATOM_DEVICE_CRT1_SUPPORT,
+@@ -1178,10 +1346,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 						    ATOM_DEVICE_CRT1_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_VGA,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_VGA);
++						    CONNECTOR_OBJECT_ID_VGA,
++						    &hpd);
+ 		} else if (rdev->flags & RADEON_IS_MOBILITY) {
+ 			/* LVDS */
+-			ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK);
++			ddc_i2c = combios_setup_i2c_bus(rdev, 0);
++			hpd.hpd = RADEON_HPD_NONE;
+ 			radeon_add_legacy_encoder(dev,
+ 						  radeon_get_encoder_id(dev,
+ 									ATOM_DEVICE_LCD1_SUPPORT,
+@@ -1191,10 +1361,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 						    ATOM_DEVICE_LCD1_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_LVDS,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_LVDS);
++						    CONNECTOR_OBJECT_ID_LVDS,
++						    &hpd);
+ 
+ 			/* VGA - primary dac */
+-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++			hpd.hpd = RADEON_HPD_NONE;
+ 			radeon_add_legacy_encoder(dev,
+ 						  radeon_get_encoder_id(dev,
+ 									ATOM_DEVICE_CRT1_SUPPORT,
+@@ -1204,10 +1376,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 						    ATOM_DEVICE_CRT1_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_VGA,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_VGA);
++						    CONNECTOR_OBJECT_ID_VGA,
++						    &hpd);
+ 		} else {
+ 			/* DVI-I - tv dac, int tmds */
+-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++			hpd.hpd = RADEON_HPD_1;
+ 			radeon_add_legacy_encoder(dev,
+ 						  radeon_get_encoder_id(dev,
+ 									ATOM_DEVICE_DFP1_SUPPORT,
+@@ -1223,10 +1397,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 						    ATOM_DEVICE_CRT2_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_DVII,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
++						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++						    &hpd);
+ 
+ 			/* VGA - primary dac */
+-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++			hpd.hpd = RADEON_HPD_NONE;
+ 			radeon_add_legacy_encoder(dev,
+ 						  radeon_get_encoder_id(dev,
+ 									ATOM_DEVICE_CRT1_SUPPORT,
+@@ -1236,11 +1412,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 						    ATOM_DEVICE_CRT1_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_VGA,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_VGA);
++						    CONNECTOR_OBJECT_ID_VGA,
++						    &hpd);
+ 		}
+ 
+ 		if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
+ 			/* TV - tv dac */
++			ddc_i2c.valid = false;
++			hpd.hpd = RADEON_HPD_NONE;
+ 			radeon_add_legacy_encoder(dev,
+ 						  radeon_get_encoder_id(dev,
+ 									ATOM_DEVICE_TV1_SUPPORT,
+@@ -1250,14 +1429,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 						    ATOM_DEVICE_TV1_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_SVIDEO,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_SVIDEO);
++						    CONNECTOR_OBJECT_ID_SVIDEO,
++						    &hpd);
+ 		}
+ 		break;
+ 	case CT_IBOOK:
+ 		DRM_INFO("Connector Table: %d (ibook)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* LVDS */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_LCD1_SUPPORT,
+@@ -1265,9 +1446,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_LCD1_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_LVDS);
++					    CONNECTOR_OBJECT_ID_LVDS,
++					    &hpd);
+ 		/* VGA - TV DAC */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_CRT2_SUPPORT,
+@@ -1275,8 +1458,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_CRT2_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_VGA);
++					    CONNECTOR_OBJECT_ID_VGA,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1285,13 +1471,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	case CT_POWERBOOK_EXTERNAL:
+ 		DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* LVDS */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_LCD1_SUPPORT,
+@@ -1299,9 +1487,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_LCD1_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_LVDS);
++					    CONNECTOR_OBJECT_ID_LVDS,
++					    &hpd);
+ 		/* DVI-I - primary dac, ext tmds */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++		hpd.hpd = RADEON_HPD_2; /* ??? */
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_DFP2_SUPPORT,
+@@ -1317,8 +1507,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					    ATOM_DEVICE_DFP2_SUPPORT |
+ 					    ATOM_DEVICE_CRT1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I);
++					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1327,13 +1520,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	case CT_POWERBOOK_INTERNAL:
+ 		DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* LVDS */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_LCD1_SUPPORT,
+@@ -1341,9 +1536,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_LCD1_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_LVDS);
++					    CONNECTOR_OBJECT_ID_LVDS,
++					    &hpd);
+ 		/* DVI-I - primary dac, int tmds */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++		hpd.hpd = RADEON_HPD_1; /* ??? */
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_DFP1_SUPPORT,
+@@ -1358,8 +1555,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					    ATOM_DEVICE_DFP1_SUPPORT |
+ 					    ATOM_DEVICE_CRT1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
++					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1368,13 +1568,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	case CT_POWERBOOK_VGA:
+ 		DRM_INFO("Connector Table: %d (powerbook vga)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* LVDS */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_LCD1_SUPPORT,
+@@ -1382,9 +1584,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_LCD1_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_LVDS);
++					    CONNECTOR_OBJECT_ID_LVDS,
++					    &hpd);
+ 		/* VGA - primary dac */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_CRT1_SUPPORT,
+@@ -1392,8 +1596,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_CRT1_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_VGA);
++					    CONNECTOR_OBJECT_ID_VGA,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1402,13 +1609,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	case CT_MINI_EXTERNAL:
+ 		DRM_INFO("Connector Table: %d (mini external tmds)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* DVI-I - tv dac, ext tmds */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
++		hpd.hpd = RADEON_HPD_2; /* ??? */
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_DFP2_SUPPORT,
+@@ -1424,8 +1633,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					    ATOM_DEVICE_DFP2_SUPPORT |
+ 					    ATOM_DEVICE_CRT2_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
++					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1434,13 +1646,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	case CT_MINI_INTERNAL:
+ 		DRM_INFO("Connector Table: %d (mini internal tmds)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* DVI-I - tv dac, int tmds */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
++		hpd.hpd = RADEON_HPD_1; /* ??? */
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_DFP1_SUPPORT,
+@@ -1455,8 +1669,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					    ATOM_DEVICE_DFP1_SUPPORT |
+ 					    ATOM_DEVICE_CRT2_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
++					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1465,13 +1682,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	case CT_IMAC_G5_ISIGHT:
+ 		DRM_INFO("Connector Table: %d (imac g5 isight)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* DVI-D - int tmds */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
++		hpd.hpd = RADEON_HPD_1; /* ??? */
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_DFP1_SUPPORT,
+@@ -1479,9 +1698,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_DFP1_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
++					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
++					    &hpd);
+ 		/* VGA - tv dac */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_CRT2_SUPPORT,
+@@ -1489,8 +1710,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_CRT2_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_VGA);
++					    CONNECTOR_OBJECT_ID_VGA,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1499,13 +1723,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	case CT_EMAC:
+ 		DRM_INFO("Connector Table: %d (emac)\n",
+ 			 rdev->mode_info.connector_table);
+ 		/* VGA - primary dac */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_CRT1_SUPPORT,
+@@ -1513,9 +1739,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_CRT1_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_VGA);
++					    CONNECTOR_OBJECT_ID_VGA,
++					    &hpd);
+ 		/* VGA - tv dac */
+-		ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
++		ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_CRT2_SUPPORT,
+@@ -1523,8 +1751,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 					  ATOM_DEVICE_CRT2_SUPPORT);
+ 		radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_VGA);
++					    CONNECTOR_OBJECT_ID_VGA,
++					    &hpd);
+ 		/* TV - TV DAC */
++		ddc_i2c.valid = false;
++		hpd.hpd = RADEON_HPD_NONE;
+ 		radeon_add_legacy_encoder(dev,
+ 					  radeon_get_encoder_id(dev,
+ 								ATOM_DEVICE_TV1_SUPPORT,
+@@ -1533,7 +1764,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
+ 		radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
+ 					    DRM_MODE_CONNECTOR_SVIDEO,
+ 					    &ddc_i2c,
+-					    CONNECTOR_OBJECT_ID_SVIDEO);
++					    CONNECTOR_OBJECT_ID_SVIDEO,
++					    &hpd);
+ 		break;
+ 	default:
+ 		DRM_INFO("Connector table: %d (invalid)\n",
+@@ -1550,7 +1782,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+ 				       int bios_index,
+ 				       enum radeon_combios_connector
+ 				       *legacy_connector,
+-				       struct radeon_i2c_bus_rec *ddc_i2c)
++				       struct radeon_i2c_bus_rec *ddc_i2c,
++				       struct radeon_hpd *hpd)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+ 
+@@ -1558,29 +1791,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
+ 	if ((rdev->family == CHIP_RS400 ||
+ 	     rdev->family == CHIP_RS480) &&
+ 	    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
+-		*ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID);
++		*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ 	else if ((rdev->family == CHIP_RS400 ||
+ 		  rdev->family == CHIP_RS480) &&
+ 		 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
+-		ddc_i2c->valid = true;
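++		/* the DDC line reported as GPIO_MONID on these IGPs is actually
++		 * wired to GPIOPAD pins, with non-standard clock/data masks
++		 */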
++		*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
+ 		ddc_i2c->mask_clk_mask = (0x20 << 8);
+ 		ddc_i2c->mask_data_mask = 0x80;
+ 		ddc_i2c->a_clk_mask = (0x20 << 8);
+ 		ddc_i2c->a_data_mask = 0x80;
+-		ddc_i2c->put_clk_mask = (0x20 << 8);
+-		ddc_i2c->put_data_mask = 0x80;
+-		ddc_i2c->get_clk_mask = (0x20 << 8);
+-		ddc_i2c->get_data_mask = 0x80;
+-		ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
+-		ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
+-		ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
+-		ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
+-		ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
+-		ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
+-		ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
+-		ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
++		ddc_i2c->en_clk_mask = (0x20 << 8);
++		ddc_i2c->en_data_mask = 0x80;
++		ddc_i2c->y_clk_mask = (0x20 << 8);
++		ddc_i2c->y_data_mask = 0x80;
+ 	}
+ 
++	/* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
++	if ((rdev->family >= CHIP_R300) &&
++	    ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
++		*ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++
+ 	/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
+ 	   one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
+ 	if (dev->pdev->device == 0x515e &&
+@@ -1624,6 +1854,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
+ 	    dev->pdev->subsystem_device == 0x280a)
+ 		return false;
+ 
++	/* MSI S270 has non-existent TV port */
++	if (dev->pdev->device == 0x5955 &&
++	    dev->pdev->subsystem_vendor == 0x1462 &&
++	    dev->pdev->subsystem_device == 0x0131)
++		return false;
++
+ 	return true;
+ }
+ 
+@@ -1671,6 +1907,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 	enum radeon_combios_connector connector;
+ 	int i = 0;
+ 	struct radeon_i2c_bus_rec ddc_i2c;
++	struct radeon_hpd hpd;
+ 
+ 	if (rdev->bios == NULL)
+ 		return false;
+@@ -1691,26 +1928,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 			switch (ddc_type) {
+ 			case DDC_MONID:
+ 				ddc_i2c =
+-				    combios_setup_i2c_bus(RADEON_GPIO_MONID);
++					combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ 				break;
+ 			case DDC_DVI:
+ 				ddc_i2c =
+-				    combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++					combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ 				break;
+ 			case DDC_VGA:
+ 				ddc_i2c =
+-				    combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++					combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ 				break;
+ 			case DDC_CRT2:
+ 				ddc_i2c =
+-				    combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC);
++					combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
++				break;
++			default:
++				break;
++			}
++
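++			/* digital connectors use HPD pin 1 or 2, selected by
++			 * bit 4 of the BIOS connector word
++			 */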
++			switch (connector) {
++			case CONNECTOR_PROPRIETARY_LEGACY:
++			case CONNECTOR_DVI_I_LEGACY:
++			case CONNECTOR_DVI_D_LEGACY:
++				if ((tmp >> 4) & 0x1)
++					hpd.hpd = RADEON_HPD_2;
++				else
++					hpd.hpd = RADEON_HPD_1;
+ 				break;
+ 			default:
++				hpd.hpd = RADEON_HPD_NONE;
+ 				break;
+ 			}
+ 
+ 			if (!radeon_apply_legacy_quirks(dev, i, &connector,
+-						       &ddc_i2c))
++							&ddc_i2c, &hpd))
+ 				continue;
+ 
+ 			switch (connector) {
+@@ -1727,7 +1978,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 							    legacy_connector_convert
+ 							    [connector],
+ 							    &ddc_i2c,
+-							    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D);
++							    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
++							    &hpd);
+ 				break;
+ 			case CONNECTOR_CRT_LEGACY:
+ 				if (tmp & 0x1) {
+@@ -1753,7 +2005,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 							    legacy_connector_convert
+ 							    [connector],
+ 							    &ddc_i2c,
+-							    CONNECTOR_OBJECT_ID_VGA);
++							    CONNECTOR_OBJECT_ID_VGA,
++							    &hpd);
+ 				break;
+ 			case CONNECTOR_DVI_I_LEGACY:
+ 				devices = 0;
+@@ -1799,7 +2052,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 							    legacy_connector_convert
+ 							    [connector],
+ 							    &ddc_i2c,
+-							    connector_object_id);
++							    connector_object_id,
++							    &hpd);
+ 				break;
+ 			case CONNECTOR_DVI_D_LEGACY:
+ 				if ((tmp >> 4) & 0x1) {
+@@ -1817,7 +2071,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 							    legacy_connector_convert
+ 							    [connector],
+ 							    &ddc_i2c,
+-							    connector_object_id);
++							    connector_object_id,
++							    &hpd);
+ 				break;
+ 			case CONNECTOR_CTV_LEGACY:
+ 			case CONNECTOR_STV_LEGACY:
+@@ -1832,7 +2087,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 							    legacy_connector_convert
+ 							    [connector],
+ 							    &ddc_i2c,
+-							    CONNECTOR_OBJECT_ID_SVIDEO);
++							    CONNECTOR_OBJECT_ID_SVIDEO,
++							    &hpd);
+ 				break;
+ 			default:
+ 				DRM_ERROR("Unknown connector type: %d\n",
+@@ -1858,14 +2114,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 									0),
+ 						  ATOM_DEVICE_DFP1_SUPPORT);
+ 
+-			ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC);
++			ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
++			hpd.hpd = RADEON_HPD_NONE;
+ 			radeon_add_legacy_connector(dev,
+ 						    0,
+ 						    ATOM_DEVICE_CRT1_SUPPORT |
+ 						    ATOM_DEVICE_DFP1_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_DVII,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I);
++						    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
++						    &hpd);
+ 		} else {
+ 			uint16_t crt_info =
+ 				combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
+@@ -1876,13 +2134,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 										ATOM_DEVICE_CRT1_SUPPORT,
+ 										1),
+ 							  ATOM_DEVICE_CRT1_SUPPORT);
+-				ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC);
++				ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
++				hpd.hpd = RADEON_HPD_NONE;
+ 				radeon_add_legacy_connector(dev,
+ 							    0,
+ 							    ATOM_DEVICE_CRT1_SUPPORT,
+ 							    DRM_MODE_CONNECTOR_VGA,
+ 							    &ddc_i2c,
+-							    CONNECTOR_OBJECT_ID_VGA);
++							    CONNECTOR_OBJECT_ID_VGA,
++							    &hpd);
+ 			} else {
+ 				DRM_DEBUG("No connector info found\n");
+ 				return false;
+@@ -1910,27 +2170,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 				case DDC_MONID:
+ 					ddc_i2c =
+ 					    combios_setup_i2c_bus
+-					    (RADEON_GPIO_MONID);
++						(rdev, RADEON_GPIO_MONID);
+ 					break;
+ 				case DDC_DVI:
+ 					ddc_i2c =
+ 					    combios_setup_i2c_bus
+-					    (RADEON_GPIO_DVI_DDC);
++						(rdev, RADEON_GPIO_DVI_DDC);
+ 					break;
+ 				case DDC_VGA:
+ 					ddc_i2c =
+ 					    combios_setup_i2c_bus
+-					    (RADEON_GPIO_VGA_DDC);
++						(rdev, RADEON_GPIO_VGA_DDC);
+ 					break;
+ 				case DDC_CRT2:
+ 					ddc_i2c =
+ 					    combios_setup_i2c_bus
+-					    (RADEON_GPIO_CRT2_DDC);
++						(rdev, RADEON_GPIO_CRT2_DDC);
+ 					break;
+ 				case DDC_LCD:
+ 					ddc_i2c =
+ 					    combios_setup_i2c_bus
+-					    (RADEON_LCD_GPIO_MASK);
++						(rdev, RADEON_GPIOPAD_MASK);
+ 					ddc_i2c.mask_clk_mask =
+ 					    RBIOS32(lcd_ddc_info + 3);
+ 					ddc_i2c.mask_data_mask =
+@@ -1939,19 +2199,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 					    RBIOS32(lcd_ddc_info + 3);
+ 					ddc_i2c.a_data_mask =
+ 					    RBIOS32(lcd_ddc_info + 7);
+-					ddc_i2c.put_clk_mask =
++					ddc_i2c.en_clk_mask =
+ 					    RBIOS32(lcd_ddc_info + 3);
+-					ddc_i2c.put_data_mask =
++					ddc_i2c.en_data_mask =
+ 					    RBIOS32(lcd_ddc_info + 7);
+-					ddc_i2c.get_clk_mask =
++					ddc_i2c.y_clk_mask =
+ 					    RBIOS32(lcd_ddc_info + 3);
+-					ddc_i2c.get_data_mask =
++					ddc_i2c.y_data_mask =
+ 					    RBIOS32(lcd_ddc_info + 7);
+ 					break;
+ 				case DDC_GPIO:
+ 					ddc_i2c =
+ 					    combios_setup_i2c_bus
+-					    (RADEON_MDGPIO_EN_REG);
++						(rdev, RADEON_MDGPIO_MASK);
+ 					ddc_i2c.mask_clk_mask =
+ 					    RBIOS32(lcd_ddc_info + 3);
+ 					ddc_i2c.mask_data_mask =
+@@ -1960,13 +2220,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 					    RBIOS32(lcd_ddc_info + 3);
+ 					ddc_i2c.a_data_mask =
+ 					    RBIOS32(lcd_ddc_info + 7);
+-					ddc_i2c.put_clk_mask =
++					ddc_i2c.en_clk_mask =
+ 					    RBIOS32(lcd_ddc_info + 3);
+-					ddc_i2c.put_data_mask =
++					ddc_i2c.en_data_mask =
+ 					    RBIOS32(lcd_ddc_info + 7);
+-					ddc_i2c.get_clk_mask =
++					ddc_i2c.y_clk_mask =
+ 					    RBIOS32(lcd_ddc_info + 3);
+-					ddc_i2c.get_data_mask =
++					ddc_i2c.y_data_mask =
+ 					    RBIOS32(lcd_ddc_info + 7);
+ 					break;
+ 				default:
+@@ -1977,12 +2237,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 			} else
+ 				ddc_i2c.valid = false;
+ 
++			hpd.hpd = RADEON_HPD_NONE;
+ 			radeon_add_legacy_connector(dev,
+ 						    5,
+ 						    ATOM_DEVICE_LCD1_SUPPORT,
+ 						    DRM_MODE_CONNECTOR_LVDS,
+ 						    &ddc_i2c,
+-						    CONNECTOR_OBJECT_ID_LVDS);
++						    CONNECTOR_OBJECT_ID_LVDS,
++						    &hpd);
+ 		}
+ 	}
+ 
+@@ -1993,6 +2255,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 		if (tv_info) {
+ 			if (RBIOS8(tv_info + 6) == 'T') {
+ 				if (radeon_apply_legacy_tv_quirks(dev)) {
++					hpd.hpd = RADEON_HPD_NONE;
+ 					radeon_add_legacy_encoder(dev,
+ 								  radeon_get_encoder_id
+ 								  (dev,
+@@ -2003,7 +2266,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 								    ATOM_DEVICE_TV1_SUPPORT,
+ 								    DRM_MODE_CONNECTOR_SVIDEO,
+ 								    &ddc_i2c,
+-								    CONNECTOR_OBJECT_ID_SVIDEO);
++								    CONNECTOR_OBJECT_ID_SVIDEO,
++								    &hpd);
+ 				}
+ 			}
+ 		}
+@@ -2014,6 +2278,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
+ 	return true;
+ }
+ 
++void radeon_external_tmds_setup(struct drm_encoder *encoder)
++{
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
++
++	if (!tmds)
++		return;
++
++	switch (tmds->dvo_chip) {
++	case DVO_SIL164:
++		/* sil 164 */
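++		/* program the SiI164 config registers; 0x08 is written first
++		 * as 0x30 and last as 0x3b, which appears to release power-down
++		 */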
++		radeon_i2c_do_lock(tmds->i2c_bus, 1);
++		radeon_i2c_sw_put_byte(tmds->i2c_bus,
++				       tmds->slave_addr,
++				       0x08, 0x30);
++		radeon_i2c_sw_put_byte(tmds->i2c_bus,
++				       tmds->slave_addr,
++				       0x09, 0x00);
++		radeon_i2c_sw_put_byte(tmds->i2c_bus,
++				       tmds->slave_addr,
++				       0x0a, 0x90);
++		radeon_i2c_sw_put_byte(tmds->i2c_bus,
++				       tmds->slave_addr,
++				       0x0c, 0x89);
++		radeon_i2c_sw_put_byte(tmds->i2c_bus,
++				       tmds->slave_addr,
++				       0x08, 0x3b);
++		radeon_i2c_do_lock(tmds->i2c_bus, 0);
++		break;
++	case DVO_SIL1178:
++		/* sil 1178 - untested */
++		/*
++		 * 0x0f, 0x44
++		 * 0x0f, 0x4c
++		 * 0x0e, 0x01
++		 * 0x0a, 0x80
++		 * 0x09, 0x30
++		 * 0x0c, 0xc9
++		 * 0x0d, 0x70
++		 * 0x08, 0x32
++		 * 0x08, 0x33
++		 */
++		break;
++	default:
++		break;
++	}
++
++}
++
++bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	uint16_t offset;
++	uint8_t blocks, slave_addr, rev;
++	uint32_t index, id;
++	uint32_t reg, val, and_mask, or_mask;
++	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
++
++	if (rdev->bios == NULL)
++		return false;
++
++	if (!tmds)
++		return false;
++
++	if (rdev->flags & RADEON_IS_IGP) {
++		offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
++		offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
++		if (offset) {
++			if (rev > 1) {
++				blocks = RBIOS8(offset + 3);
++				index = offset + 4;
++				while (blocks > 0) {
++					id = RBIOS16(index);
++					index += 2;
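++					/* each entry: the top 3 bits of id select the op,
++					 * the low 13 bits index a dword register
++					 */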
++					switch (id >> 13) {
++					case 0:
++						reg = (id & 0x1fff) * 4;
++						val = RBIOS32(index);
++						index += 4;
++						WREG32(reg, val);
++						break;
++					case 2:
++						reg = (id & 0x1fff) * 4;
++						and_mask = RBIOS32(index);
++						index += 4;
++						or_mask = RBIOS32(index);
++						index += 4;
++						val = RREG32(reg);
++						val = (val & and_mask) | or_mask;
++						WREG32(reg, val);
++						break;
++					case 3:
++						val = RBIOS16(index);
++						index += 2;
++						udelay(val);
++						break;
++					case 4:
++						val = RBIOS16(index);
++						index += 2;
++						udelay(val * 1000);
++						break;
++					case 6:
++						slave_addr = id & 0xff;
++						slave_addr >>= 1; /* 7 bit addressing */
++						index++;
++						reg = RBIOS8(index);
++						index++;
++						val = RBIOS8(index);
++						index++;
++						radeon_i2c_do_lock(tmds->i2c_bus, 1);
++						radeon_i2c_sw_put_byte(tmds->i2c_bus,
++								       slave_addr,
++								       reg, val);
++						radeon_i2c_do_lock(tmds->i2c_bus, 0);
++						break;
++					default:
++						DRM_ERROR("Unknown id %d\n", id >> 13);
++						break;
++					}
++					blocks--;
++				}
++				return true;
++			}
++		}
++	} else {
++		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
++		if (offset) {
++			index = offset + 10;
++			id = RBIOS16(index);
++			while (id != 0xffff) {
++				index += 2;
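++				/* same opcode scheme as the IGP table above;
++				 * case 5 targets PLL registers instead of MMIO
++				 */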
++				switch (id >> 13) {
++				case 0:
++					reg = (id & 0x1fff) * 4;
++					val = RBIOS32(index);
++					index += 4;
++					WREG32(reg, val);
++					break;
++				case 2:
++					reg = (id & 0x1fff) * 4;
++					and_mask = RBIOS32(index);
++					index += 4;
++					or_mask = RBIOS32(index);
++					index += 4;
++					val = RREG32(reg);
++					val = (val & and_mask) | or_mask;
++					WREG32(reg, val);
++					break;
++				case 4:
++					val = RBIOS16(index);
++					index += 2;
++					udelay(val);
++					break;
++				case 5:
++					reg = id & 0x1fff;
++					and_mask = RBIOS32(index);
++					index += 4;
++					or_mask = RBIOS32(index);
++					index += 4;
++					val = RREG32_PLL(reg);
++					val = (val & and_mask) | or_mask;
++					WREG32_PLL(reg, val);
++					break;
++				case 6:
++					reg = id & 0x1fff;
++					val = RBIOS8(index);
++					index += 1;
++					radeon_i2c_do_lock(tmds->i2c_bus, 1);
++					radeon_i2c_sw_put_byte(tmds->i2c_bus,
++							       tmds->slave_addr,
++							       reg, val);
++					radeon_i2c_do_lock(tmds->i2c_bus, 0);
++					break;
++				default:
++					DRM_ERROR("Unknown id %d\n", id >> 13);
++					break;
++				}
++				id = RBIOS16(index);
++			}
++			return true;
++		}
++	}
++	return false;
++}
++
+ static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 29763ce..65f8194 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -40,6 +40,28 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ 				       struct drm_encoder *encoder,
+ 				       bool connected);
+ 
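++/* called on a hotplug interrupt: re-arm the HPD pin polarity and, for
++ * DP/eDP sinks, retrain the link if it has been lost
++ */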
++void radeon_connector_hotplug(struct drm_connector *connector)
++{
++	struct drm_device *dev = connector->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++
++	if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
++		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
++
++	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
++	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
++		if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++		    (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
++			if (radeon_dp_needs_link_train(radeon_connector)) {
++				if (connector->encoder)
++					dp_link_train(connector->encoder, connector);
++			}
++		}
++	}
++
++}
++
+ static void radeon_property_change_mode(struct drm_encoder *encoder)
+ {
+ 	struct drm_crtc *crtc = encoder->crtc;
+@@ -188,6 +210,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
+ 		drm_mode_set_name(mode);
+ 
+ 		DRM_DEBUG("Adding native panel mode %s\n", mode->name);
++	} else if (native_mode->hdisplay != 0 &&
++		   native_mode->vdisplay != 0) {
++		/* mac laptops without an edid */
++		/* Note that this is not necessarily the exact panel mode,
++		 * but an approximation based on the cvt formula.  For these
++		 * systems we should ideally read the mode info out of the
++		 * registers or add a mode table, but this works and is much
++		 * simpler.
++		 */
++		mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
++		mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
++		DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
+ 	}
+ 	return mode;
+ }
+@@ -445,10 +479,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
+ 		ret = connector_status_connected;
+ 	else {
+ 		if (radeon_connector->ddc_bus) {
+-			radeon_i2c_do_lock(radeon_connector, 1);
++			radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ 							      &radeon_connector->ddc_bus->adapter);
+-			radeon_i2c_do_lock(radeon_connector, 0);
++			radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ 			if (radeon_connector->edid)
+ 				ret = connector_status_connected;
+ 		}
+@@ -546,24 +580,26 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
+ 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ 	struct drm_encoder *encoder;
+ 	struct drm_encoder_helper_funcs *encoder_funcs;
+-	bool dret;
++	bool dret = false;
+ 	enum drm_connector_status ret = connector_status_disconnected;
+ 
+ 	encoder = radeon_best_single_encoder(connector);
+ 	if (!encoder)
+ 		ret = connector_status_disconnected;
+ 
+-	radeon_i2c_do_lock(radeon_connector, 1);
+-	dret = radeon_ddc_probe(radeon_connector);
+-	radeon_i2c_do_lock(radeon_connector, 0);
++	if (radeon_connector->ddc_bus) {
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
++		dret = radeon_ddc_probe(radeon_connector);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
++	}
+ 	if (dret) {
+ 		if (radeon_connector->edid) {
+ 			kfree(radeon_connector->edid);
+ 			radeon_connector->edid = NULL;
+ 		}
+-		radeon_i2c_do_lock(radeon_connector, 1);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+-		radeon_i2c_do_lock(radeon_connector, 0);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ 
+ 		if (!radeon_connector->edid) {
+ 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+@@ -583,7 +619,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
+ 				ret = connector_status_connected;
+ 		}
+ 	} else {
+-		if (radeon_connector->dac_load_detect) {
++		if (radeon_connector->dac_load_detect && encoder) {
+ 			encoder_funcs = encoder->helper_private;
+ 			ret = encoder_funcs->detect(encoder, connector);
+ 		}
+@@ -706,19 +742,21 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
+ 	struct drm_mode_object *obj;
+ 	int i;
+ 	enum drm_connector_status ret = connector_status_disconnected;
+-	bool dret;
++	bool dret = false;
+ 
+-	radeon_i2c_do_lock(radeon_connector, 1);
+-	dret = radeon_ddc_probe(radeon_connector);
+-	radeon_i2c_do_lock(radeon_connector, 0);
++	if (radeon_connector->ddc_bus) {
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
++		dret = radeon_ddc_probe(radeon_connector);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
++	}
+ 	if (dret) {
+ 		if (radeon_connector->edid) {
+ 			kfree(radeon_connector->edid);
+ 			radeon_connector->edid = NULL;
+ 		}
+-		radeon_i2c_do_lock(radeon_connector, 1);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+-		radeon_i2c_do_lock(radeon_connector, 0);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ 
+ 		if (!radeon_connector->edid) {
+ 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+@@ -735,6 +773,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
+ 				ret = connector_status_disconnected;
+ 			} else
+ 				ret = connector_status_connected;
++
++			/* multiple connectors on the same encoder with the same ddc line
++			 * This tends to be HDMI and DVI on the same encoder with the
++			 * same ddc line.  If the edid says HDMI, consider the HDMI port
++			 * connected and the DVI port disconnected.  If the edid doesn't
++			 * say HDMI, vice versa.
++			 */
++			if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
++				struct drm_device *dev = connector->dev;
++				struct drm_connector *list_connector;
++				struct radeon_connector *list_radeon_connector;
++				list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
++					if (connector == list_connector)
++						continue;
++					list_radeon_connector = to_radeon_connector(list_connector);
++					if (radeon_connector->devices == list_radeon_connector->devices) {
++						if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
++							if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
++								kfree(radeon_connector->edid);
++								radeon_connector->edid = NULL;
++								ret = connector_status_disconnected;
++							}
++						} else {
++							if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
++							    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
++								kfree(radeon_connector->edid);
++								radeon_connector->edid = NULL;
++								ret = connector_status_disconnected;
++							}
++						}
++					}
++				}
++			}
+ 		}
+ 	}
+ 
+@@ -833,10 +904,18 @@ static void radeon_dvi_force(struct drm_connector *connector)
+ static int radeon_dvi_mode_valid(struct drm_connector *connector,
+ 				  struct drm_display_mode *mode)
+ {
++	struct drm_device *dev = connector->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ 
+ 	/* XXX check mode bandwidth */
+ 
++	/* clocks over 135 MHz have heat issues with DVI on RV100 */
++	if (radeon_connector->use_digital &&
++	    (rdev->family == CHIP_RV100) &&
++	    (mode->clock > 135000))
++		return MODE_CLOCK_HIGH;
++
+ 	if (radeon_connector->use_digital && (mode->clock > 165000)) {
+ 		if ((radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
+ 		    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
+@@ -863,6 +942,93 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
+ 	.force = radeon_dvi_force,
+ };
+ 
++static void radeon_dp_connector_destroy(struct drm_connector *connector)
++{
++	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
++
++	if (radeon_connector->ddc_bus)
++		radeon_i2c_destroy(radeon_connector->ddc_bus);
++	if (radeon_connector->edid)
++		kfree(radeon_connector->edid);
++	if (radeon_dig_connector->dp_i2c_bus)
++		radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
++	kfree(radeon_connector->con_priv);
++	drm_sysfs_connector_remove(connector);
++	drm_connector_cleanup(connector);
++	kfree(connector);
++}
++
++static int radeon_dp_get_modes(struct drm_connector *connector)
++{
++	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++	int ret;
++
++	ret = radeon_ddc_get_modes(radeon_connector);
++	return ret;
++}
++
++static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
++{
++	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++	enum drm_connector_status ret = connector_status_disconnected;
++	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
++	u8 sink_type;
++
++	if (radeon_connector->edid) {
++		kfree(radeon_connector->edid);
++		radeon_connector->edid = NULL;
++	}
++
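++	/* DP/eDP sinks are detected via the DPCD; anything else behind the
++	 * port falls back to a plain DDC probe
++	 */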
++	sink_type = radeon_dp_getsinktype(radeon_connector);
++	if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++	    (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
++		if (radeon_dp_getdpcd(radeon_connector)) {
++			radeon_dig_connector->dp_sink_type = sink_type;
++			ret = connector_status_connected;
++		}
++	} else {
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
++		if (radeon_ddc_probe(radeon_connector)) {
++			radeon_dig_connector->dp_sink_type = sink_type;
++			ret = connector_status_connected;
++		}
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
++	}
++
++	return ret;
++}
++
++static int radeon_dp_mode_valid(struct drm_connector *connector,
++				  struct drm_display_mode *mode)
++{
++	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
++
++	/* XXX check mode bandwidth */
++
++	if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++	    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
++		return radeon_dp_mode_valid_helper(radeon_connector, mode);
++	else
++		return MODE_OK;
++}
++
++struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
++	.get_modes = radeon_dp_get_modes,
++	.mode_valid = radeon_dp_mode_valid,
++	.best_encoder = radeon_dvi_encoder,
++};
++
++struct drm_connector_funcs radeon_dp_connector_funcs = {
++	.dpms = drm_helper_connector_dpms,
++	.detect = radeon_dp_detect,
++	.fill_modes = drm_helper_probe_single_connector_modes,
++	.set_property = radeon_connector_set_property,
++	.destroy = radeon_dp_connector_destroy,
++	.force = radeon_dvi_force,
++};
++
+ void
+ radeon_add_atom_connector(struct drm_device *dev,
+ 			  uint32_t connector_id,
+@@ -871,7 +1037,8 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 			  struct radeon_i2c_bus_rec *i2c_bus,
+ 			  bool linkb,
+ 			  uint32_t igp_lane_info,
+-			  uint16_t connector_object_id)
++			  uint16_t connector_object_id,
++			  struct radeon_hpd *hpd)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_connector *connector;
+@@ -893,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 			return;
+ 		}
+ 		if (radeon_connector->ddc_bus && i2c_bus->valid) {
+-			if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
+-				    sizeof(struct radeon_i2c_bus_rec)) == 0) {
++			if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
+ 				radeon_connector->shared_ddc = true;
+ 				shared_ddc = true;
+ 			}
+@@ -911,6 +1077,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 	radeon_connector->devices = supported_device;
+ 	radeon_connector->shared_ddc = shared_ddc;
+ 	radeon_connector->connector_object_id = connector_object_id;
++	radeon_connector->hpd = *hpd;
+ 	switch (connector_type) {
+ 	case DRM_MODE_CONNECTOR_VGA:
+ 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+@@ -963,10 +1130,12 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 		drm_connector_attach_property(&radeon_connector->base,
+ 					      rdev->mode_info.coherent_mode_property,
+ 					      1);
+-		radeon_connector->dac_load_detect = true;
+-		drm_connector_attach_property(&radeon_connector->base,
+-					      rdev->mode_info.load_detect_property,
+-					      1);
++		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
++			radeon_connector->dac_load_detect = true;
++			drm_connector_attach_property(&radeon_connector->base,
++						      rdev->mode_info.load_detect_property,
++						      1);
++		}
+ 		break;
+ 	case DRM_MODE_CONNECTOR_HDMIA:
+ 	case DRM_MODE_CONNECTOR_HDMIB:
+@@ -991,22 +1160,36 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 		subpixel_order = SubPixelHorizontalRGB;
+ 		break;
+ 	case DRM_MODE_CONNECTOR_DisplayPort:
++	case DRM_MODE_CONNECTOR_eDP:
+ 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
+ 		if (!radeon_dig_connector)
+ 			goto failed;
+ 		radeon_dig_connector->linkb = linkb;
+ 		radeon_dig_connector->igp_lane_info = igp_lane_info;
+ 		radeon_connector->con_priv = radeon_dig_connector;
+-		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
+-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
++		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
++		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
+ 		if (ret)
+ 			goto failed;
+ 		if (i2c_bus->valid) {
+-			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
++			/* add DP i2c bus */
++			if (connector_type == DRM_MODE_CONNECTOR_eDP)
++				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
++			else
++				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
++			if (!radeon_dig_connector->dp_i2c_bus)
++				goto failed;
++			if (connector_type == DRM_MODE_CONNECTOR_eDP)
++				radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
++			else
++				radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+ 			if (!radeon_connector->ddc_bus)
+ 				goto failed;
+ 		}
+ 		subpixel_order = SubPixelHorizontalRGB;
++		drm_connector_attach_property(&radeon_connector->base,
++					      rdev->mode_info.coherent_mode_property,
++					      1);
+ 		break;
+ 	case DRM_MODE_CONNECTOR_SVIDEO:
+ 	case DRM_MODE_CONNECTOR_Composite:
+@@ -1020,6 +1203,9 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 			drm_connector_attach_property(&radeon_connector->base,
+ 						      rdev->mode_info.load_detect_property,
+ 						      1);
++			drm_connector_attach_property(&radeon_connector->base,
++						      rdev->mode_info.tv_std_property,
++						      radeon_atombios_get_tv_info(rdev));
+ 		}
+ 		break;
+ 	case DRM_MODE_CONNECTOR_LVDS:
+@@ -1038,7 +1224,6 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 			if (!radeon_connector->ddc_bus)
+ 				goto failed;
+ 		}
+-		drm_mode_create_scaling_mode_property(dev);
+ 		drm_connector_attach_property(&radeon_connector->base,
+ 					      dev->mode_config.scaling_mode_property,
+ 					      DRM_MODE_SCALE_FULLSCREEN);
+@@ -1063,7 +1248,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 			    uint32_t supported_device,
+ 			    int connector_type,
+ 			    struct radeon_i2c_bus_rec *i2c_bus,
+-			    uint16_t connector_object_id)
++			    uint16_t connector_object_id,
++			    struct radeon_hpd *hpd)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_connector *connector;
+@@ -1093,6 +1279,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 	radeon_connector->connector_id = connector_id;
+ 	radeon_connector->devices = supported_device;
+ 	radeon_connector->connector_object_id = connector_object_id;
++	radeon_connector->hpd = *hpd;
+ 	switch (connector_type) {
+ 	case DRM_MODE_CONNECTOR_VGA:
+ 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
+@@ -1159,7 +1346,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 				radeon_connector->dac_load_detect = false;
+ 			drm_connector_attach_property(&radeon_connector->base,
+ 						      rdev->mode_info.load_detect_property,
+-						      1);
++						      radeon_connector->dac_load_detect);
++			drm_connector_attach_property(&radeon_connector->base,
++						      rdev->mode_info.tv_std_property,
++						      radeon_combios_get_tv_info(rdev));
+ 		}
+ 		break;
+ 	case DRM_MODE_CONNECTOR_LVDS:
+diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
+index 4f7afc7..06123ba 100644
+--- a/drivers/gpu/drm/radeon/radeon_cp.c
++++ b/drivers/gpu/drm/radeon/radeon_cp.c
+@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+ 	for (t = 0; t < dev_priv->usec_timeout; t++) {
+ 		u32 done_age = GET_SCRATCH(dev_priv, 1);
+ 		DRM_DEBUG("done_age = %d\n", done_age);
+-		for (i = start; i < dma->buf_count; i++) {
+-			buf = dma->buflist[i];
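++		/* walk every buffer, starting at 'start' and wrapping around,
++		 * instead of stopping at the end of the list
++		 */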
++		for (i = 0; i < dma->buf_count; i++) {
++			buf = dma->buflist[start];
+ 			buf_priv = buf->dev_private;
+ 			if (buf->file_priv == NULL || (buf->pending &&
+ 						       buf_priv->age <=
+@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+ 				buf->pending = 0;
+ 				return buf;
+ 			}
+-			start = 0;
++			if (++start >= dma->buf_count)
++				start = 0;
+ 		}
+ 
+ 		if (t) {
+@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+ 		}
+ 	}
+ 
+-	DRM_DEBUG("returning NULL!\n");
+ 	return NULL;
+ }
+ 
+-#if 0
+-struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+-{
+-	struct drm_device_dma *dma = dev->dma;
+-	drm_radeon_private_t *dev_priv = dev->dev_private;
+-	drm_radeon_buf_priv_t *buf_priv;
+-	struct drm_buf *buf;
+-	int i, t;
+-	int start;
+-	u32 done_age;
+-
+-	done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
+-	if (++dev_priv->last_buf >= dma->buf_count)
+-		dev_priv->last_buf = 0;
+-
+-	start = dev_priv->last_buf;
+-	dev_priv->stats.freelist_loops++;
+-
+-	for (t = 0; t < 2; t++) {
+-		for (i = start; i < dma->buf_count; i++) {
+-			buf = dma->buflist[i];
+-			buf_priv = buf->dev_private;
+-			if (buf->file_priv == 0 || (buf->pending &&
+-						    buf_priv->age <=
+-						    done_age)) {
+-				dev_priv->stats.requested_bufs++;
+-				buf->pending = 0;
+-				return buf;
+-			}
+-		}
+-		start = 0;
+-	}
+-
+-	return NULL;
+-}
+-#endif
+-
+ void radeon_freelist_reset(struct drm_device * dev)
+ {
+ 	struct drm_device_dma *dma = dev->dma;
+@@ -2182,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
+ 			 &master_priv->sarea);
+ 	if (ret) {
+ 		DRM_ERROR("SAREA setup failed\n");
++		kfree(master_priv);
+ 		return ret;
+ 	}
+ 	master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 5ab2cf9..e9d0850 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
+ 			}
+ 			p->relocs_ptr[i] = &p->relocs[i];
+ 			p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+-			p->relocs[i].lobj.robj = p->relocs[i].robj;
++			p->relocs[i].lobj.bo = p->relocs[i].robj;
+ 			p->relocs[i].lobj.rdomain = r->read_domains;
+ 			p->relocs[i].lobj.wdomain = r->write_domain;
+ 			p->relocs[i].handle = r->handle;
+ 			p->relocs[i].flags = r->flags;
+ 			INIT_LIST_HEAD(&p->relocs[i].lobj.list);
+-			radeon_object_list_add_object(&p->relocs[i].lobj,
+-						      &p->validated);
++			radeon_bo_list_add_object(&p->relocs[i].lobj,
++						&p->validated);
+ 		}
+ 	}
+-	return radeon_object_list_validate(&p->validated, p->ib->fence);
++	return radeon_bo_list_validate(&p->validated);
+ }
+ 
+ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
+@@ -189,11 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
+ {
+ 	unsigned i;
+ 
+-	if (error) {
+-		radeon_object_list_unvalidate(&parser->validated);
+-	} else {
+-		radeon_object_list_clean(&parser->validated);
++	if (!error && parser->ib) {
++		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
+ 	}
++	radeon_bo_list_unreserve(&parser->validated);
+ 	for (i = 0; i < parser->nrelocs; i++) {
+ 		if (parser->relocs[i].gobj) {
+ 			mutex_lock(&parser->rdev->ddev->struct_mutex);
+@@ -230,6 +229,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+ 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
+ 	parser.filp = filp;
+ 	parser.rdev = rdev;
++	parser.dev = rdev->dev;
+ 	r = radeon_cs_parser_init(&parser, data);
+ 	if (r) {
+ 		DRM_ERROR("Failed to initialize parser !\n");
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 41bb76f..768b150 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
+ 	if (rdev->family < CHIP_R600) {
+ 		int i;
+ 
+-		for (i = 0; i < 8; i++) {
+-			WREG32(RADEON_SURFACE0_INFO +
+-			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
+-			       0);
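++		/* re-program surface registers still owned by a BO; clear the rest */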
++		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
++			if (rdev->surface_regs[i].bo)
++				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
++			else
++				radeon_clear_surface_reg(rdev, i);
+ 		}
+ 		/* enable surfaces */
+ 		WREG32(RADEON_SURFACE_CNTL, 0);
+@@ -208,6 +209,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ 
+ }
+ 
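++/* post the card from its BIOS tables if the system firmware did not;
++ * returns false only when there is no BIOS to post with
++ */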
++bool radeon_boot_test_post_card(struct radeon_device *rdev)
++{
++	if (radeon_card_posted(rdev))
++		return true;
++
++	if (rdev->bios) {
++		DRM_INFO("GPU not posted. posting now...\n");
++		if (rdev->is_atom_bios)
++			atom_asic_init(rdev->mode_info.atom_context);
++		else
++			radeon_combios_asic_init(rdev->ddev);
++		return true;
++	} else {
++		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
++		return false;
++	}
++}
++
+ int radeon_dummy_page_init(struct radeon_device *rdev)
+ {
+ 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
+@@ -372,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev)
+ 		/* FIXME: not supported yet */
+ 		return -EINVAL;
+ 	}
++
++	if (rdev->flags & RADEON_IS_IGP) {
++		rdev->asic->get_memory_clock = NULL;
++		rdev->asic->set_memory_clock = NULL;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -462,13 +487,18 @@ int radeon_atombios_init(struct radeon_device *rdev)
+ 	atom_card_info->pll_write = cail_pll_write;
+ 
+ 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
++	mutex_init(&rdev->mode_info.atom_context->mutex);
+ 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
++	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
+ 	return 0;
+ }
+ 
+ void radeon_atombios_fini(struct radeon_device *rdev)
+ {
+-	kfree(rdev->mode_info.atom_context);
++	if (rdev->mode_info.atom_context) {
++		kfree(rdev->mode_info.atom_context->scratch);
++		kfree(rdev->mode_info.atom_context);
++	}
+ 	kfree(rdev->mode_info.atom_card_info);
+ }
+ 
+@@ -514,11 +544,75 @@ void radeon_agp_disable(struct radeon_device *rdev)
+ 		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ 		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+ 	}
++	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
++}
++
++void radeon_check_arguments(struct radeon_device *rdev)
++{
++	/* vramlimit must be a power of two */
++	switch (radeon_vram_limit) {
++	case 0:
++	case 4:
++	case 8:
++	case 16:
++	case 32:
++	case 64:
++	case 128:
++	case 256:
++	case 512:
++	case 1024:
++	case 2048:
++	case 4096:
++		break;
++	default:
++		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
++				radeon_vram_limit);
++		radeon_vram_limit = 0;
++		break;
++	}
++	radeon_vram_limit = radeon_vram_limit << 20;
++	/* gtt size must be power of two and greater or equal to 32M */
++	switch (radeon_gart_size) {
++	case 4:
++	case 8:
++	case 16:
++		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
++				radeon_gart_size);
++		radeon_gart_size = 512;
++		break;
++	case 32:
++	case 64:
++	case 128:
++	case 256:
++	case 512:
++	case 1024:
++	case 2048:
++	case 4096:
++		break;
++	default:
++		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
++				radeon_gart_size);
++		radeon_gart_size = 512;
++		break;
++	}
++	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
++	/* AGP mode can only be -1, 1, 2, 4, 8 */
++	switch (radeon_agpmode) {
++	case -1:
++	case 0:
++	case 1:
++	case 2:
++	case 4:
++	case 8:
++		break;
++	default:
++		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
++				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
++		radeon_agpmode = 0;
++		break;
++	}
+ }
+ 
+-/*
+- * Radeon device.
+- */
+ int radeon_device_init(struct radeon_device *rdev,
+ 		       struct drm_device *ddev,
+ 		       struct pci_dev *pdev,
+@@ -544,16 +638,24 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	mutex_init(&rdev->cs_mutex);
+ 	mutex_init(&rdev->ib_pool.mutex);
+ 	mutex_init(&rdev->cp.mutex);
++	if (rdev->family >= CHIP_R600)
++		spin_lock_init(&rdev->ih.lock);
++	mutex_init(&rdev->gem.mutex);
+ 	rwlock_init(&rdev->fence_drv.lock);
+ 	INIT_LIST_HEAD(&rdev->gem.objects);
+ 
++	/* setup workqueue */
++	rdev->wq = create_workqueue("radeon");
++	if (rdev->wq == NULL)
++		return -ENOMEM;
++
+ 	/* Set asic functions */
+ 	r = radeon_asic_init(rdev);
+-	if (r) {
++	if (r)
+ 		return r;
+-	}
++	radeon_check_arguments(rdev);
+ 
+-	if (radeon_agpmode == -1) {
++	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
+ 		radeon_agp_disable(rdev);
+ 	}
+ 
+@@ -620,6 +722,7 @@ void radeon_device_fini(struct radeon_device *rdev)
+ 	DRM_INFO("radeon: finishing device.\n");
+ 	rdev->shutdown = true;
+ 	radeon_fini(rdev);
++	destroy_workqueue(rdev->wq);
+ 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
+ 	iounmap(rdev->rmmio);
+ 	rdev->rmmio = NULL;
+@@ -631,38 +734,46 @@ void radeon_device_fini(struct radeon_device *rdev)
+  */
+ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
+ {
+-	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_device *rdev;
+ 	struct drm_crtc *crtc;
++	int r;
+ 
+-	if (dev == NULL || rdev == NULL) {
++	if (dev == NULL || dev->dev_private == NULL) {
+ 		return -ENODEV;
+ 	}
+ 	if (state.event == PM_EVENT_PRETHAW) {
+ 		return 0;
+ 	}
++	rdev = dev->dev_private;
++
+ 	/* unpin the front buffers */
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
+-		struct radeon_object *robj;
++		struct radeon_bo *robj;
+ 
+ 		if (rfb == NULL || rfb->obj == NULL) {
+ 			continue;
+ 		}
+ 		robj = rfb->obj->driver_private;
+-		if (robj != rdev->fbdev_robj) {
+-			radeon_object_unpin(robj);
++		if (robj != rdev->fbdev_rbo) {
++			r = radeon_bo_reserve(robj, false);
++			if (unlikely(r == 0)) {
++				radeon_bo_unpin(robj);
++				radeon_bo_unreserve(robj);
++			}
+ 		}
+ 	}
+ 	/* evict vram memory */
+-	radeon_object_evict_vram(rdev);
++	radeon_bo_evict_vram(rdev);
+ 	/* wait for gpu to finish processing current batch */
+ 	radeon_fence_wait_last(rdev);
+ 
+ 	radeon_save_bios_scratch_regs(rdev);
+ 
+ 	radeon_suspend(rdev);
++	radeon_hpd_fini(rdev);
+ 	/* evict remaining vram memory */
+-	radeon_object_evict_vram(rdev);
++	radeon_bo_evict_vram(rdev);
+ 
+ 	pci_save_state(dev->pdev);
+ 	if (state.event == PM_EVENT_SUSPEND) {
+@@ -695,6 +806,8 @@ int radeon_resume_kms(struct drm_device *dev)
+ 	fb_set_suspend(rdev->fbdev_info, 0);
+ 	release_console_sem();
+ 
++	/* reset hpd state */
++	radeon_hpd_init(rdev);
+ 	/* blat the mode back in */
+ 	drm_helper_resume_force_mode(dev);
+ 	return 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index c85df4a..7e17a36 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
+ 	"INTERNAL_UNIPHY2",
+ };
+ 
+-static const char *connector_names[13] = {
++static const char *connector_names[15] = {
+ 	"Unknown",
+ 	"VGA",
+ 	"DVI-I",
+@@ -248,6 +248,18 @@ static const char *connector_names[13] = {
+ 	"DisplayPort",
+ 	"HDMI-A",
+ 	"HDMI-B",
++	"TV",
++	"eDP",
++};
++
++static const char *hpd_names[7] = {
++	"NONE",
++	"HPD1",
++	"HPD2",
++	"HPD3",
++	"HPD4",
++	"HPD5",
++	"HPD6",
+ };
+ 
+ static void radeon_print_display_setup(struct drm_device *dev)
+@@ -264,16 +276,27 @@ static void radeon_print_display_setup(struct drm_device *dev)
+ 		radeon_connector = to_radeon_connector(connector);
+ 		DRM_INFO("Connector %d:\n", i);
+ 		DRM_INFO("  %s\n", connector_names[connector->connector_type]);
+-		if (radeon_connector->ddc_bus)
++		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
++			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
++		if (radeon_connector->ddc_bus) {
+ 			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ 				 radeon_connector->ddc_bus->rec.mask_clk_reg,
+ 				 radeon_connector->ddc_bus->rec.mask_data_reg,
+ 				 radeon_connector->ddc_bus->rec.a_clk_reg,
+ 				 radeon_connector->ddc_bus->rec.a_data_reg,
+-				 radeon_connector->ddc_bus->rec.put_clk_reg,
+-				 radeon_connector->ddc_bus->rec.put_data_reg,
+-				 radeon_connector->ddc_bus->rec.get_clk_reg,
+-				 radeon_connector->ddc_bus->rec.get_data_reg);
++				 radeon_connector->ddc_bus->rec.en_clk_reg,
++				 radeon_connector->ddc_bus->rec.en_data_reg,
++				 radeon_connector->ddc_bus->rec.y_clk_reg,
++				 radeon_connector->ddc_bus->rec.y_data_reg);
++		} else {
++			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
++			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
++			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
++			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
++			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
++			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
++				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
++		}
+ 		DRM_INFO("  Encoders:\n");
+ 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ 			radeon_encoder = to_radeon_encoder(encoder);
+@@ -317,13 +340,17 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
+ 				ret = radeon_get_atom_connector_info_from_object_table(dev);
+ 			else
+ 				ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+-		} else
++		} else {
+ 			ret = radeon_get_legacy_connector_info_from_bios(dev);
++			if (ret == false)
++				ret = radeon_get_legacy_connector_info_from_table(dev);
++		}
+ 	} else {
+ 		if (!ASIC_IS_AVIVO(rdev))
+ 			ret = radeon_get_legacy_connector_info_from_table(dev);
+ 	}
+ 	if (ret) {
++		radeon_setup_encoder_clones(dev);
+ 		radeon_print_display_setup(dev);
+ 		list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
+ 			radeon_ddc_dump(drm_connector);
+@@ -336,12 +363,19 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
+ {
+ 	int ret = 0;
+ 
++	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
++	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
++		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
++		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
++		     dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
++			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
++	}
+ 	if (!radeon_connector->ddc_bus)
+ 		return -1;
+ 	if (!radeon_connector->edid) {
+-		radeon_i2c_do_lock(radeon_connector, 1);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+-		radeon_i2c_do_lock(radeon_connector, 0);
++		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ 	}
+ 
+ 	if (radeon_connector->edid) {
+@@ -361,9 +395,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
+ 
+ 	if (!radeon_connector->ddc_bus)
+ 		return -1;
+-	radeon_i2c_do_lock(radeon_connector, 1);
++	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ 	edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
+-	radeon_i2c_do_lock(radeon_connector, 0);
++	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ 	if (edid) {
+ 		kfree(edid);
+ 	}
+@@ -386,11 +420,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
+ 			uint32_t *fb_div_p,
+ 			uint32_t *frac_fb_div_p,
+ 			uint32_t *ref_div_p,
+-			uint32_t *post_div_p,
+-			int flags)
++			uint32_t *post_div_p)
+ {
+ 	uint32_t min_ref_div = pll->min_ref_div;
+ 	uint32_t max_ref_div = pll->max_ref_div;
++	uint32_t min_post_div = pll->min_post_div;
++	uint32_t max_post_div = pll->max_post_div;
+ 	uint32_t min_fractional_feed_div = 0;
+ 	uint32_t max_fractional_feed_div = 0;
+ 	uint32_t best_vco = pll->best_vco;
+@@ -406,7 +441,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
+ 	DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+ 	freq = freq * 1000;
+ 
+-	if (flags & RADEON_PLL_USE_REF_DIV)
++	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+ 		min_ref_div = max_ref_div = pll->reference_div;
+ 	else {
+ 		while (min_ref_div < max_ref_div-1) {
+@@ -421,19 +456,22 @@ void radeon_compute_pll(struct radeon_pll *pll,
+ 		}
+ 	}
+ 
+-	if (flags & RADEON_PLL_USE_FRAC_FB_DIV) {
++	if (pll->flags & RADEON_PLL_USE_POST_DIV)
++		min_post_div = max_post_div = pll->post_div;
++
++	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ 		min_fractional_feed_div = pll->min_frac_feedback_div;
+ 		max_fractional_feed_div = pll->max_frac_feedback_div;
+ 	}
+ 
+-	for (post_div = pll->min_post_div; post_div <= pll->max_post_div; ++post_div) {
++	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
+ 		uint32_t ref_div;
+ 
+-		if ((flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
++		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ 			continue;
+ 
+ 		/* legacy radeons only have a few post_divs */
+-		if (flags & RADEON_PLL_LEGACY) {
++		if (pll->flags & RADEON_PLL_LEGACY) {
+ 			if ((post_div == 5) ||
+ 			    (post_div == 7) ||
+ 			    (post_div == 9) ||
+@@ -480,7 +518,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
+ 					tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div;
+ 					current_freq = radeon_div(tmp, ref_div * post_div);
+ 
+-					if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
++					if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
+ 						error = freq - current_freq;
+ 						error = error < 0 ? 0xffffffff : error;
+ 					} else
+@@ -507,12 +545,12 @@ void radeon_compute_pll(struct radeon_pll *pll,
+ 							best_freq = current_freq;
+ 							best_error = error;
+ 							best_vco_diff = vco_diff;
+-						} else if (((flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
+-							   ((flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
+-							   ((flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
+-							   ((flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
+-							   ((flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
+-							   ((flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
++						} else if (((pll->flags & RADEON_PLL_PREFER_LOW_REF_DIV) && (ref_div < best_ref_div)) ||
++							   ((pll->flags & RADEON_PLL_PREFER_HIGH_REF_DIV) && (ref_div > best_ref_div)) ||
++							   ((pll->flags & RADEON_PLL_PREFER_LOW_FB_DIV) && (feedback_div < best_feedback_div)) ||
++							   ((pll->flags & RADEON_PLL_PREFER_HIGH_FB_DIV) && (feedback_div > best_feedback_div)) ||
++							   ((pll->flags & RADEON_PLL_PREFER_LOW_POST_DIV) && (post_div < best_post_div)) ||
++							   ((pll->flags & RADEON_PLL_PREFER_HIGH_POST_DIV) && (post_div > best_post_div))) {
+ 							best_post_div = post_div;
+ 							best_ref_div = ref_div;
+ 							best_feedback_div = feedback_div;
+@@ -542,6 +580,97 @@ void radeon_compute_pll(struct radeon_pll *pll,
+ 	*post_div_p = best_post_div;
+ }
+ 
++void radeon_compute_pll_avivo(struct radeon_pll *pll,
++			      uint64_t freq,
++			      uint32_t *dot_clock_p,
++			      uint32_t *fb_div_p,
++			      uint32_t *frac_fb_div_p,
++			      uint32_t *ref_div_p,
++			      uint32_t *post_div_p)
++{
++	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
++	fixed20_12 pll_out_max, pll_out_min;
++	fixed20_12 pll_in_max, pll_in_min;
++	fixed20_12 reference_freq;
++	fixed20_12 error, ffreq, a, b;
++
++	pll_out_max.full = rfixed_const(pll->pll_out_max);
++	pll_out_min.full = rfixed_const(pll->pll_out_min);
++	pll_in_max.full = rfixed_const(pll->pll_in_max);
++	pll_in_min.full = rfixed_const(pll->pll_in_min);
++	reference_freq.full = rfixed_const(pll->reference_freq);
++	do_div(freq, 10);
++	ffreq.full = rfixed_const(freq);
++	error.full = rfixed_const(100 * 100);
++
++	/* max p */
++	p.full = rfixed_div(pll_out_max, ffreq);
++	p.full = rfixed_floor(p);
++
++	/* min m */
++	m.full = rfixed_div(reference_freq, pll_in_max);
++	m.full = rfixed_ceil(m);
++
++	while (1) {
++		n.full = rfixed_div(ffreq, reference_freq);
++		n.full = rfixed_mul(n, m);
++		n.full = rfixed_mul(n, p);
++
++		f_vco.full = rfixed_div(n, m);
++		f_vco.full = rfixed_mul(f_vco, reference_freq);
++
++		f_pclk.full = rfixed_div(f_vco, p);
++
++		if (f_pclk.full > ffreq.full)
++			error.full = f_pclk.full - ffreq.full;
++		else
++			error.full = ffreq.full - f_pclk.full;
++		error.full = rfixed_div(error, f_pclk);
++		a.full = rfixed_const(100 * 100);
++		error.full = rfixed_mul(error, a);
++
++		a.full = rfixed_mul(m, p);
++		a.full = rfixed_div(n, a);
++		best_freq.full = rfixed_mul(reference_freq, a);
++
++		if (rfixed_trunc(error) < 25)
++			break;
++
++		a.full = rfixed_const(1);
++		m.full = m.full + a.full;
++		a.full = rfixed_div(reference_freq, m);
++		if (a.full >= pll_in_min.full)
++			continue;
++
++		m.full = rfixed_div(reference_freq, pll_in_max);
++		m.full = rfixed_ceil(m);
++		a.full = rfixed_const(1);
++		p.full = p.full - a.full;
++		a.full = rfixed_mul(p, ffreq);
++		if (a.full >= pll_out_min.full)
++			continue;
++		else {
++			DRM_ERROR("Unable to find pll dividers\n");
++			break;
++		}
++	}
++
++	a.full = rfixed_const(10);
++	b.full = rfixed_mul(n, a);
++
++	frac_n.full = rfixed_floor(n);
++	frac_n.full = rfixed_mul(frac_n, a);
++	frac_n.full = b.full - frac_n.full;
++
++	*dot_clock_p = rfixed_trunc(best_freq);
++	*fb_div_p = rfixed_trunc(n);
++	*frac_fb_div_p = rfixed_trunc(frac_n);
++	*ref_div_p = rfixed_trunc(m);
++	*post_div_p = rfixed_trunc(p);
++
++	DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
++}
++
+ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
+ {
+ 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
+@@ -551,7 +680,6 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
+ 		radeonfb_remove(dev, fb);
+ 
+ 	if (radeon_fb->obj) {
+-		radeon_gem_object_unpin(radeon_fb->obj);
+ 		mutex_lock(&dev->struct_mutex);
+ 		drm_gem_object_unreference(radeon_fb->obj);
+ 		mutex_unlock(&dev->struct_mutex);
+@@ -599,7 +727,11 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ 	struct drm_gem_object *obj;
+ 
+ 	obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+-
++	if (obj == NULL) {
++		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
++			"can't create framebuffer\n", mode_cmd->handle);
++		return NULL;
++	}
+ 	return radeon_framebuffer_create(dev, mode_cmd, obj);
+ }
+ 
+@@ -629,7 +761,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
+ 	{ TV_STD_SECAM, "secam" },
+ };
+ 
+-int radeon_modeset_create_props(struct radeon_device *rdev)
++static int radeon_modeset_create_props(struct radeon_device *rdev)
+ {
+ 	int i, sz;
+ 
+@@ -642,7 +774,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
+ 			return -ENOMEM;
+ 
+ 		rdev->mode_info.coherent_mode_property->values[0] = 0;
+-		rdev->mode_info.coherent_mode_property->values[0] = 1;
++		rdev->mode_info.coherent_mode_property->values[1] = 1;
+ 	}
+ 
+ 	if (!ASIC_IS_AVIVO(rdev)) {
+@@ -666,7 +798,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
+ 	if (!rdev->mode_info.load_detect_property)
+ 		return -ENOMEM;
+ 	rdev->mode_info.load_detect_property->values[0] = 0;
+-	rdev->mode_info.load_detect_property->values[0] = 1;
++	rdev->mode_info.load_detect_property->values[1] = 1;
+ 
+ 	drm_mode_create_scaling_mode_property(rdev->ddev);
+ 
+@@ -723,6 +855,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ 	if (!ret) {
+ 		return ret;
+ 	}
++	/* initialize hpd */
++	radeon_hpd_init(rdev);
+ 	drm_helper_initial_config(rdev->ddev);
+ 	return 0;
+ }
+@@ -730,6 +864,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ void radeon_modeset_fini(struct radeon_device *rdev)
+ {
+ 	if (rdev->mode_info.mode_config_initialized) {
++		radeon_hpd_fini(rdev);
+ 		drm_mode_config_cleanup(rdev->ddev);
+ 		rdev->mode_info.mode_config_initialized = false;
+ 	}
+@@ -750,9 +885,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ 		if (encoder->crtc != crtc)
+ 			continue;
+ 		if (first) {
+-			radeon_crtc->rmx_type = radeon_encoder->rmx_type;
++			/* set scaling */
++			if (radeon_encoder->rmx_type == RMX_OFF)
++				radeon_crtc->rmx_type = RMX_OFF;
++			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
++				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
++				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
++			else
++				radeon_crtc->rmx_type = RMX_OFF;
++			/* copy native mode */
+ 			memcpy(&radeon_crtc->native_mode,
+-				&radeon_encoder->native_mode,
++			       &radeon_encoder->native_mode,
+ 				sizeof(struct drm_display_mode));
+ 			first = false;
+ 		} else {
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
+index 7f50fb8..8ba3de7 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -86,6 +86,8 @@ int radeon_benchmarking = 0;
+ int radeon_testing = 0;
+ int radeon_connector_table = 0;
+ int radeon_tv = 1;
++int radeon_new_pll = 1;
++int radeon_audio = 1;
+ 
+ MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
+ module_param_named(no_wb, radeon_no_wb, int, 0444);
+@@ -120,6 +122,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
+ MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
+ module_param_named(tv, radeon_tv, int, 0444);
+ 
++MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
++module_param_named(new_pll, radeon_new_pll, int, 0444);
++
++MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
++module_param_named(audio, radeon_audio, int, 0444);
++
+ static int radeon_suspend(struct drm_device *dev, pm_message_t state)
+ {
+ 	drm_radeon_private_t *dev_priv = dev->dev_private;
+@@ -188,7 +196,7 @@ static struct drm_driver driver_old = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = drm_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
+@@ -276,7 +284,7 @@ static struct drm_driver kms_driver = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = radeon_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
+index 350962e..c57ad60 100644
+--- a/drivers/gpu/drm/radeon/radeon_drv.h
++++ b/drivers/gpu/drm/radeon/radeon_drv.h
+@@ -106,9 +106,10 @@
+  * 1.29- R500 3D cmd buffer support
+  * 1.30- Add support for occlusion queries
+  * 1.31- Add support for num Z pipes from GET_PARAM
++ * 1.32- fixes for rv740 setup
+  */
+ #define DRIVER_MAJOR		1
+-#define DRIVER_MINOR		31
++#define DRIVER_MINOR		32
+ #define DRIVER_PATCHLEVEL	0
+ 
+ enum radeon_cp_microcode_version {
+@@ -1104,7 +1105,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
+ #       define R600_IT_WAIT_REG_MEM             0x00003C00
+ #       define R600_IT_MEM_WRITE                0x00003D00
+ #       define R600_IT_INDIRECT_BUFFER          0x00003200
+-#       define R600_IT_CP_INTERRUPT             0x00004000
+ #       define R600_IT_SURFACE_SYNC             0x00004300
+ #              define R600_CB0_DEST_BASE_ENA    (1 << 6)
+ #              define R600_TC_ACTION_ENA        (1 << 23)
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index d42bc51..3c91724 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -35,6 +35,51 @@ extern int atom_debug;
+ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+ 				struct drm_display_mode *mode);
+ 
++static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct drm_encoder *clone_encoder;
++	uint32_t index_mask = 0;
++	int count;
++
++	/* DIG routing gets problematic */
++	if (rdev->family >= CHIP_R600)
++		return index_mask;
++	/* LVDS/TV are too wacky */
++	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
++		return index_mask;
++	/* DVO requires 2x ppll clocks depending on tmds chip */
++	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
++		return index_mask;
++
++	count = -1;
++	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
++		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
++		count++;
++
++		if (clone_encoder == encoder)
++			continue;
++		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
++			continue;
++		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
++			continue;
++		else
++			index_mask |= (1 << count);
++	}
++	return index_mask;
++}
++
++void radeon_setup_encoder_clones(struct drm_device *dev)
++{
++	struct drm_encoder *encoder;
++
++	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
++		encoder->possible_clones = radeon_encoder_clones(encoder);
++	}
++}
++
+ uint32_t
+ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
+ {
+@@ -111,6 +156,26 @@ radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t
+ 	return ret;
+ }
+ 
++static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
++{
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	switch (radeon_encoder->encoder_id) {
++	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
++	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
++	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
++	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
++	case ENCODER_OBJECT_ID_INTERNAL_DDI:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
++	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
++	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++		return true;
++	default:
++		return false;
++	}
++}
+ void
+ radeon_link_encoder_connector(struct drm_device *dev)
+ {
+@@ -157,35 +222,12 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
+ 
+ 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ 		radeon_connector = to_radeon_connector(connector);
+-		if (radeon_encoder->devices & radeon_connector->devices)
++		if (radeon_encoder->active_device & radeon_connector->devices)
+ 			return connector;
+ 	}
+ 	return NULL;
+ }
+ 
+-/* used for both atom and legacy */
+-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+-			   struct drm_display_mode *mode,
+-			   struct drm_display_mode *adjusted_mode)
+-{
+-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+-	struct drm_device *dev = encoder->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+-	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+-
+-	if (mode->hdisplay < native_mode->hdisplay ||
+-	    mode->vdisplay < native_mode->vdisplay) {
+-		int mode_id = adjusted_mode->base.id;
+-		*adjusted_mode = *native_mode;
+-		if (!ASIC_IS_AVIVO(rdev)) {
+-			adjusted_mode->hdisplay = mode->hdisplay;
+-			adjusted_mode->vdisplay = mode->vdisplay;
+-		}
+-		adjusted_mode->base.id = mode_id;
+-	}
+-}
+-
+-
+ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 				   struct drm_display_mode *mode,
+ 				   struct drm_display_mode *adjusted_mode)
+@@ -198,14 +240,26 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 	radeon_encoder_set_active_device(encoder);
+ 	drm_mode_set_crtcinfo(adjusted_mode, 0);
+ 
+-	if (radeon_encoder->rmx_type != RMX_OFF)
+-		radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+-
+ 	/* hw bug */
+ 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+ 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ 
++	/* get the native mode for LVDS */
++	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
++		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
++		int mode_id = adjusted_mode->base.id;
++		*adjusted_mode = *native_mode;
++		if (!ASIC_IS_AVIVO(rdev)) {
++			adjusted_mode->hdisplay = mode->hdisplay;
++			adjusted_mode->vdisplay = mode->vdisplay;
++			adjusted_mode->crtc_hdisplay = mode->hdisplay;
++			adjusted_mode->crtc_vdisplay = mode->vdisplay;
++		}
++		adjusted_mode->base.id = mode_id;
++	}
++
++	/* get the native mode for TV */
+ 	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ 		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+ 		if (tv_dac) {
+@@ -218,6 +272,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 		}
+ 	}
+ 
++	if (ASIC_IS_DCE3(rdev) &&
++	    (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
++		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
++		radeon_dp_set_link_config(connector, mode);
++	}
++
+ 	return true;
+ }
+ 
+@@ -392,7 +452,7 @@ union lvds_encoder_control {
+ 	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+ };
+ 
+-static void
++void
+ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ {
+ 	struct drm_device *dev = encoder->dev;
+@@ -400,6 +460,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	union lvds_encoder_control args;
+ 	int index = 0;
++	int hdmi_detected = 0;
+ 	uint8_t frev, crev;
+ 	struct radeon_encoder_atom_dig *dig;
+ 	struct drm_connector *connector;
+@@ -420,6 +481,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 	if (!radeon_connector->con_priv)
+ 		return;
+ 
++	if (drm_detect_hdmi_monitor(radeon_connector->edid))
++		hdmi_detected = 1;
++
+ 	dig_connector = radeon_connector->con_priv;
+ 
+ 	memset(&args, 0, sizeof(args));
+@@ -449,13 +513,13 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 		case 1:
+ 			args.v1.ucMisc = 0;
+ 			args.v1.ucAction = action;
+-			if (drm_detect_hdmi_monitor(radeon_connector->edid))
++			if (hdmi_detected)
+ 				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ 			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ 			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+-				if (dig->lvds_misc & (1 << 0))
++				if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+ 					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+-				if (dig->lvds_misc & (1 << 1))
++				if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ 					args.v1.ucMisc |= (1 << 1);
+ 			} else {
+ 				if (dig_connector->linkb)
+@@ -474,7 +538,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 				if (dig->coherent_mode)
+ 					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+ 			}
+-			if (drm_detect_hdmi_monitor(radeon_connector->edid))
++			if (hdmi_detected)
+ 				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+ 			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ 			args.v2.ucTruncate = 0;
+@@ -482,18 +546,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 			args.v2.ucTemporal = 0;
+ 			args.v2.ucFRC = 0;
+ 			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+-				if (dig->lvds_misc & (1 << 0))
++				if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
+ 					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+-				if (dig->lvds_misc & (1 << 5)) {
++				if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
+ 					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+-					if (dig->lvds_misc & (1 << 1))
++					if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ 						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+ 				}
+-				if (dig->lvds_misc & (1 << 6)) {
++				if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
+ 					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+-					if (dig->lvds_misc & (1 << 1))
++					if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
+ 						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+-					if (((dig->lvds_misc >> 2) & 0x3) == 2)
++					if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+ 						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+ 				}
+ 			} else {
+@@ -514,7 +578,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
+ 	}
+ 
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+-
++	r600_hdmi_enable(encoder, hdmi_detected);
+ }
+ 
+ int
+@@ -522,6 +586,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+ {
+ 	struct drm_connector *connector;
+ 	struct radeon_connector *radeon_connector;
++	struct radeon_connector_atom_dig *radeon_dig_connector;
+ 
+ 	connector = radeon_get_connector_for_encoder(encoder);
+ 	if (!connector)
+@@ -551,21 +616,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+ 		return ATOM_ENCODER_MODE_LVDS;
+ 		break;
+ 	case DRM_MODE_CONNECTOR_DisplayPort:
+-		/*if (radeon_output->MonType == MT_DP)
+-		  return ATOM_ENCODER_MODE_DP;
+-		  else*/
+-		if (drm_detect_hdmi_monitor(radeon_connector->edid))
++	case DRM_MODE_CONNECTOR_eDP:
++		radeon_dig_connector = radeon_connector->con_priv;
++		if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
++		    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
++			return ATOM_ENCODER_MODE_DP;
++		else if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ 			return ATOM_ENCODER_MODE_HDMI;
+ 		else
+ 			return ATOM_ENCODER_MODE_DVI;
+ 		break;
+-	case CONNECTOR_DVI_A:
+-	case CONNECTOR_VGA:
++	case DRM_MODE_CONNECTOR_DVIA:
++	case DRM_MODE_CONNECTOR_VGA:
+ 		return ATOM_ENCODER_MODE_CRT;
+ 		break;
+-	case CONNECTOR_STV:
+-	case CONNECTOR_CTV:
+-	case CONNECTOR_DIN:
++	case DRM_MODE_CONNECTOR_Composite:
++	case DRM_MODE_CONNECTOR_SVIDEO:
++	case DRM_MODE_CONNECTOR_9PinDIN:
+ 		/* fix me */
+ 		return ATOM_ENCODER_MODE_TV;
+ 		/*return ATOM_ENCODER_MODE_CV;*/
+@@ -573,6 +640,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
+ 	}
+ }
+ 
++/*
++ * DIG Encoder/Transmitter Setup
++ *
++ * DCE 3.0/3.1
++ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
++ * Supports up to 3 digital outputs
++ * - 2 DIG encoder blocks.
++ * DIG1 can drive UNIPHY link A or link B
++ * DIG2 can drive UNIPHY link B or LVTMA
++ *
++ * DCE 3.2
++ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
++ * Supports up to 5 digital outputs
++ * - 2 DIG encoder blocks.
++ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
++ *
++ * Routing
++ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
++ * Examples:
++ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
++ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
++ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
++ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
++ */
+ static void
+ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
+ {
+@@ -605,24 +696,11 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
+ 
+ 	memset(&args, 0, sizeof(args));
+ 
+-	if (ASIC_IS_DCE32(rdev)) {
+-		if (dig->dig_block)
+-			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+-		else
+-			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+-		num = dig->dig_block + 1;
+-	} else {
+-		switch (radeon_encoder->encoder_id) {
+-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+-			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+-			num = 1;
+-			break;
+-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+-			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+-			num = 2;
+-			break;
+-		}
+-	}
++	if (dig->dig_encoder)
++		index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
++	else
++		index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
++	num = dig->dig_encoder + 1;
+ 
+ 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ 
+@@ -652,18 +730,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
+ 		}
+ 	}
+ 
+-	if (radeon_encoder->pixel_clock > 165000) {
+-		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
++	args.ucEncoderMode = atombios_get_encoder_mode(encoder);
++
++	if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
++		if (dig_connector->dp_clock == 270000)
++			args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
++		args.ucLaneNum = dig_connector->dp_lane_count;
++	} else if (radeon_encoder->pixel_clock > 165000)
+ 		args.ucLaneNum = 8;
+-	} else {
+-		if (dig_connector->linkb)
+-			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+-		else
+-			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
++	else
+ 		args.ucLaneNum = 4;
+-	}
+ 
+-	args.ucEncoderMode = atombios_get_encoder_mode(encoder);
++	if (dig_connector->linkb)
++		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
++	else
++		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+ 
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ 
+@@ -674,8 +755,8 @@ union dig_transmitter_control {
+ 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ };
+ 
+-static void
+-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
++void
++atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+@@ -687,6 +768,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+ 	struct drm_connector *connector;
+ 	struct radeon_connector *radeon_connector;
+ 	struct radeon_connector_atom_dig *dig_connector;
++	bool is_dp = false;
+ 
+ 	connector = radeon_get_connector_for_encoder(encoder);
+ 	if (!connector)
+@@ -704,6 +786,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+ 
+ 	dig_connector = radeon_connector->con_priv;
+ 
++	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
++		is_dp = true;
++
+ 	memset(&args, 0, sizeof(args));
+ 
+ 	if (ASIC_IS_DCE32(rdev))
+@@ -724,17 +809,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+ 	args.v1.ucAction = action;
+ 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+ 		args.v1.usInitInfo = radeon_connector->connector_object_id;
++	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
++		args.v1.asMode.ucLaneSel = lane_num;
++		args.v1.asMode.ucLaneSet = lane_set;
+ 	} else {
+-		if (radeon_encoder->pixel_clock > 165000)
++		if (is_dp)
++			args.v1.usPixelClock =
++				cpu_to_le16(dig_connector->dp_clock / 10);
++		else if (radeon_encoder->pixel_clock > 165000)
+ 			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+ 		else
+ 			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ 	}
+ 	if (ASIC_IS_DCE32(rdev)) {
+-		if (radeon_encoder->pixel_clock > 165000)
+-			args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+-		if (dig->dig_block)
++		if (dig->dig_encoder == 1)
+ 			args.v2.acConfig.ucEncoderSel = 1;
++		if (dig_connector->linkb)
++			args.v2.acConfig.ucLinkSel = 1;
+ 
+ 		switch (radeon_encoder->encoder_id) {
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+@@ -751,26 +842,30 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+ 			break;
+ 		}
+ 
+-		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++		if (is_dp)
++			args.v2.acConfig.fCoherentMode = 1;
++		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ 			if (dig->coherent_mode)
+ 				args.v2.acConfig.fCoherentMode = 1;
+ 		}
+ 	} else {
++
+ 		args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+ 
++		if (dig->dig_encoder)
++			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
++		else
++			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
++
+ 		switch (radeon_encoder->encoder_id) {
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+ 			if (rdev->flags & RADEON_IS_IGP) {
+ 				if (radeon_encoder->pixel_clock > 165000) {
+-					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+-							     ATOM_TRANSMITTER_CONFIG_LINKA_B);
+ 					if (dig_connector->igp_lane_info & 0x3)
+ 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+ 					else if (dig_connector->igp_lane_info & 0xc)
+ 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+ 				} else {
+-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+ 					if (dig_connector->igp_lane_info & 0x1)
+ 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ 					else if (dig_connector->igp_lane_info & 0x2)
+@@ -780,42 +875,27 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+ 					else if (dig_connector->igp_lane_info & 0x8)
+ 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+ 				}
+-			} else {
+-				if (radeon_encoder->pixel_clock > 165000)
+-					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+-							     ATOM_TRANSMITTER_CONFIG_LINKA_B |
+-							     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
+-				else {
+-					if (dig_connector->linkb)
+-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+-					else
+-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+-				}
+-			}
+-			break;
+-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+-			if (radeon_encoder->pixel_clock > 165000)
+-				args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
+-						     ATOM_TRANSMITTER_CONFIG_LINKA_B |
+-						     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
+-			else {
+-				if (dig_connector->linkb)
+-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+-				else
+-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ 			}
+ 			break;
+ 		}
+ 
+-		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
++		if (radeon_encoder->pixel_clock > 165000)
++			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
++
++		if (dig_connector->linkb)
++			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
++		else
++			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
++
++		if (is_dp)
++			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
++		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ 			if (dig->coherent_mode)
+ 				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ 		}
+ 	}
+ 
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+-
+ }
+ 
+ static void
+@@ -918,12 +998,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+ 	if (is_dig) {
+ 		switch (mode) {
+ 		case DRM_MODE_DPMS_ON:
+-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
++			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
++			{
++				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
++				dp_link_train(encoder, connector);
++			}
+ 			break;
+ 		case DRM_MODE_DPMS_STANDBY:
+ 		case DRM_MODE_DPMS_SUSPEND:
+ 		case DRM_MODE_DPMS_OFF:
+-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
++			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ 			break;
+ 		}
+ 	} else {
+@@ -957,6 +1041,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+ 	union crtc_sourc_param args;
+ 	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+ 	uint8_t frev, crev;
++	struct radeon_encoder_atom_dig *dig;
+ 
+ 	memset(&args, 0, sizeof(args));
+ 
+@@ -1020,20 +1105,16 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+-				if (ASIC_IS_DCE32(rdev)) {
+-					if (radeon_crtc->crtc_id)
+-						args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+-					else
+-						args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+-				} else
++			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
++				dig = radeon_encoder->enc_priv;
++				if (dig->dig_encoder)
++					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
++				else
+ 					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ 				break;
+ 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ 				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+ 				break;
+-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+-				args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+-				break;
+ 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ 				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+ 					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+@@ -1060,7 +1141,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+ 	}
+ 
+ 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+-
+ }
+ 
+ static void
+@@ -1094,6 +1174,47 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+ 	}
+ }
+ 
++static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
++{
++	struct drm_device *dev = encoder->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct drm_encoder *test_encoder;
++	struct radeon_encoder_atom_dig *dig;
++	uint32_t dig_enc_in_use = 0;
++	/* on DCE32 an encoder can drive any block, so just use the crtc id */
++	if (ASIC_IS_DCE32(rdev)) {
++		return radeon_crtc->crtc_id;
++	}
++
++	/* on DCE3 - LVTMA can only be driven by DIGB */
++	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
++		struct radeon_encoder *radeon_test_encoder;
++
++		if (encoder == test_encoder)
++			continue;
++
++		if (!radeon_encoder_is_digital(test_encoder))
++			continue;
++
++		radeon_test_encoder = to_radeon_encoder(test_encoder);
++		dig = radeon_test_encoder->enc_priv;
++
++		if (dig->dig_encoder >= 0)
++			dig_enc_in_use |= (1 << dig->dig_encoder);
++	}
++
++	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
++		if (dig_enc_in_use & 0x2)
++			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
++		return 1;
++	}
++	if (!(dig_enc_in_use & 1))
++		return 0;
++	return 1;
++}
++
+ static void
+ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ 			     struct drm_display_mode *mode,
+@@ -1104,11 +1225,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ 
+-	if (radeon_encoder->enc_priv) {
+-		struct radeon_encoder_atom_dig *dig;
+-
+-		dig = radeon_encoder->enc_priv;
+-		dig->dig_block = radeon_crtc->crtc_id;
++	if (radeon_encoder->active_device &
++	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
++		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++		if (dig)
++			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ 	}
+ 	radeon_encoder->pixel_clock = adjusted_mode->clock;
+ 
+@@ -1134,14 +1255,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ 		/* disable the encoder and transmitter */
+-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
++		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ 		atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+ 
+ 		/* setup and enable the encoder and transmitter */
+ 		atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
+-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
+-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
++		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
++		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
++		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ 		break;
+ 	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ 		atombios_ddia_setup(encoder, ATOM_ENABLE);
+@@ -1160,6 +1281,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ 		break;
+ 	}
+ 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
++
++	r600_hdmi_setmode(encoder, adjusted_mode);
+ }
+ 
+ static bool
+@@ -1266,7 +1389,13 @@ static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+ {
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct radeon_encoder_atom_dig *dig;
+ 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
++
++	if (radeon_encoder_is_digital(encoder)) {
++		dig = radeon_encoder->enc_priv;
++		dig->dig_encoder = -1;
++	}
+ 	radeon_encoder->active_device = 0;
+ }
+ 
+@@ -1323,6 +1452,7 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+ 
+ 	/* coherent mode by default */
+ 	dig->coherent_mode = true;
++	dig->dig_encoder = -1;
+ 
+ 	return dig;
+ }
+@@ -1354,7 +1484,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
+ 		encoder->possible_crtcs = 0x1;
+ 	else
+ 		encoder->possible_crtcs = 0x3;
+-	encoder->possible_clones = 0;
+ 
+ 	radeon_encoder->enc_priv = NULL;
+ 
+@@ -1406,4 +1535,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
+ 		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+ 		break;
+ 	}
++
++	r600_hdmi_init(encoder);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
+index b38c4c8..d71e346 100644
+--- a/drivers/gpu/drm/radeon/radeon_fb.c
++++ b/drivers/gpu/drm/radeon/radeon_fb.c
+@@ -59,7 +59,7 @@ static struct fb_ops radeonfb_ops = {
+ };
+ 
+ /**
+- * Curretly it is assumed that the old framebuffer is reused.
++ * Currently it is assumed that the old framebuffer is reused.
+  *
+  * LOCKING
+  * caller should hold the mode config lock.
+@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
+ 	struct radeon_framebuffer *rfb;
+ 	struct drm_mode_fb_cmd mode_cmd;
+ 	struct drm_gem_object *gobj = NULL;
+-	struct radeon_object *robj = NULL;
++	struct radeon_bo *rbo = NULL;
+ 	struct device *device = &rdev->pdev->dev;
+ 	int size, aligned_size, ret;
+ 	u64 fb_gpuaddr;
+@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
+ 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
+ 			RADEON_GEM_DOMAIN_VRAM,
+ 			false, ttm_bo_type_kernel,
+-			false, &gobj);
++			&gobj);
+ 	if (ret) {
+ 		printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
+ 		       surface_width, surface_height);
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+-	robj = gobj->driver_private;
++	rbo = gobj->driver_private;
+ 
+ 	if (fb_tiled)
+ 		tiling_flags = RADEON_TILING_MACRO;
+@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
+ 	}
+ #endif
+ 
+-	if (tiling_flags)
+-		radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
++	if (tiling_flags) {
++		ret = radeon_bo_set_tiling_flags(rbo,
++					tiling_flags | RADEON_TILING_SURFACE,
++					mode_cmd.pitch);
++		if (ret)
++			dev_err(rdev->dev, "FB failed to set tiling flags\n");
++	}
+ 	mutex_lock(&rdev->ddev->struct_mutex);
+ 	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
+ 	if (fb == NULL) {
+@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
+ 		ret = -ENOMEM;
+ 		goto out_unref;
+ 	}
+-	ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
++	ret = radeon_bo_reserve(rbo, false);
++	if (unlikely(ret != 0))
++		goto out_unref;
++	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
++	if (ret) {
++		radeon_bo_unreserve(rbo);
++		goto out_unref;
++	}
++	if (fb_tiled)
++		radeon_bo_check_tiling(rbo, 0, 0);
++	ret = radeon_bo_kmap(rbo, &fbptr);
++	radeon_bo_unreserve(rbo);
+ 	if (ret) {
+-		printk(KERN_ERR "failed to pin framebuffer\n");
+-		ret = -ENOMEM;
+ 		goto out_unref;
+ 	}
+ 
+@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
+ 	*fb_p = fb;
+ 	rfb = to_radeon_framebuffer(fb);
+ 	rdev->fbdev_rfb = rfb;
+-	rdev->fbdev_robj = robj;
++	rdev->fbdev_rbo = rbo;
+ 
+ 	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
+ 	if (info == NULL) {
+@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
+ 	if (ret)
+ 		goto out_unref;
+ 
+-	if (fb_tiled)
+-		radeon_object_check_tiling(robj, 0, 0);
+-
+-	ret = radeon_object_kmap(robj, &fbptr);
+-	if (ret) {
+-		goto out_unref;
+-	}
+-
+-	memset_io(fbptr, 0, aligned_size);
++	memset_io(fbptr, 0x0, aligned_size);
+ 
+ 	strcpy(info->fix.id, "radeondrmfb");
+ 
+@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
+ 	return 0;
+ 
+ out_unref:
+-	if (robj) {
+-		radeon_object_kunmap(robj);
++	if (rbo) {
++		ret = radeon_bo_reserve(rbo, false);
++		if (likely(ret == 0)) {
++			radeon_bo_kunmap(rbo);
++			radeon_bo_unreserve(rbo);
++		}
+ 	}
+ 	if (fb && ret) {
+ 		list_del(&fb->filp_head);
+@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
+ 
+ int radeonfb_probe(struct drm_device *dev)
+ {
+-	return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
++	struct radeon_device *rdev = dev->dev_private;
++	int bpp_sel = 32;
++
++	/* select 8 bpp console on RN50 or 16MB cards */
++	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
++		bpp_sel = 8;
++
++	return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
+ }
+ 
+ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+ {
+ 	struct fb_info *info;
+ 	struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
+-	struct radeon_object *robj;
++	struct radeon_bo *rbo;
++	int r;
+ 
+ 	if (!fb) {
+ 		return -EINVAL;
+@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
+ 	info = fb->fbdev;
+ 	if (info) {
+ 		struct radeon_fb_device *rfbdev = info->par;
+-		robj = rfb->obj->driver_private;
++		rbo = rfb->obj->driver_private;
+ 		unregister_framebuffer(info);
+-		radeon_object_kunmap(robj);
+-		radeon_object_unpin(robj);
++		r = radeon_bo_reserve(rbo, false);
++		if (likely(r == 0)) {
++			radeon_bo_kunmap(rbo);
++			radeon_bo_unpin(rbo);
++			radeon_bo_unreserve(rbo);
++		}
+ 		drm_fb_helper_free(&rfbdev->helper);
+ 		framebuffer_release(info);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
+index 3beb26d..8495d4e 100644
+--- a/drivers/gpu/drm/radeon/radeon_fence.c
++++ b/drivers/gpu/drm/radeon/radeon_fence.c
+@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+ 
+ bool radeon_fence_signaled(struct radeon_fence *fence)
+ {
+-	struct radeon_device *rdev = fence->rdev;
+ 	unsigned long irq_flags;
+ 	bool signaled = false;
+ 
+-	if (rdev->gpu_lockup) {
++	if (!fence)
+ 		return true;
+-	}
+-	if (fence == NULL) {
++
++	if (fence->rdev->gpu_lockup)
+ 		return true;
+-	}
++
+ 	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ 	signaled = fence->signaled;
+ 	/* if we are shutting down, report all fences as signaled */
+@@ -168,37 +167,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
+ 	return signaled;
+ }
+ 
+-int r600_fence_wait(struct radeon_fence *fence,  bool intr, bool lazy)
+-{
+-	struct radeon_device *rdev;
+-	int ret = 0;
+-
+-	rdev = fence->rdev;
+-
+-	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+-
+-	while (1) {
+-		if (radeon_fence_signaled(fence))
+-			break;
+-
+-		if (time_after_eq(jiffies, fence->timeout)) {
+-			ret = -EBUSY;
+-			break;
+-		}
+-
+-		if (lazy)
+-			schedule_timeout(1);
+-
+-		if (intr && signal_pending(current)) {
+-			ret = -ERESTARTSYS;
+-			break;
+-		}
+-	}
+-	__set_current_state(TASK_RUNNING);
+-	return ret;
+-}
+-
+-
+ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+ {
+ 	struct radeon_device *rdev;
+@@ -216,13 +184,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
+ 		return 0;
+ 	}
+ 
+-	if (rdev->family >= CHIP_R600) {
+-		r = r600_fence_wait(fence, intr, 0);
+-		if (r == -ERESTARTSYS)
+-			return -EBUSY;
+-		return r;
+-	}
+-
+ retry:
+ 	cur_jiffies = jiffies;
+ 	timeout = HZ / 100;
+@@ -231,14 +192,17 @@ retry:
+ 	}
+ 
+ 	if (intr) {
++		radeon_irq_kms_sw_irq_get(rdev);
+ 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+ 				radeon_fence_signaled(fence), timeout);
+-		if (unlikely(r == -ERESTARTSYS)) {
+-			return -EBUSY;
+-		}
++		radeon_irq_kms_sw_irq_put(rdev);
++		if (unlikely(r < 0))
++			return r;
+ 	} else {
++		radeon_irq_kms_sw_irq_get(rdev);
+ 		r = wait_event_timeout(rdev->fence_drv.queue,
+ 			 radeon_fence_signaled(fence), timeout);
++		radeon_irq_kms_sw_irq_put(rdev);
+ 	}
+ 	if (unlikely(!radeon_fence_signaled(fence))) {
+ 		if (unlikely(r == 0)) {
+@@ -359,7 +323,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
+ 	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ 	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
+ 	if (r) {
+-		DRM_ERROR("Fence failed to get a scratch register.");
++		dev_err(rdev->dev, "fence failed to get scratch register\n");
+ 		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ 		return r;
+ 	}
+@@ -370,9 +334,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
+ 	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
+ 	rdev->fence_drv.count_timeout = 0;
+ 	init_waitqueue_head(&rdev->fence_drv.queue);
++	rdev->fence_drv.initialized = true;
+ 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ 	if (radeon_debugfs_fence_init(rdev)) {
+-		DRM_ERROR("Failed to register debugfs file for fence !\n");
++		dev_err(rdev->dev, "fence debugfs file creation failed\n");
+ 	}
+ 	return 0;
+ }
+@@ -381,11 +346,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
+ {
+ 	unsigned long irq_flags;
+ 
++	if (!rdev->fence_drv.initialized)
++		return;
+ 	wake_up_all(&rdev->fence_drv.queue);
+ 	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ 	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
+ 	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+-	DRM_INFO("radeon: fence finalized\n");
++	rdev->fence_drv.initialized = false;
+ }
+ 
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
+index 90187d1..3d4d84e 100644
+--- a/drivers/gpu/drm/radeon/radeon_fixed.h
++++ b/drivers/gpu/drm/radeon/radeon_fixed.h
+@@ -38,6 +38,23 @@ typedef union rfixed {
+ #define fixed_init_half(A) { .full = rfixed_const_half((A)) }
+ #define rfixed_trunc(A) ((A).full >> 12)
+ 
++static inline u32 rfixed_floor(fixed20_12 A)
++{
++	u32 non_frac = rfixed_trunc(A);
++
++	return rfixed_const(non_frac);
++}
++
++static inline u32 rfixed_ceil(fixed20_12 A)
++{
++	u32 non_frac = rfixed_trunc(A);
++
++	if (A.full > rfixed_const(non_frac))
++		return rfixed_const(non_frac + 1);
++	else
++		return rfixed_const(non_frac);
++}
++
+ static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
+ {
+ 	u64 tmp = ((u64)A.full << 13);
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index a68d756..e73d56e 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
+ 	int r;
+ 
+ 	if (rdev->gart.table.vram.robj == NULL) {
+-		r = radeon_object_create(rdev, NULL,
+-					 rdev->gart.table_size,
+-					 true,
+-					 RADEON_GEM_DOMAIN_VRAM,
+-					 false, &rdev->gart.table.vram.robj);
++		r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
++					true, RADEON_GEM_DOMAIN_VRAM,
++					&rdev->gart.table.vram.robj);
+ 		if (r) {
+ 			return r;
+ 		}
+@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
+ 	uint64_t gpu_addr;
+ 	int r;
+ 
+-	r = radeon_object_pin(rdev->gart.table.vram.robj,
+-			      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+-	if (r) {
+-		radeon_object_unref(&rdev->gart.table.vram.robj);
++	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
++	if (unlikely(r != 0))
+ 		return r;
+-	}
+-	r = radeon_object_kmap(rdev->gart.table.vram.robj,
+-			       (void **)&rdev->gart.table.vram.ptr);
++	r = radeon_bo_pin(rdev->gart.table.vram.robj,
++				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
+ 	if (r) {
+-		radeon_object_unpin(rdev->gart.table.vram.robj);
+-		radeon_object_unref(&rdev->gart.table.vram.robj);
+-		DRM_ERROR("radeon: failed to map gart vram table.\n");
++		radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ 		return r;
+ 	}
++	r = radeon_bo_kmap(rdev->gart.table.vram.robj,
++				(void **)&rdev->gart.table.vram.ptr);
++	if (r)
++		radeon_bo_unpin(rdev->gart.table.vram.robj);
++	radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ 	rdev->gart.table_addr = gpu_addr;
+-	return 0;
++	return r;
+ }
+ 
+ void radeon_gart_table_vram_free(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	if (rdev->gart.table.vram.robj == NULL) {
+ 		return;
+ 	}
+-	radeon_object_kunmap(rdev->gart.table.vram.robj);
+-	radeon_object_unpin(rdev->gart.table.vram.robj);
+-	radeon_object_unref(&rdev->gart.table.vram.robj);
++	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
++	if (likely(r == 0)) {
++		radeon_bo_kunmap(rdev->gart.table.vram.robj);
++		radeon_bo_unpin(rdev->gart.table.vram.robj);
++		radeon_bo_unreserve(rdev->gart.table.vram.robj);
++	}
++	radeon_bo_unref(&rdev->gart.table.vram.robj);
+ }
+ 
+ 
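
The gart conversion is representative of the whole patch: radeon_object_* becomes radeon_bo_*, and pin/unpin/kmap now have to run under a reservation. Condensed shape of the new calling convention (error-handling skeleton only, same helper names as the patch):

    r = radeon_bo_reserve(bo, false);            /* lock the BO */
    if (unlikely(r != 0))
            return r;
    r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r == 0) {
            r = radeon_bo_kmap(bo, &ptr);        /* CPU mapping */
            if (r)
                    radeon_bo_unpin(bo);         /* undo pin on failure */
    }
    radeon_bo_unreserve(bo);                     /* always unlock */
    return r;
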
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index d880edf..db8e9a3 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
+ 
+ void radeon_gem_object_free(struct drm_gem_object *gobj)
+ {
+-	struct radeon_object *robj = gobj->driver_private;
++	struct radeon_bo *robj = gobj->driver_private;
+ 
+ 	gobj->driver_private = NULL;
+ 	if (robj) {
+-		radeon_object_unref(&robj);
++		radeon_bo_unref(&robj);
+ 	}
+ }
+ 
+ int radeon_gem_object_create(struct radeon_device *rdev, int size,
+-			     int alignment, int initial_domain,
+-			     bool discardable, bool kernel,
+-			     bool interruptible,
+-			     struct drm_gem_object **obj)
++				int alignment, int initial_domain,
++				bool discardable, bool kernel,
++				struct drm_gem_object **obj)
+ {
+ 	struct drm_gem_object *gobj;
+-	struct radeon_object *robj;
++	struct radeon_bo *robj;
+ 	int r;
+ 
+ 	*obj = NULL;
+@@ -65,11 +64,11 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ 	if (alignment < PAGE_SIZE) {
+ 		alignment = PAGE_SIZE;
+ 	}
+-	r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
+-				 interruptible, &robj);
++	r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
+ 	if (r) {
+-		DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
+-			  size, initial_domain, alignment);
++		if (r != -ERESTARTSYS)
++			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
++				  size, initial_domain, alignment, r);
+ 		mutex_lock(&rdev->ddev->struct_mutex);
+ 		drm_gem_object_unreference(gobj);
+ 		mutex_unlock(&rdev->ddev->struct_mutex);
+@@ -83,33 +82,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
+ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
+ 			  uint64_t *gpu_addr)
+ {
+-	struct radeon_object *robj = obj->driver_private;
+-	uint32_t flags;
++	struct radeon_bo *robj = obj->driver_private;
++	int r;
+ 
+-	switch (pin_domain) {
+-	case RADEON_GEM_DOMAIN_VRAM:
+-		flags = TTM_PL_FLAG_VRAM;
+-		break;
+-	case RADEON_GEM_DOMAIN_GTT:
+-		flags = TTM_PL_FLAG_TT;
+-		break;
+-	default:
+-		flags = TTM_PL_FLAG_SYSTEM;
+-		break;
+-	}
+-	return radeon_object_pin(robj, flags, gpu_addr);
++	r = radeon_bo_reserve(robj, false);
++	if (unlikely(r != 0))
++		return r;
++	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
++	radeon_bo_unreserve(robj);
++	return r;
+ }
+ 
+ void radeon_gem_object_unpin(struct drm_gem_object *obj)
+ {
+-	struct radeon_object *robj = obj->driver_private;
+-	radeon_object_unpin(robj);
++	struct radeon_bo *robj = obj->driver_private;
++	int r;
++
++	r = radeon_bo_reserve(robj, false);
++	if (likely(r == 0)) {
++		radeon_bo_unpin(robj);
++		radeon_bo_unreserve(robj);
++	}
+ }
+ 
+ int radeon_gem_set_domain(struct drm_gem_object *gobj,
+ 			  uint32_t rdomain, uint32_t wdomain)
+ {
+-	struct radeon_object *robj;
++	struct radeon_bo *robj;
+ 	uint32_t domain;
+ 	int r;
+ 
+@@ -127,7 +126,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
+ 	}
+ 	if (domain == RADEON_GEM_DOMAIN_CPU) {
+ 		/* Asking for cpu access wait for object idle */
+-		r = radeon_object_wait(robj);
++		r = radeon_bo_wait(robj, NULL, false);
+ 		if (r) {
+ 			printk(KERN_ERR "Failed to wait for object !\n");
+ 			return r;
+@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
+ 
+ void radeon_gem_fini(struct radeon_device *rdev)
+ {
+-	radeon_object_force_delete(rdev);
++	radeon_bo_force_delete(rdev);
+ }
+ 
+ 
+@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
+ 	struct drm_radeon_gem_info *args = data;
+ 
+ 	args->vram_size = rdev->mc.real_vram_size;
+-	/* FIXME: report somethings that makes sense */
+-	args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
+-	args->gart_size = rdev->mc.gtt_size;
++	args->vram_visible = rdev->mc.real_vram_size;
++	if (rdev->stollen_vga_memory)
++		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
++	if (rdev->fbdev_rbo)
++		args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
++	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
++		RADEON_IB_POOL_SIZE*64*1024;
+ 	return 0;
+ }
+ 
+@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
+ 	/* create a gem object to contain this object in */
+ 	args->size = roundup(args->size, PAGE_SIZE);
+ 	r = radeon_gem_object_create(rdev, args->size, args->alignment,
+-				     args->initial_domain, false,
+-				     false, true, &gobj);
++					args->initial_domain, false,
++					false, &gobj);
+ 	if (r) {
+ 		return r;
+ 	}
+@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+ 	 * just validate the BO into a certain domain */
+ 	struct drm_radeon_gem_set_domain *args = data;
+ 	struct drm_gem_object *gobj;
+-	struct radeon_object *robj;
++	struct radeon_bo *robj;
+ 	int r;
+ 
+ 	/* for now if someone requests domain CPU -
+@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct drm_radeon_gem_mmap *args = data;
+ 	struct drm_gem_object *gobj;
+-	struct radeon_object *robj;
+-	int r;
++	struct radeon_bo *robj;
+ 
+ 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ 	if (gobj == NULL) {
+ 		return -EINVAL;
+ 	}
+ 	robj = gobj->driver_private;
+-	r = radeon_object_mmap(robj, &args->addr_ptr);
++	args->addr_ptr = radeon_bo_mmap_offset(robj);
+ 	mutex_lock(&dev->struct_mutex);
+ 	drm_gem_object_unreference(gobj);
+ 	mutex_unlock(&dev->struct_mutex);
+-	return r;
++	return 0;
+ }
+ 
+ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+@@ -264,16 +266,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct drm_radeon_gem_busy *args = data;
+ 	struct drm_gem_object *gobj;
+-	struct radeon_object *robj;
++	struct radeon_bo *robj;
+ 	int r;
+-	uint32_t cur_placement;
++	uint32_t cur_placement = 0;
+ 
+ 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ 	if (gobj == NULL) {
+ 		return -EINVAL;
+ 	}
+ 	robj = gobj->driver_private;
+-	r = radeon_object_busy_domain(robj, &cur_placement);
++	r = radeon_bo_wait(robj, &cur_placement, true);
+ 	switch (cur_placement) {
+ 	case TTM_PL_VRAM:
+ 		args->domain = RADEON_GEM_DOMAIN_VRAM;
+@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct drm_radeon_gem_wait_idle *args = data;
+ 	struct drm_gem_object *gobj;
+-	struct radeon_object *robj;
++	struct radeon_bo *robj;
+ 	int r;
+ 
+ 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+@@ -305,7 +307,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ 		return -EINVAL;
+ 	}
+ 	robj = gobj->driver_private;
+-	r = radeon_object_wait(robj);
++	r = radeon_bo_wait(robj, NULL, false);
++	/* call the hw-specific callback, if any */
++	if (robj->rdev->asic->ioctl_wait_idle)
++		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+ 	mutex_lock(&dev->struct_mutex);
+ 	drm_gem_object_unreference(gobj);
+ 	mutex_unlock(&dev->struct_mutex);
+@@ -317,7 +322,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct drm_radeon_gem_set_tiling *args = data;
+ 	struct drm_gem_object *gobj;
+-	struct radeon_object *robj;
++	struct radeon_bo *robj;
+ 	int r = 0;
+ 
+ 	DRM_DEBUG("%d \n", args->handle);
+@@ -325,7 +330,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ 	if (gobj == NULL)
+ 		return -EINVAL;
+ 	robj = gobj->driver_private;
+-	radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
++	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+ 	mutex_lock(&dev->struct_mutex);
+ 	drm_gem_object_unreference(gobj);
+ 	mutex_unlock(&dev->struct_mutex);
+@@ -337,16 +342,20 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ {
+ 	struct drm_radeon_gem_get_tiling *args = data;
+ 	struct drm_gem_object *gobj;
+-	struct radeon_object *robj;
++	struct radeon_bo *rbo;
+ 	int r = 0;
+ 
+ 	DRM_DEBUG("\n");
+ 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ 	if (gobj == NULL)
+ 		return -EINVAL;
+-	robj = gobj->driver_private;
+-	radeon_object_get_tiling_flags(robj, &args->tiling_flags,
+-				       &args->pitch);
++	rbo = gobj->driver_private;
++	r = radeon_bo_reserve(rbo, false);
++	if (unlikely(r != 0))
++		goto out;
++	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
++	radeon_bo_unreserve(rbo);
++out:
+ 	mutex_lock(&dev->struct_mutex);
+ 	drm_gem_object_unreference(gobj);
+ 	mutex_unlock(&dev->struct_mutex);
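
The wait-idle ioctl above gains an optional per-ASIC hook: common code performs the generic fence wait, then calls a chip-specific flush only when the ops table provides one. The pattern reduced to its shape (struct layout condensed for illustration):

    struct radeon_asic {
            /* NULL on chips that need no extra work */
            void (*ioctl_wait_idle)(struct radeon_device *rdev,
                                    struct radeon_bo *bo);
    };

    r = radeon_bo_wait(robj, NULL, false);       /* generic wait */
    if (robj->rdev->asic->ioctl_wait_idle)       /* chip-specific part */
            robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
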
+diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
+index dd438d3..da3da1e 100644
+--- a/drivers/gpu/drm/radeon/radeon_i2c.c
++++ b/drivers/gpu/drm/radeon/radeon_i2c.c
+@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
+ }
+ 
+ 
+-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
++void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
+ {
+-	struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
++	struct radeon_device *rdev = i2c->dev->dev_private;
++	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ 	uint32_t temp;
+-	struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
+ 
+ 	/* RV410 appears to have a bug where the hw i2c in reset
+ 	 * holds the i2c port in a bad state - switch hw i2c away before
+ 	 * doing DDC - do this for all r200s/r300s/r400s for safety sake
+ 	 */
+-	if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+-		if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+-			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+-						R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+-		} else {
+-			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+-						R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
++	if (rec->hw_capable) {
++		if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
++			if (rec->a_clk_reg == RADEON_GPIO_MONID) {
++				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
++							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
++			} else {
++				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
++							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
++			}
+ 		}
+ 	}
+-	if (lock_state) {
+-		temp = RREG32(rec->a_clk_reg);
+-		temp &= ~(rec->a_clk_mask);
+-		WREG32(rec->a_clk_reg, temp);
+-
+-		temp = RREG32(rec->a_data_reg);
+-		temp &= ~(rec->a_data_mask);
+-		WREG32(rec->a_data_reg, temp);
+-	}
+ 
++	/* clear the output pin values */
++	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
++	WREG32(rec->a_clk_reg, temp);
++
++	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
++	WREG32(rec->a_data_reg, temp);
++
++	/* set the pins to input */
++	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
++	WREG32(rec->en_clk_reg, temp);
++
++	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
++	WREG32(rec->en_data_reg, temp);
++
++	/* mask the gpio pins for software use */
+ 	temp = RREG32(rec->mask_clk_reg);
+ 	if (lock_state)
+ 		temp |= rec->mask_clk_mask;
+@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
+ 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ 	uint32_t val;
+ 
+-	val = RREG32(rec->get_clk_reg);
+-	val &= rec->get_clk_mask;
++	/* read the value off the pin */
++	val = RREG32(rec->y_clk_reg);
++	val &= rec->y_clk_mask;
+ 
+ 	return (val != 0);
+ }
+@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
+ 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ 	uint32_t val;
+ 
+-	val = RREG32(rec->get_data_reg);
+-	val &= rec->get_data_mask;
++	/* read the value off the pin */
++	val = RREG32(rec->y_data_reg);
++	val &= rec->y_data_mask;
++
+ 	return (val != 0);
+ }
+ 
+@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
+ 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ 	uint32_t val;
+ 
+-	val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
+-	val |= clock ? 0 : rec->put_clk_mask;
+-	WREG32(rec->put_clk_reg, val);
++	/* set pin direction */
++	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
++	val |= clock ? 0 : rec->en_clk_mask;
++	WREG32(rec->en_clk_reg, val);
+ }
+ 
+ static void set_data(void *i2c_priv, int data)
+@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data)
+ 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ 	uint32_t val;
+ 
+-	val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
+-	val |= data ? 0 : rec->put_data_mask;
+-	WREG32(rec->put_data_reg, val);
++	/* set pin direction */
++	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
++	val |= data ? 0 : rec->en_data_mask;
++	WREG32(rec->en_data_reg, val);
+ }
+ 
+ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+-		struct radeon_i2c_bus_rec *rec,
+-		const char *name)
++					  struct radeon_i2c_bus_rec *rec,
++					  const char *name)
+ {
+ 	struct radeon_i2c_chan *i2c;
+ 	int ret;
+@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ 		return NULL;
+ 
+ 	i2c->adapter.owner = THIS_MODULE;
+-	i2c->adapter.algo_data = &i2c->algo;
+ 	i2c->dev = dev;
+-	i2c->algo.setsda = set_data;
+-	i2c->algo.setscl = set_clock;
+-	i2c->algo.getsda = get_data;
+-	i2c->algo.getscl = get_clock;
+-	i2c->algo.udelay = 20;
++	i2c_set_adapdata(&i2c->adapter, i2c);
++	i2c->adapter.algo_data = &i2c->algo.bit;
++	i2c->algo.bit.setsda = set_data;
++	i2c->algo.bit.setscl = set_clock;
++	i2c->algo.bit.getsda = get_data;
++	i2c->algo.bit.getscl = get_clock;
++	i2c->algo.bit.udelay = 20;
+ 	/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
+ 	 * make this, 2 jiffies is a lot more reliable */
+-	i2c->algo.timeout = 2;
+-	i2c->algo.data = i2c;
++	i2c->algo.bit.timeout = 2;
++	i2c->algo.bit.data = i2c;
+ 	i2c->rec = *rec;
+-	i2c_set_adapdata(&i2c->adapter, i2c);
+-
+ 	ret = i2c_bit_add_bus(&i2c->adapter);
+ 	if (ret) {
+ 		DRM_INFO("Failed to register i2c %s\n", name);
+@@ -194,6 +206,38 @@ out_free:
+ 
+ }
+ 
++struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
++					     struct radeon_i2c_bus_rec *rec,
++					     const char *name)
++{
++	struct radeon_i2c_chan *i2c;
++	int ret;
++
++	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
++	if (i2c == NULL)
++		return NULL;
++
++	i2c->rec = *rec;
++	i2c->adapter.owner = THIS_MODULE;
++	i2c->dev = dev;
++	i2c_set_adapdata(&i2c->adapter, i2c);
++	i2c->adapter.algo_data = &i2c->algo.dp;
++	i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
++	i2c->algo.dp.address = 0;
++	ret = i2c_dp_aux_add_bus(&i2c->adapter);
++	if (ret) {
++		DRM_INFO("Failed to register i2c %s\n", name);
++		goto out_free;
++	}
++
++	return i2c;
++out_free:
++	kfree(i2c);
++	return NULL;
++
++}
++
++
+ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
+ {
+ 	if (!i2c)
+@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+ {
+ 	return NULL;
+ }
++
++void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
++			    u8 slave_addr,
++			    u8 addr,
++			    u8 *val)
++{
++	u8 out_buf[2];
++	u8 in_buf[2];
++	struct i2c_msg msgs[] = {
++		{
++			.addr = slave_addr,
++			.flags = 0,
++			.len = 1,
++			.buf = out_buf,
++		},
++		{
++			.addr = slave_addr,
++			.flags = I2C_M_RD,
++			.len = 1,
++			.buf = in_buf,
++		}
++	};
++
++	out_buf[0] = addr;
++	out_buf[1] = 0;
++
++	if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
++		*val = in_buf[0];
++		DRM_DEBUG("val = 0x%02x\n", *val);
++	} else {
++		DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
++			  addr, *val);
++	}
++}
++
++void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
++			    u8 slave_addr,
++			    u8 addr,
++			    u8 val)
++{
++	uint8_t out_buf[2];
++	struct i2c_msg msg = {
++		.addr = slave_addr,
++		.flags = 0,
++		.len = 2,
++		.buf = out_buf,
++	};
++
++	out_buf[0] = addr;
++	out_buf[1] = val;
++
++	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
++		DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
++			  addr, val);
++}
++
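
The i2c rework renames the bus-record registers to match open-drain GPIO semantics: the "en" registers set pin direction (releasing a line means making it an input and letting the bus pull-up raise it, while the output latch in the "a" registers is pre-cleared to low), and the "y" registers read the pin back. The new radeon_i2c_sw_get_byte()/radeon_i2c_sw_put_byte() helpers then give single-byte register access over such a bus; a hypothetical use against an external encoder (slave address and register values invented for illustration):

    u8 val;

    /* write register 0x08 on the encoder, then read it back */
    radeon_i2c_sw_put_byte(tmds->i2c_bus, 0x38, 0x08, 0x30);
    radeon_i2c_sw_get_byte(tmds->i2c_bus, 0x38, 0x08, &val);
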
+diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
+index a1bf11d..48b7cea 100644
+--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
++++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
+@@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
+ 			  &init->gart_textures_offset))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+ }
+ 
+ typedef struct drm_radeon_clear32 {
+@@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
+ 			  &clr->depth_boxes))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+ }
+ 
+ typedef struct drm_radeon_stipple32 {
+@@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
+ 			  &request->mask))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+ }
+ 
+ typedef struct drm_radeon_tex_image32 {
+@@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
+ 			  &image->data))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+ }
+ 
+ typedef struct drm_radeon_vertex2_32 {
+@@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
+ 			  &request->prim))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+ }
+ 
+ typedef struct drm_radeon_cmd_buffer32 {
+@@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
+ 			  &request->boxes))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+ }
+ 
+ typedef struct drm_radeon_getparam32 {
+@@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
+ 			  &request->value))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+ }
+ 
+ typedef struct drm_radeon_mem_alloc32 {
+@@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
+ 			  &request->region_offset))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+ }
+ 
+ typedef struct drm_radeon_irq_emit32 {
+@@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
+ 			  &request->irq_seq))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_path.dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+ }
+ 
+ /* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
+@@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+ 			  &request->value))
+ 		return -EFAULT;
+ 
+-	return drm_ioctl(file->f_dentry->d_inode, file,
+-			 DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
++	return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+ }
+ #else
+ #define compat_radeon_cp_setparam NULL
+@@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+ 		fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+ 
+-	lock_kernel();		/* XXX for now */
+ 	if (fn != NULL)
+ 		ret = (*fn) (filp, cmd, arg);
+ 	else
+-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+-	unlock_kernel();
++		ret = drm_ioctl(filp, cmd, arg);
+ 
+ 	return ret;
+ }
+@@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
+ 	if (nr < DRM_COMMAND_BASE)
+ 		return drm_compat_ioctl(filp, cmd, arg);
+ 
+-	lock_kernel();		/* XXX for now */
+-	ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+-	unlock_kernel();
++	ret = drm_ioctl(filp, cmd, arg);
+ 
+ 	return ret;
+ }
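
All of the radeon_ioc32.c churn follows from one interface change: drm_ioctl() now takes the file pointer directly and does its own locking, so both the inode argument and the lock_kernel()/unlock_kernel() bracketing disappear. A compat thunk in the new style keeps its usual shape; this is an illustrative sketch in the 2.6.32-era compat idiom, not a real radeon ioctl:

    static int compat_foo(struct file *file, unsigned int cmd,
                          unsigned long arg)
    {
            drm_foo32_t f32;
            drm_foo_t __user *f;

            if (copy_from_user(&f32, (void __user *)arg, sizeof(f32)))
                    return -EFAULT;

            /* repack the 32-bit layout into native form in user-
             * accessible memory, then forward -- no BKL, no inode */
            f = compat_alloc_user_space(sizeof(*f));
            if (!access_ok(VERIFY_WRITE, f, sizeof(*f)) ||
                __put_user(f32.param, &f->param))
                    return -EFAULT;

            return drm_ioctl(file, DRM_IOCTL_FOO, (unsigned long)f);
    }
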
+diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
+index b79ecc4..2f349a3 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq.c
++++ b/drivers/gpu/drm/radeon/radeon_irq.c
+@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
+ 	drm_radeon_irq_emit_t *emit = data;
+ 	int result;
+ 
+-	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+-		return -EINVAL;
+-
+-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+-
+ 	if (!dev_priv) {
+ 		DRM_ERROR("called with no initialization\n");
+ 		return -EINVAL;
+ 	}
+ 
++	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
++		return -EINVAL;
++
++	LOCK_TEST_WITH_RETURN(dev, file_priv);
++
+ 	result = radeon_emit_irq(dev);
+ 
+ 	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index a0fe623..3cfd60f 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
+ 	return radeon_irq_process(rdev);
+ }
+ 
++/*
++ * Handle hotplug events outside the interrupt handler proper.
++ */
++static void radeon_hotplug_work_func(struct work_struct *work)
++{
++	struct radeon_device *rdev = container_of(work, struct radeon_device,
++						  hotplug_work);
++	struct drm_device *dev = rdev->ddev;
++	struct drm_mode_config *mode_config = &dev->mode_config;
++	struct drm_connector *connector;
++
++	if (mode_config->num_connector) {
++		list_for_each_entry(connector, &mode_config->connector_list, head)
++			radeon_connector_hotplug(connector);
++	}
++	/* Just fire off a uevent and let userspace tell us what to do */
++	drm_sysfs_hotplug_event(dev);
++}
++
+ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+ 	unsigned i;
+ 
++	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
++
+ 	/* Disable *all* interrupts */
+ 	rdev->irq.sw_int = false;
+ 	for (i = 0; i < 2; i++) {
+@@ -76,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
+ 	rdev->irq.sw_int = false;
+ 	for (i = 0; i < 2; i++) {
+ 		rdev->irq.crtc_vblank_int[i] = false;
++		rdev->irq.hpd[i] = false;
+ 	}
+ 	radeon_irq_set(rdev);
+ }
+@@ -87,30 +109,69 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
+ 
+ 	if (rdev->flags & RADEON_SINGLE_CRTC)
+ 		num_crtc = 1;
+-
++	spin_lock_init(&rdev->irq.sw_lock);
+ 	r = drm_vblank_init(rdev->ddev, num_crtc);
+ 	if (r) {
+ 		return r;
+ 	}
+ 	/* enable msi */
+ 	rdev->msi_enabled = 0;
+-	if (rdev->family >= CHIP_RV380) {
++	/* MSIs don't seem to work on my rs780;
++	 * not sure about rs880 or other rs780s.
++	 * Needs more investigation.
++	 */
++	if ((rdev->family >= CHIP_RV380) &&
++	    (rdev->family != CHIP_RS780) &&
++	    (rdev->family != CHIP_RS880)) {
+ 		int ret = pci_enable_msi(rdev->pdev);
+-		if (!ret)
++		if (!ret) {
+ 			rdev->msi_enabled = 1;
++			DRM_INFO("radeon: using MSI.\n");
++		}
+ 	}
+-	drm_irq_install(rdev->ddev);
+ 	rdev->irq.installed = true;
++	r = drm_irq_install(rdev->ddev);
++	if (r) {
++		rdev->irq.installed = false;
++		return r;
++	}
+ 	DRM_INFO("radeon: irq initialized.\n");
+ 	return 0;
+ }
+ 
+ void radeon_irq_kms_fini(struct radeon_device *rdev)
+ {
++	drm_vblank_cleanup(rdev->ddev);
+ 	if (rdev->irq.installed) {
+-		rdev->irq.installed = false;
+ 		drm_irq_uninstall(rdev->ddev);
++		rdev->irq.installed = false;
+ 		if (rdev->msi_enabled)
+ 			pci_disable_msi(rdev->pdev);
+ 	}
+ }
++
++void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
++{
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
++	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
++		rdev->irq.sw_int = true;
++		radeon_irq_set(rdev);
++	}
++	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
++}
++
++void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
++{
++	unsigned long irqflags;
++
++	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
++	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
++	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
++		rdev->irq.sw_int = false;
++		radeon_irq_set(rdev);
++	}
++	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
++}
++
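
radeon_irq_kms_sw_irq_get()/put() implement a classic refcounted enable: several fence waiters can overlap, the hardware is touched only on the 0-to-1 and 1-to-0 transitions, and the spinlock keeps the count and the transition atomic against other CPUs and the interrupt path. In the abstract:

    spin_lock_irqsave(&lock, flags);
    if (++refcount == 1)
            enable_hw_irq();                     /* first user: turn on */
    spin_unlock_irqrestore(&lock, flags);

    /* ... sleep on the wait queue ... */

    spin_lock_irqsave(&lock, flags);
    if (--refcount == 0)
            disable_hw_irq();                    /* last user: turn off */
    spin_unlock_irqrestore(&lock, flags);
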
+diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
+index ba12862..f23b056 100644
+--- a/drivers/gpu/drm/radeon/radeon_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_kms.c
+@@ -30,10 +30,19 @@
+ #include "radeon.h"
+ #include "radeon_drm.h"
+ 
++int radeon_driver_unload_kms(struct drm_device *dev)
++{
++	struct radeon_device *rdev = dev->dev_private;
++
++	if (rdev == NULL)
++		return 0;
++	radeon_modeset_fini(rdev);
++	radeon_device_fini(rdev);
++	kfree(rdev);
++	dev->dev_private = NULL;
++	return 0;
++}
+ 
+-/*
+- * Driver load/unload
+- */
+ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+ {
+ 	struct radeon_device *rdev;
+@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
+ 	 */
+ 	r = radeon_device_init(rdev, dev, dev->pdev, flags);
+ 	if (r) {
+-		DRM_ERROR("Fatal error while trying to initialize radeon.\n");
+-		return r;
++		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
++		goto out;
+ 	}
+ 	/* Again modeset_init should fail only on fatal error
+ 	 * otherwise it should provide enough functionalities
+ 	 * for shadowfb to run
+ 	 */
+ 	r = radeon_modeset_init(rdev);
+-	if (r) {
+-		return r;
+-	}
+-	return 0;
+-}
+-
+-int radeon_driver_unload_kms(struct drm_device *dev)
+-{
+-	struct radeon_device *rdev = dev->dev_private;
+-
+-	if (rdev == NULL)
+-		return 0;
+-	radeon_modeset_fini(rdev);
+-	radeon_device_fini(rdev);
+-	kfree(rdev);
+-	dev->dev_private = NULL;
+-	return 0;
++	if (r)
++		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
++out:
++	if (r)
++		radeon_driver_unload_kms(dev);
++	return r;
+ }
+ 
+ 
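
radeon_driver_unload_kms() is hoisted above the load function so the load path can reuse it for unwinding: any failure, whether in device init or modeset init, funnels through a single exit that tears the device back down. The resulting control flow, condensed:

    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r)
            goto out;                            /* skip modeset init */
    r = radeon_modeset_init(rdev);
    out:
    if (r)
            radeon_driver_unload_kms(dev);       /* undo partial init */
    return r;
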
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+index 22ce4d6..b6d8081 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -30,9 +30,20 @@
+ #include "radeon.h"
+ #include "atom.h"
+ 
++static void radeon_overscan_setup(struct drm_crtc *crtc,
++				  struct drm_display_mode *mode)
++{
++	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++
++	WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
++	WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
++	WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
++}
++
+ static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
+-				       struct drm_display_mode *mode,
+-				       struct drm_display_mode *adjusted_mode)
++				       struct drm_display_mode *mode)
+ {
+ 	struct drm_device *dev = crtc->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+@@ -328,69 +339,6 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
+ 	}
+ }
+ 
+-/* properly set crtc bpp when using atombios */
+-void radeon_legacy_atom_set_surface(struct drm_crtc *crtc)
+-{
+-	struct drm_device *dev = crtc->dev;
+-	struct radeon_device *rdev = dev->dev_private;
+-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+-	int format;
+-	uint32_t crtc_gen_cntl;
+-	uint32_t disp_merge_cntl;
+-	uint32_t crtc_pitch;
+-
+-	switch (crtc->fb->bits_per_pixel) {
+-	case 8:
+-		format = 2;
+-		break;
+-	case 15:      /*  555 */
+-		format = 3;
+-		break;
+-	case 16:      /*  565 */
+-		format = 4;
+-		break;
+-	case 24:      /*  RGB */
+-		format = 5;
+-		break;
+-	case 32:      /* xRGB */
+-		format = 6;
+-		break;
+-	default:
+-		return;
+-	}
+-
+-	crtc_pitch  = ((((crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8)) * crtc->fb->bits_per_pixel) +
+-			((crtc->fb->bits_per_pixel * 8) - 1)) /
+-		       (crtc->fb->bits_per_pixel * 8));
+-	crtc_pitch |= crtc_pitch << 16;
+-
+-	WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+-
+-	switch (radeon_crtc->crtc_id) {
+-	case 0:
+-		disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
+-		disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
+-		WREG32(RADEON_DISP_MERGE_CNTL, disp_merge_cntl);
+-
+-		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0xfffff0ff;
+-		crtc_gen_cntl |= (format << 8);
+-		crtc_gen_cntl |= RADEON_CRTC_EXT_DISP_EN;
+-		WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
+-		break;
+-	case 1:
+-		disp_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
+-		disp_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
+-		WREG32(RADEON_DISP2_MERGE_CNTL, disp_merge_cntl);
+-
+-		crtc_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0xfffff0ff;
+-		crtc_gen_cntl |= (format << 8);
+-		WREG32(RADEON_CRTC2_GEN_CNTL, crtc_gen_cntl);
+-		WREG32(RADEON_FP_H2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_H_SYNC_STRT_WID));
+-		WREG32(RADEON_FP_V2_SYNC_STRT_WID,   RREG32(RADEON_CRTC2_V_SYNC_STRT_WID));
+-		break;
+-	}
+-}
+-
+ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 			 struct drm_framebuffer *old_fb)
+ {
+@@ -399,14 +347,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct radeon_framebuffer *radeon_fb;
+ 	struct drm_gem_object *obj;
++	struct radeon_bo *rbo;
+ 	uint64_t base;
+ 	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
+ 	uint32_t crtc_pitch, pitch_pixels;
+ 	uint32_t tiling_flags;
+ 	int format;
+ 	uint32_t gen_cntl_reg, gen_cntl_val;
++	int r;
+ 
+ 	DRM_DEBUG("\n");
++	/* no fb bound */
++	if (!crtc->fb) {
++		DRM_DEBUG("No FB bound\n");
++		return 0;
++	}
+ 
+ 	radeon_fb = to_radeon_framebuffer(crtc->fb);
+ 
+@@ -430,10 +385,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 		return false;
+ 	}
+ 
++	/* Pin framebuffer & get tiling information */
+ 	obj = radeon_fb->obj;
+-	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
++	rbo = obj->driver_private;
++	r = radeon_bo_reserve(rbo, false);
++	if (unlikely(r != 0))
++		return r;
++	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
++	if (unlikely(r != 0)) {
++		radeon_bo_unreserve(rbo);
+ 		return -EINVAL;
+ 	}
++	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
++	radeon_bo_unreserve(rbo);
++	if (tiling_flags & RADEON_TILING_MICRO)
++		DRM_ERROR("trying to scanout microtiled buffer\n");
++
+ 	/* if scanout was in GTT this really wouldn't work */
+ 	/* crtc offset is from display base addr not FB location */
+ 	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
+@@ -448,10 +415,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 		       (crtc->fb->bits_per_pixel * 8));
+ 	crtc_pitch |= crtc_pitch << 16;
+ 
+-	radeon_object_get_tiling_flags(obj->driver_private,
+-				       &tiling_flags, NULL);
+-	if (tiling_flags & RADEON_TILING_MICRO)
+-		DRM_ERROR("trying to scanout microtiled buffer\n");
+ 
+ 	if (tiling_flags & RADEON_TILING_MACRO) {
+ 		if (ASIC_IS_R300(rdev))
+@@ -529,7 +492,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 
+ 	if (old_fb && old_fb != crtc->fb) {
+ 		radeon_fb = to_radeon_framebuffer(old_fb);
+-		radeon_gem_object_unpin(radeon_fb->obj);
++		rbo = radeon_fb->obj->driver_private;
++		r = radeon_bo_reserve(rbo, false);
++		if (unlikely(r != 0))
++			return r;
++		radeon_bo_unpin(rbo);
++		radeon_bo_unreserve(rbo);
+ 	}
+ 
+ 	/* Bytes per pixel may have changed */
+@@ -641,12 +609,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
+ 		uint32_t crtc2_gen_cntl;
+ 		uint32_t disp2_merge_cntl;
+ 
+-		/* check to see if TV DAC is enabled for another crtc and keep it enabled */
+-		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
+-			crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
+-		else
+-			crtc2_gen_cntl = 0;
+-
++		/* if TV DAC is enabled for another crtc, keep it enabled */
++		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
+ 		crtc2_gen_cntl |= ((format << 8)
+ 				   | RADEON_CRTC2_VSYNC_DIS
+ 				   | RADEON_CRTC2_HSYNC_DIS
+@@ -675,7 +639,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
+ 		uint32_t crtc_ext_cntl;
+ 		uint32_t disp_merge_cntl;
+ 
+-		crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
++		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
++		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
+ 				 | (format << 8)
+ 				 | RADEON_CRTC_DISP_REQ_EN_B
+ 				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
+@@ -727,7 +692,6 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 	uint32_t post_divider = 0;
+ 	uint32_t freq = 0;
+ 	uint8_t pll_gain;
+-	int pll_flags = RADEON_PLL_LEGACY;
+ 	bool use_bios_divs = false;
+ 	/* PLL registers */
+ 	uint32_t pll_ref_div = 0;
+@@ -761,10 +725,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 	else
+ 		pll = &rdev->clock.p1pll;
+ 
++	pll->flags = RADEON_PLL_LEGACY;
++
+ 	if (mode->clock > 200000) /* range limits??? */
+-		pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
++		pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
+ 	else
+-		pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
++		pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ 
+ 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ 		if (encoder->crtc == crtc) {
+@@ -776,20 +742,22 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 			}
+ 
+ 			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
+-				pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
++				pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
+ 			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+-				struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+-				struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+-				if (lvds) {
+-					if (lvds->use_bios_dividers) {
+-						pll_ref_div = lvds->panel_ref_divider;
+-						pll_fb_post_div   = (lvds->panel_fb_divider |
+-								     (lvds->panel_post_divider << 16));
+-						htotal_cntl  = 0;
+-						use_bios_divs = true;
++				if (!rdev->is_atom_bios) {
++					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++					struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
++					if (lvds) {
++						if (lvds->use_bios_dividers) {
++							pll_ref_div = lvds->panel_ref_divider;
++							pll_fb_post_div   = (lvds->panel_fb_divider |
++									     (lvds->panel_post_divider << 16));
++							htotal_cntl  = 0;
++							use_bios_divs = true;
++						}
+ 					}
+ 				}
+-				pll_flags |= RADEON_PLL_USE_REF_DIV;
++				pll->flags |= RADEON_PLL_USE_REF_DIV;
+ 			}
+ 		}
+ 	}
+@@ -799,8 +767,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+ 	if (!use_bios_divs) {
+ 		radeon_compute_pll(pll, mode->clock,
+ 				   &freq, &feedback_div, &frac_fb_div,
+-				   &reference_div, &post_divider,
+-				   pll_flags);
++				   &reference_div, &post_divider);
+ 
+ 		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
+ 			if (post_div->divider == post_divider)
+@@ -1026,8 +993,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+ 	radeon_crtc_set_base(crtc, x, y, old_fb);
+ 	radeon_set_crtc_timing(crtc, adjusted_mode);
+ 	radeon_set_pll(crtc, adjusted_mode);
++	radeon_overscan_setup(crtc, adjusted_mode);
+ 	if (radeon_crtc->crtc_id == 0) {
+-		radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
++		radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
+ 	} else {
+ 		if (radeon_crtc->rmx_type != RMX_OFF) {
+ 			/* FIXME: only first crtc has rmx what should we
+@@ -1041,12 +1009,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
+ 
+ static void radeon_crtc_prepare(struct drm_crtc *crtc)
+ {
+-	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
++	struct drm_device *dev = crtc->dev;
++	struct drm_crtc *crtci;
++
++	/*
++	* The hardware wedges sometimes if you reconfigure one CRTC
++	* whilst another is running (see fdo bug #24611).
++	*/
++	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
++		radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
+ }
+ 
+ static void radeon_crtc_commit(struct drm_crtc *crtc)
+ {
+-	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
++	struct drm_device *dev = crtc->dev;
++	struct drm_crtc *crtci;
++
++	/*
++	* Reenable the CRTCs that should be running.
++	*/
++	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
++		if (crtci->enabled)
++			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
++	}
+ }
+ 
+ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
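
prepare/commit now cycle every CRTC, not just the one being reconfigured, because reprogramming one CRTC while another is scanning out can wedge the hardware (fdo bug #24611); commit then re-enables only the pipes marked enabled. For orientation, this is where the two helpers sit in the DRM mode-set flow (simplified from drm_crtc_helper_set_mode(), not the exact call sequence):

    crtc_funcs->prepare(crtc);       /* above: all CRTCs off */
    crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
    crtc_funcs->commit(crtc);        /* above: enabled CRTCs back on */
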
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 0038212..38e45e2 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -46,6 +46,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
+ 	int panel_pwr_delay = 2000;
++	bool is_mac = false;
+ 	DRM_DEBUG("\n");
+ 
+ 	if (radeon_encoder->enc_priv) {
+@@ -58,6 +59,15 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+ 		}
+ 	}
+ 
++	/* macs (and possibly some x86 oem systems?) wire up LVDS strangely.
++	 * Taken from radeonfb.
++	 */
++	if ((rdev->mode_info.connector_table == CT_IBOOK) ||
++	    (rdev->mode_info.connector_table == CT_POWERBOOK_EXTERNAL) ||
++	    (rdev->mode_info.connector_table == CT_POWERBOOK_INTERNAL) ||
++	    (rdev->mode_info.connector_table == CT_POWERBOOK_VGA))
++		is_mac = true;
++
+ 	switch (mode) {
+ 	case DRM_MODE_DPMS_ON:
+ 		disp_pwr_man = RREG32(RADEON_DISP_PWR_MAN);
+@@ -74,6 +84,8 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+ 
+ 		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ 		lvds_gen_cntl |= (RADEON_LVDS_ON | RADEON_LVDS_EN | RADEON_LVDS_DIGON | RADEON_LVDS_BLON);
++		if (is_mac)
++			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
+ 		lvds_gen_cntl &= ~(RADEON_LVDS_DISPLAY_DIS);
+ 		udelay(panel_pwr_delay * 1000);
+ 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+@@ -85,7 +97,14 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
+ 		WREG32_PLL_P(RADEON_PIXCLKS_CNTL, 0, ~RADEON_PIXCLK_LVDS_ALWAYS_ONb);
+ 		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+ 		lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+-		lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
++		if (is_mac) {
++			lvds_gen_cntl &= ~RADEON_LVDS_BL_MOD_EN;
++			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
++			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_EN);
++		} else {
++			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
++			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
++		}
+ 		udelay(panel_pwr_delay * 1000);
+ 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
+ 		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
+@@ -136,7 +155,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+ 	lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
+ 
+ 	lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
+-	if ((!rdev->is_atom_bios)) {
++	if (rdev->is_atom_bios) {
++		/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl
++		 * need to call that on resume to set up the reg properly.
++		 */
++		radeon_encoder->pixel_clock = adjusted_mode->clock;
++		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
++		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
++	} else {
+ 		struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+ 		if (lvds) {
+ 			DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+@@ -147,8 +173,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+ 					     (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
+ 		} else
+ 			lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+-	} else
+-		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
++	}
+ 	lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
+ 	lvds_gen_cntl &= ~(RADEON_LVDS_ON |
+ 			   RADEON_LVDS_BLON |
+@@ -184,9 +209,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
+ 		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ }
+ 
+-static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
+-					  struct drm_display_mode *mode,
+-					  struct drm_display_mode *adjusted_mode)
++static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
++				     struct drm_display_mode *mode,
++				     struct drm_display_mode *adjusted_mode)
+ {
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 
+@@ -194,15 +219,24 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
+ 	radeon_encoder_set_active_device(encoder);
+ 	drm_mode_set_crtcinfo(adjusted_mode, 0);
+ 
+-	if (radeon_encoder->rmx_type != RMX_OFF)
+-		radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
++	/* get the native mode for LVDS */
++	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
++		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
++		int mode_id = adjusted_mode->base.id;
++		*adjusted_mode = *native_mode;
++		adjusted_mode->hdisplay = mode->hdisplay;
++		adjusted_mode->vdisplay = mode->vdisplay;
++		adjusted_mode->crtc_hdisplay = mode->hdisplay;
++		adjusted_mode->crtc_vdisplay = mode->vdisplay;
++		adjusted_mode->base.id = mode_id;
++	}
+ 
+ 	return true;
+ }
+ 
+ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
+ 	.dpms = radeon_legacy_lvds_dpms,
+-	.mode_fixup = radeon_legacy_lvds_mode_fixup,
++	.mode_fixup = radeon_legacy_mode_fixup,
+ 	.prepare = radeon_legacy_lvds_prepare,
+ 	.mode_set = radeon_legacy_lvds_mode_set,
+ 	.commit = radeon_legacy_lvds_commit,
+@@ -214,17 +248,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
+ 	.destroy = radeon_enc_destroy,
+ };
+ 
+-static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
+-						 struct drm_display_mode *mode,
+-						 struct drm_display_mode *adjusted_mode)
+-{
+-	/* set the active encoder to connector routing */
+-	radeon_encoder_set_active_device(encoder);
+-	drm_mode_set_crtcinfo(adjusted_mode, 0);
+-
+-	return true;
+-}
+-
+ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+@@ -410,7 +433,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
+ 
+ static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
+ 	.dpms = radeon_legacy_primary_dac_dpms,
+-	.mode_fixup = radeon_legacy_primary_dac_mode_fixup,
++	.mode_fixup = radeon_legacy_mode_fixup,
+ 	.prepare = radeon_legacy_primary_dac_prepare,
+ 	.mode_set = radeon_legacy_primary_dac_mode_set,
+ 	.commit = radeon_legacy_primary_dac_commit,
+@@ -423,16 +446,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
+ 	.destroy = radeon_enc_destroy,
+ };
+ 
+-static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
+-					      struct drm_display_mode *mode,
+-					      struct drm_display_mode *adjusted_mode)
+-{
+-
+-	drm_mode_set_crtcinfo(adjusted_mode, 0);
+-
+-	return true;
+-}
+-
+ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+@@ -584,7 +597,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
+ 
+ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
+ 	.dpms = radeon_legacy_tmds_int_dpms,
+-	.mode_fixup = radeon_legacy_tmds_int_mode_fixup,
++	.mode_fixup = radeon_legacy_mode_fixup,
+ 	.prepare = radeon_legacy_tmds_int_prepare,
+ 	.mode_set = radeon_legacy_tmds_int_mode_set,
+ 	.commit = radeon_legacy_tmds_int_commit,
+@@ -596,17 +609,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
+ 	.destroy = radeon_enc_destroy,
+ };
+ 
+-static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
+-					      struct drm_display_mode *mode,
+-					      struct drm_display_mode *adjusted_mode)
+-{
+-	/* set the active encoder to connector routing */
+-	radeon_encoder_set_active_device(encoder);
+-	drm_mode_set_crtcinfo(adjusted_mode, 0);
+-
+-	return true;
+-}
+-
+ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+@@ -697,6 +699,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ 			/*if (mode->clock > 165000)
+ 			  fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
+ 		}
++		if (!radeon_combios_external_tmds_setup(encoder))
++			radeon_external_tmds_setup(encoder);
+ 	}
+ 
+ 	if (radeon_crtc->crtc_id == 0) {
+@@ -724,9 +728,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
+ 		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+ }
+ 
++static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
++{
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
++	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
++	if (tmds) {
++		if (tmds->i2c_bus)
++			radeon_i2c_destroy(tmds->i2c_bus);
++	}
++	kfree(radeon_encoder->enc_priv);
++	drm_encoder_cleanup(encoder);
++	kfree(radeon_encoder);
++}
++
+ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
+ 	.dpms = radeon_legacy_tmds_ext_dpms,
+-	.mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
++	.mode_fixup = radeon_legacy_mode_fixup,
+ 	.prepare = radeon_legacy_tmds_ext_prepare,
+ 	.mode_set = radeon_legacy_tmds_ext_mode_set,
+ 	.commit = radeon_legacy_tmds_ext_commit,
+@@ -735,20 +752,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
+ 
+ 
+ static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
+-	.destroy = radeon_enc_destroy,
++	.destroy = radeon_ext_tmds_enc_destroy,
+ };
+ 
+-static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
+-					    struct drm_display_mode *mode,
+-					    struct drm_display_mode *adjusted_mode)
+-{
+-	/* set the active encoder to connector routing */
+-	radeon_encoder_set_active_device(encoder);
+-	drm_mode_set_crtcinfo(adjusted_mode, 0);
+-
+-	return true;
+-}
+-
+ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+@@ -1265,7 +1271,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
+ 
+ static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
+ 	.dpms = radeon_legacy_tv_dac_dpms,
+-	.mode_fixup = radeon_legacy_tv_dac_mode_fixup,
++	.mode_fixup = radeon_legacy_mode_fixup,
+ 	.prepare = radeon_legacy_tv_dac_prepare,
+ 	.mode_set = radeon_legacy_tv_dac_mode_set,
+ 	.commit = radeon_legacy_tv_dac_commit,
+@@ -1302,6 +1308,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
+ 	return tmds;
+ }
+ 
++static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
++{
++	struct drm_device *dev = encoder->base.dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder_ext_tmds *tmds = NULL;
++	bool ret;
++
++	if (rdev->is_atom_bios)
++		return NULL;
++
++	tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
++
++	if (!tmds)
++		return NULL;
++
++	ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
++
++	if (ret == false)
++		radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
++
++	return tmds;
++}
++
+ void
+ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
+ {
+@@ -1329,7 +1358,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
+ 		encoder->possible_crtcs = 0x1;
+ 	else
+ 		encoder->possible_crtcs = 0x3;
+-	encoder->possible_clones = 0;
+ 
+ 	radeon_encoder->enc_priv = NULL;
+ 
+@@ -1373,7 +1401,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
+ 		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
+ 		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
+ 		if (!rdev->is_atom_bios)
+-			radeon_combios_get_ext_tmds_info(radeon_encoder);
++			radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
+ 		break;
+ 	}
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+index 3a12bb0..417684d 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+@@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
+ 	unsigned pix_to_tv;
+ };
+ 
+-static const uint16_t hor_timing_NTSC[] = {
++static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
+ 	0x0007,
+ 	0x003f,
+ 	0x0263,
+@@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
+ 	0
+ };
+ 
+-static const uint16_t vert_timing_NTSC[] = {
++static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
+ 	0x2001,
+ 	0x200d,
+ 	0x1006,
+@@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
+ 	0
+ };
+ 
+-static const uint16_t hor_timing_PAL[] = {
++static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
+ 	0x0007,
+ 	0x0058,
+ 	0x027c,
+@@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
+ 	0
+ };
+ 
+-static const uint16_t vert_timing_PAL[] = 	{
++static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
+ 	0x2001,
+ 	0x200c,
+ 	0x1005,
+@@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
+ 	}
+ 	flicker_removal = (tmp + 500) / 1000;
+ 
+-	if (flicker_removal < 3)
+-		flicker_removal = 3;
+-	for (i = 0; i < 6; ++i) {
++	if (flicker_removal < 2)
++		flicker_removal = 2;
++	for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
+ 		if (flicker_removal == SLOPE_limit[i])
+ 			break;
+ 	}
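
Sizing the timing tables explicitly to MAX_H_CODE_TIMING_LEN/MAX_V_CODE_TIMING_LEN guarantees that indexed accesses up to those bounds stay in range, and the slope-limit loop trades its magic bound of 6 for ARRAY_SIZE, which stays correct if the table ever changes length. The kernel macro is essentially (modulo a compile-time array-type check):

    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i)
            if (flicker_removal == SLOPE_limit[i])
                    break;
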
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index ace726a..e81b2ae 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -33,6 +33,7 @@
+ #include <drm_crtc.h>
+ #include <drm_mode.h>
+ #include <drm_edid.h>
++#include <drm_dp_helper.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-id.h>
+ #include <linux/i2c-algo-bit.h>
+@@ -45,32 +46,6 @@ struct radeon_device;
+ #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
+ #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
+ 
+-enum radeon_connector_type {
+-	CONNECTOR_NONE,
+-	CONNECTOR_VGA,
+-	CONNECTOR_DVI_I,
+-	CONNECTOR_DVI_D,
+-	CONNECTOR_DVI_A,
+-	CONNECTOR_STV,
+-	CONNECTOR_CTV,
+-	CONNECTOR_LVDS,
+-	CONNECTOR_DIGITAL,
+-	CONNECTOR_SCART,
+-	CONNECTOR_HDMI_TYPE_A,
+-	CONNECTOR_HDMI_TYPE_B,
+-	CONNECTOR_0XC,
+-	CONNECTOR_0XD,
+-	CONNECTOR_DIN,
+-	CONNECTOR_DISPLAY_PORT,
+-	CONNECTOR_UNSUPPORTED
+-};
+-
+-enum radeon_dvi_type {
+-	DVI_AUTO,
+-	DVI_DIGITAL,
+-	DVI_ANALOG
+-};
+-
+ enum radeon_rmx_type {
+ 	RMX_OFF,
+ 	RMX_FULL,
+@@ -87,26 +62,48 @@ enum radeon_tv_std {
+ 	TV_STD_SCART_PAL,
+ 	TV_STD_SECAM,
+ 	TV_STD_PAL_CN,
++	TV_STD_PAL_N,
+ };
+ 
++/* radeon gpio-based i2c
++ * 1. "mask" reg and bits
++ *    grabs the gpio pins for software use
++ *    0=not held  1=held
++ * 2. "a" reg and bits
++ *    output pin value
++ *    0=low 1=high
++ * 3. "en" reg and bits
++ *    sets the pin direction
++ *    0=input 1=output
++ * 4. "y" reg and bits
++ *    input pin value
++ *    0=low 1=high
++ */
+ struct radeon_i2c_bus_rec {
+ 	bool valid;
++	/* id used by atom */
++	uint8_t i2c_id;
++	/* can be used with hw i2c engine */
++	bool hw_capable;
++	/* uses multi-media i2c engine */
++	bool mm_i2c;
++	/* regs and bits */
+ 	uint32_t mask_clk_reg;
+ 	uint32_t mask_data_reg;
+ 	uint32_t a_clk_reg;
+ 	uint32_t a_data_reg;
+-	uint32_t put_clk_reg;
+-	uint32_t put_data_reg;
+-	uint32_t get_clk_reg;
+-	uint32_t get_data_reg;
++	uint32_t en_clk_reg;
++	uint32_t en_data_reg;
++	uint32_t y_clk_reg;
++	uint32_t y_data_reg;
+ 	uint32_t mask_clk_mask;
+ 	uint32_t mask_data_mask;
+-	uint32_t put_clk_mask;
+-	uint32_t put_data_mask;
+-	uint32_t get_clk_mask;
+-	uint32_t get_data_mask;
+ 	uint32_t a_clk_mask;
+ 	uint32_t a_data_mask;
++	uint32_t en_clk_mask;
++	uint32_t en_data_mask;
++	uint32_t y_clk_mask;
++	uint32_t y_data_mask;
+ };
+ 
+ struct radeon_tmds_pll {
+@@ -128,16 +125,24 @@ struct radeon_tmds_pll {
+ #define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9)
+ #define RADEON_PLL_USE_FRAC_FB_DIV      (1 << 10)
+ #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
++#define RADEON_PLL_USE_POST_DIV         (1 << 12)
+ 
+ struct radeon_pll {
+-	uint16_t reference_freq;
+-	uint16_t reference_div;
++	/* reference frequency */
++	uint32_t reference_freq;
++
++	/* fixed dividers */
++	uint32_t reference_div;
++	uint32_t post_div;
++
++	/* pll in/out limits */
+ 	uint32_t pll_in_min;
+ 	uint32_t pll_in_max;
+ 	uint32_t pll_out_min;
+ 	uint32_t pll_out_max;
+-	uint16_t xclk;
++	uint32_t best_vco;
+ 
++	/* divider limits */
+ 	uint32_t min_ref_div;
+ 	uint32_t max_ref_div;
+ 	uint32_t min_post_div;
+@@ -146,13 +151,21 @@ struct radeon_pll {
+ 	uint32_t max_feedback_div;
+ 	uint32_t min_frac_feedback_div;
+ 	uint32_t max_frac_feedback_div;
+-	uint32_t best_vco;
++
++	/* flags for the current clock */
++	uint32_t flags;
++
++	/* pll id */
++	uint32_t id;
+ };
+ 
+ struct radeon_i2c_chan {
+-	struct drm_device *dev;
+ 	struct i2c_adapter adapter;
+-	struct i2c_algo_bit_data algo;
++	struct drm_device *dev;
++	union {
++		struct i2c_algo_dp_aux_data dp;
++		struct i2c_algo_bit_data bit;
++	} algo;
+ 	struct radeon_i2c_bus_rec rec;
+ };
+ 
+@@ -170,6 +183,11 @@ enum radeon_connector_table {
+ 	CT_EMAC,
+ };
+ 
++enum radeon_dvo_chip {
++	DVO_SIL164,
++	DVO_SIL1178,
++};
++
+ struct radeon_mode_info {
+ 	struct atom_context *atom_context;
+ 	struct card_info *atom_card_info;
+@@ -261,6 +279,13 @@ struct radeon_encoder_int_tmds {
+ 	struct radeon_tmds_pll tmds_pll[4];
+ };
+ 
++struct radeon_encoder_ext_tmds {
++	/* tmds over dvo */
++	struct radeon_i2c_chan *i2c_bus;
++	uint8_t slave_addr;
++	enum radeon_dvo_chip dvo_chip;
++};
++
+ /* spread spectrum */
+ struct radeon_atom_ss {
+ 	uint16_t percentage;
+@@ -274,7 +299,7 @@ struct radeon_atom_ss {
+ struct radeon_encoder_atom_dig {
+ 	/* atom dig */
+ 	bool coherent_mode;
+-	int dig_block;
++	int dig_encoder; /* -1 disabled, 0 DIGA, 1 DIGB */
+ 	/* atom lvds */
+ 	uint32_t lvds_misc;
+ 	uint16_t panel_pwr_delay;
+@@ -297,11 +322,43 @@ struct radeon_encoder {
+ 	enum radeon_rmx_type rmx_type;
+ 	struct drm_display_mode native_mode;
+ 	void *enc_priv;
++	int hdmi_offset;
++	int hdmi_audio_workaround;
++	int hdmi_buffer_status;
+ };
+ 
+ struct radeon_connector_atom_dig {
+ 	uint32_t igp_lane_info;
+ 	bool linkb;
++	/* displayport */
++	struct radeon_i2c_chan *dp_i2c_bus;
++	u8 dpcd[8];
++	u8 dp_sink_type;
++	int dp_clock;
++	int dp_lane_count;
++};
++
++struct radeon_gpio_rec {
++	bool valid;
++	u8 id;
++	u32 reg;
++	u32 mask;
++};
++
++enum radeon_hpd_id {
++	RADEON_HPD_NONE = 0,
++	RADEON_HPD_1,
++	RADEON_HPD_2,
++	RADEON_HPD_3,
++	RADEON_HPD_4,
++	RADEON_HPD_5,
++	RADEON_HPD_6,
++};
++
++struct radeon_hpd {
++	enum radeon_hpd_id hpd;
++	u8 plugged_state;
++	struct radeon_gpio_rec gpio;
+ };
+ 
+ struct radeon_connector {
+@@ -318,6 +375,7 @@ struct radeon_connector {
+ 	void *con_priv;
+ 	bool dac_load_detect;
+ 	uint16_t connector_object_id;
++	struct radeon_hpd hpd;
+ };
+ 
+ struct radeon_framebuffer {
+@@ -325,10 +383,42 @@ struct radeon_framebuffer {
+ 	struct drm_gem_object *obj;
+ };
+ 
++extern enum radeon_tv_std
++radeon_combios_get_tv_info(struct radeon_device *rdev);
++extern enum radeon_tv_std
++radeon_atombios_get_tv_info(struct radeon_device *rdev);
++
++extern void radeon_connector_hotplug(struct drm_connector *connector);
++extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
++extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
++				       struct drm_display_mode *mode);
++extern void radeon_dp_set_link_config(struct drm_connector *connector,
++				      struct drm_display_mode *mode);
++extern void dp_link_train(struct drm_encoder *encoder,
++			  struct drm_connector *connector);
++extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
++extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
++extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
++					   int action, uint8_t lane_num,
++					   uint8_t lane_set);
++extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
++				uint8_t write_byte, uint8_t *read_byte);
++
++extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
++						    struct radeon_i2c_bus_rec *rec,
++						    const char *name);
+ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
+ 						 struct radeon_i2c_bus_rec *rec,
+ 						 const char *name);
+ extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
++extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
++				   u8 slave_addr,
++				   u8 addr,
++				   u8 *val);
++extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
++				   u8 slave_addr,
++				   u8 addr,
++				   u8 val);
+ extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
+ extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+ 
+@@ -340,8 +430,17 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
+ 			       uint32_t *fb_div_p,
+ 			       uint32_t *frac_fb_div_p,
+ 			       uint32_t *ref_div_p,
+-			       uint32_t *post_div_p,
+-			       int flags);
++			       uint32_t *post_div_p);
++
++extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
++				     uint64_t freq,
++				     uint32_t *dot_clock_p,
++				     uint32_t *fb_div_p,
++				     uint32_t *frac_fb_div_p,
++				     uint32_t *ref_div_p,
++				     uint32_t *post_div_p);
++
++extern void radeon_setup_encoder_clones(struct drm_device *dev);
+ 
+ struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
+ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
+@@ -349,6 +448,7 @@ struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int
+ struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
+ struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
+ extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
++extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
+ extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
+ extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
+ 
+@@ -364,7 +464,6 @@ extern void atombios_crtc_dpms(struct drm_crtc *crtc, int mode);
+ 
+ extern int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ 				 struct drm_framebuffer *old_fb);
+-extern void radeon_legacy_atom_set_surface(struct drm_crtc *crtc);
+ 
+ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ 				  struct drm_file *file_priv,
+@@ -378,12 +477,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
+ extern bool radeon_combios_get_clock_info(struct drm_device *dev);
+ extern struct radeon_encoder_atom_dig *
+ radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
+-bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+-				   struct radeon_encoder_int_tmds *tmds);
+-bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+-					   struct radeon_encoder_int_tmds *tmds);
+-bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+-					    struct radeon_encoder_int_tmds *tmds);
++extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
++					  struct radeon_encoder_int_tmds *tmds);
++extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
++						     struct radeon_encoder_int_tmds *tmds);
++extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
++						   struct radeon_encoder_int_tmds *tmds);
++extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
++							 struct radeon_encoder_ext_tmds *tmds);
++extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
++						       struct radeon_encoder_ext_tmds *tmds);
+ extern struct radeon_encoder_primary_dac *
+ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
+ extern struct radeon_encoder_tv_dac *
+@@ -395,6 +498,8 @@ extern struct radeon_encoder_tv_dac *
+ radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
+ extern struct radeon_encoder_primary_dac *
+ radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
++extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
++extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
+ extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
+ extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
+ extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
+@@ -426,16 +531,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
+ 			       struct radeon_crtc *radeon_crtc);
+ void radeon_legacy_init_crtc(struct drm_device *dev,
+ 			     struct radeon_crtc *radeon_crtc);
+-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
++extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
+ 
+ void radeon_get_clock_info(struct drm_device *dev);
+ 
+ extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
+ extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
+ 
+-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
+-			   struct drm_display_mode *mode,
+-			   struct drm_display_mode *adjusted_mode);
+ void radeon_enc_destroy(struct drm_encoder *encoder);
+ void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
+ void radeon_combios_asic_init(struct drm_device *dev);
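
The mask/a/en/y register scheme documented in the new radeon_i2c_bus_rec
comment is the usual open-drain GPIO trick: the output latch ("a") is kept
low and the line is toggled by flipping the pin direction ("en"), so the bus
pull-up supplies the high level; "y" reads the line back.  A minimal sketch,
assuming the driver's RREG32()/WREG32() register accessors; example_set_clock
is a hypothetical helper, not part of the patch:

	static void example_set_clock(struct radeon_device *rdev,
				      struct radeon_i2c_bus_rec *rec, int high)
	{
		uint32_t val;

		/* grab the pin for software use ("mask" reg, 1=held) */
		val = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
		WREG32(rec->mask_clk_reg, val);

		/* keep the output latch low ("a" reg, 0=low) */
		val = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
		WREG32(rec->a_clk_reg, val);

		/* "en" reg: output drives the line low, input releases it
		 * so the bus pull-up takes it high
		 */
		val = RREG32(rec->en_clk_reg);
		if (high)
			val &= ~rec->en_clk_mask;
		else
			val |= rec->en_clk_mask;
		WREG32(rec->en_clk_reg, val);
	}
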
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 1f056da..f1da370 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -34,100 +34,62 @@
+ #include "radeon_drm.h"
+ #include "radeon.h"
+ 
+-struct radeon_object {
+-	struct ttm_buffer_object	tobj;
+-	struct list_head		list;
+-	struct radeon_device		*rdev;
+-	struct drm_gem_object		*gobj;
+-	struct ttm_bo_kmap_obj		kmap;
+-	unsigned			pin_count;
+-	uint64_t			gpu_addr;
+-	void				*kptr;
+-	bool				is_iomem;
+-	uint32_t			tiling_flags;
+-	uint32_t			pitch;
+-	int				surface_reg;
+-};
+ 
+ int radeon_ttm_init(struct radeon_device *rdev);
+ void radeon_ttm_fini(struct radeon_device *rdev);
++static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
+ 
+ /*
+  * To exclude mutual BO access we rely on bo_reserve exclusion, as all
+  * function are calling it.
+  */
+ 
+-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
++static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ {
+-	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
+-}
++	struct radeon_bo *bo;
+ 
+-static void radeon_object_unreserve(struct radeon_object *robj)
+-{
+-	ttm_bo_unreserve(&robj->tobj);
++	bo = container_of(tbo, struct radeon_bo, tbo);
++	mutex_lock(&bo->rdev->gem.mutex);
++	list_del_init(&bo->list);
++	mutex_unlock(&bo->rdev->gem.mutex);
++	radeon_bo_clear_surface_reg(bo);
++	kfree(bo);
+ }
+ 
+-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
++bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
+ {
+-	struct radeon_object *robj;
+-
+-	robj = container_of(tobj, struct radeon_object, tobj);
+-	list_del_init(&robj->list);
+-	radeon_object_clear_surface_reg(robj);
+-	kfree(robj);
++	if (bo->destroy == &radeon_ttm_bo_destroy)
++		return true;
++	return false;
+ }
+ 
+-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
++void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+ {
+-	/* Default gpu address */
+-	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+-	if (robj->tobj.mem.mm_node == NULL) {
+-		return;
+-	}
+-	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
+-	switch (robj->tobj.mem.mem_type) {
+-	case TTM_PL_VRAM:
+-		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
+-		break;
+-	case TTM_PL_TT:
+-		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
+-		break;
+-	default:
+-		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
+-		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
+-		return;
+-	}
+-}
++	u32 c = 0;
+ 
+-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
+-{
+-	uint32_t flags = 0;
+-	if (domain & RADEON_GEM_DOMAIN_VRAM) {
+-		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+-	}
+-	if (domain & RADEON_GEM_DOMAIN_GTT) {
+-		flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+-	}
+-	if (domain & RADEON_GEM_DOMAIN_CPU) {
+-		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
+-	}
+-	if (!flags) {
+-		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
+-	}
+-	return flags;
++	rbo->placement.fpfn = 0;
++	rbo->placement.lpfn = 0;
++	rbo->placement.placement = rbo->placements;
++	rbo->placement.busy_placement = rbo->placements;
++	if (domain & RADEON_GEM_DOMAIN_VRAM)
++		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
++					TTM_PL_FLAG_VRAM;
++	if (domain & RADEON_GEM_DOMAIN_GTT)
++		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
++	if (domain & RADEON_GEM_DOMAIN_CPU)
++		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
++	if (!c)
++		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
++	rbo->placement.num_placement = c;
++	rbo->placement.num_busy_placement = c;
+ }
+ 
+-int radeon_object_create(struct radeon_device *rdev,
+-			 struct drm_gem_object *gobj,
+-			 unsigned long size,
+-			 bool kernel,
+-			 uint32_t domain,
+-			 bool interruptible,
+-			 struct radeon_object **robj_ptr)
++int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
++			unsigned long size, bool kernel, u32 domain,
++			struct radeon_bo **bo_ptr)
+ {
+-	struct radeon_object *robj;
++	struct radeon_bo *bo;
+ 	enum ttm_bo_type type;
+-	uint32_t flags;
+ 	int r;
+ 
+ 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
+@@ -138,238 +100,161 @@ int radeon_object_create(struct radeon_device *rdev,
+ 	} else {
+ 		type = ttm_bo_type_device;
+ 	}
+-	*robj_ptr = NULL;
+-	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
+-	if (robj == NULL) {
++	*bo_ptr = NULL;
++	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
++	if (bo == NULL)
+ 		return -ENOMEM;
+-	}
+-	robj->rdev = rdev;
+-	robj->gobj = gobj;
+-	robj->surface_reg = -1;
+-	INIT_LIST_HEAD(&robj->list);
+-
+-	flags = radeon_object_flags_from_domain(domain);
+-	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
+-				   0, 0, false, NULL, size,
+-				   &radeon_ttm_object_object_destroy);
++	bo->rdev = rdev;
++	bo->gobj = gobj;
++	bo->surface_reg = -1;
++	INIT_LIST_HEAD(&bo->list);
++
++	radeon_ttm_placement_from_domain(bo, domain);
++	/* Kernel allocations are uninterruptible */
++	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
++			&bo->placement, 0, 0, !kernel, NULL, size,
++			&radeon_ttm_bo_destroy);
+ 	if (unlikely(r != 0)) {
+-		/* ttm call radeon_ttm_object_object_destroy if error happen */
+-		DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
+-			  size, flags, 0);
++		if (r != -ERESTARTSYS)
++			dev_err(rdev->dev,
++				"object_init failed for (%lu, 0x%08X)\n",
++				size, domain);
+ 		return r;
+ 	}
+-	*robj_ptr = robj;
++	*bo_ptr = bo;
+ 	if (gobj) {
+-		list_add_tail(&robj->list, &rdev->gem.objects);
++		mutex_lock(&bo->rdev->gem.mutex);
++		list_add_tail(&bo->list, &rdev->gem.objects);
++		mutex_unlock(&bo->rdev->gem.mutex);
+ 	}
+ 	return 0;
+ }
+ 
+-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
++int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
+ {
++	bool is_iomem;
+ 	int r;
+ 
+-	spin_lock(&robj->tobj.lock);
+-	if (robj->kptr) {
++	if (bo->kptr) {
+ 		if (ptr) {
+-			*ptr = robj->kptr;
++			*ptr = bo->kptr;
+ 		}
+-		spin_unlock(&robj->tobj.lock);
+ 		return 0;
+ 	}
+-	spin_unlock(&robj->tobj.lock);
+-	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
++	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ 	if (r) {
+ 		return r;
+ 	}
+-	spin_lock(&robj->tobj.lock);
+-	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
+-	spin_unlock(&robj->tobj.lock);
++	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+ 	if (ptr) {
+-		*ptr = robj->kptr;
++		*ptr = bo->kptr;
+ 	}
+-	radeon_object_check_tiling(robj, 0, 0);
++	radeon_bo_check_tiling(bo, 0, 0);
+ 	return 0;
+ }
+ 
+-void radeon_object_kunmap(struct radeon_object *robj)
++void radeon_bo_kunmap(struct radeon_bo *bo)
+ {
+-	spin_lock(&robj->tobj.lock);
+-	if (robj->kptr == NULL) {
+-		spin_unlock(&robj->tobj.lock);
++	if (bo->kptr == NULL)
+ 		return;
+-	}
+-	robj->kptr = NULL;
+-	spin_unlock(&robj->tobj.lock);
+-	radeon_object_check_tiling(robj, 0, 0);
+-	ttm_bo_kunmap(&robj->kmap);
++	bo->kptr = NULL;
++	radeon_bo_check_tiling(bo, 0, 0);
++	ttm_bo_kunmap(&bo->kmap);
+ }
+ 
+-void radeon_object_unref(struct radeon_object **robj)
++void radeon_bo_unref(struct radeon_bo **bo)
+ {
+-	struct ttm_buffer_object *tobj;
++	struct ttm_buffer_object *tbo;
+ 
+-	if ((*robj) == NULL) {
++	if ((*bo) == NULL)
+ 		return;
+-	}
+-	tobj = &((*robj)->tobj);
+-	ttm_bo_unref(&tobj);
+-	if (tobj == NULL) {
+-		*robj = NULL;
+-	}
+-}
+-
+-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
+-{
+-	*offset = robj->tobj.addr_space_offset;
+-	return 0;
++	tbo = &((*bo)->tbo);
++	ttm_bo_unref(&tbo);
++	if (tbo == NULL)
++		*bo = NULL;
+ }
+ 
+-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
+-		      uint64_t *gpu_addr)
++int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
+ {
+-	uint32_t flags;
+-	uint32_t tmp;
+-	int r;
++	int r, i;
+ 
+-	flags = radeon_object_flags_from_domain(domain);
+-	spin_lock(&robj->tobj.lock);
+-	if (robj->pin_count) {
+-		robj->pin_count++;
+-		if (gpu_addr != NULL) {
+-			*gpu_addr = robj->gpu_addr;
+-		}
+-		spin_unlock(&robj->tobj.lock);
++	radeon_ttm_placement_from_domain(bo, domain);
++	if (bo->pin_count) {
++		bo->pin_count++;
++		if (gpu_addr)
++			*gpu_addr = radeon_bo_gpu_offset(bo);
+ 		return 0;
+ 	}
+-	spin_unlock(&robj->tobj.lock);
+-	r = radeon_object_reserve(robj, false);
+-	if (unlikely(r != 0)) {
+-		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
+-		return r;
+-	}
+-	tmp = robj->tobj.mem.placement;
+-	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
+-	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
+-	r = ttm_buffer_object_validate(&robj->tobj,
+-				       robj->tobj.proposed_placement,
+-				       false, false);
+-	radeon_object_gpu_addr(robj);
+-	if (gpu_addr != NULL) {
+-		*gpu_addr = robj->gpu_addr;
+-	}
+-	robj->pin_count = 1;
+-	if (unlikely(r != 0)) {
+-		DRM_ERROR("radeon: failed to pin object.\n");
+-	}
+-	radeon_object_unreserve(robj);
++	radeon_ttm_placement_from_domain(bo, domain);
++	for (i = 0; i < bo->placement.num_placement; i++)
++		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
++	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
++	if (likely(r == 0)) {
++		bo->pin_count = 1;
++		if (gpu_addr != NULL)
++			*gpu_addr = radeon_bo_gpu_offset(bo);
++	}
++	if (unlikely(r != 0))
++		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+ 	return r;
+ }
+ 
+-void radeon_object_unpin(struct radeon_object *robj)
++int radeon_bo_unpin(struct radeon_bo *bo)
+ {
+-	uint32_t flags;
+-	int r;
++	int r, i;
+ 
+-	spin_lock(&robj->tobj.lock);
+-	if (!robj->pin_count) {
+-		spin_unlock(&robj->tobj.lock);
+-		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
+-		return;
+-	}
+-	robj->pin_count--;
+-	if (robj->pin_count) {
+-		spin_unlock(&robj->tobj.lock);
+-		return;
+-	}
+-	spin_unlock(&robj->tobj.lock);
+-	r = radeon_object_reserve(robj, false);
+-	if (unlikely(r != 0)) {
+-		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
+-		return;
+-	}
+-	flags = robj->tobj.mem.placement;
+-	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
+-	r = ttm_buffer_object_validate(&robj->tobj,
+-				       robj->tobj.proposed_placement,
+-				       false, false);
+-	if (unlikely(r != 0)) {
+-		DRM_ERROR("radeon: failed to unpin buffer.\n");
+-	}
+-	radeon_object_unreserve(robj);
+-}
+-
+-int radeon_object_wait(struct radeon_object *robj)
+-{
+-	int r = 0;
+-
+-	/* FIXME: should use block reservation instead */
+-	r = radeon_object_reserve(robj, true);
+-	if (unlikely(r != 0)) {
+-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+-		return r;
+-	}
+-	spin_lock(&robj->tobj.lock);
+-	if (robj->tobj.sync_obj) {
+-		r = ttm_bo_wait(&robj->tobj, true, true, false);
+-	}
+-	spin_unlock(&robj->tobj.lock);
+-	radeon_object_unreserve(robj);
+-	return r;
+-}
+-
+-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
+-{
+-	int r = 0;
+-
+-	r = radeon_object_reserve(robj, true);
+-	if (unlikely(r != 0)) {
+-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+-		return r;
+-	}
+-	spin_lock(&robj->tobj.lock);
+-	*cur_placement = robj->tobj.mem.mem_type;
+-	if (robj->tobj.sync_obj) {
+-		r = ttm_bo_wait(&robj->tobj, true, true, true);
++	if (!bo->pin_count) {
++		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
++		return 0;
+ 	}
+-	spin_unlock(&robj->tobj.lock);
+-	radeon_object_unreserve(robj);
++	bo->pin_count--;
++	if (bo->pin_count)
++		return 0;
++	for (i = 0; i < bo->placement.num_placement; i++)
++		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
++	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
++	if (unlikely(r != 0))
++		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+ 	return r;
+ }
+ 
+-int radeon_object_evict_vram(struct radeon_device *rdev)
++int radeon_bo_evict_vram(struct radeon_device *rdev)
+ {
+-	if (rdev->flags & RADEON_IS_IGP) {
+-		/* Useless to evict on IGP chips */
+-		return 0;
++	/* late 2.6.33 fix for IGP hibernation - we need pm ops to do this correctly */
++	if (0 && (rdev->flags & RADEON_IS_IGP)) {
++		if (rdev->mc.igp_sideport_enabled == false)
++			/* Useless to evict on IGP chips */
++			return 0;
+ 	}
+ 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+ }
+ 
+-void radeon_object_force_delete(struct radeon_device *rdev)
++void radeon_bo_force_delete(struct radeon_device *rdev)
+ {
+-	struct radeon_object *robj, *n;
++	struct radeon_bo *bo, *n;
+ 	struct drm_gem_object *gobj;
+ 
+ 	if (list_empty(&rdev->gem.objects)) {
+ 		return;
+ 	}
+-	DRM_ERROR("Userspace still has active objects !\n");
+-	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
++	dev_err(rdev->dev, "Userspace still has active objects !\n");
++	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ 		mutex_lock(&rdev->ddev->struct_mutex);
+-		gobj = robj->gobj;
+-		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
+-			  gobj, robj, (unsigned long)gobj->size,
+-			  *((unsigned long *)&gobj->refcount));
+-		list_del_init(&robj->list);
+-		radeon_object_unref(&robj);
++		gobj = bo->gobj;
++		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
++			gobj, bo, (unsigned long)gobj->size,
++			*((unsigned long *)&gobj->refcount));
++		mutex_lock(&bo->rdev->gem.mutex);
++		list_del_init(&bo->list);
++		mutex_unlock(&bo->rdev->gem.mutex);
++		radeon_bo_unref(&bo);
+ 		gobj->driver_private = NULL;
+ 		drm_gem_object_unreference(gobj);
+ 		mutex_unlock(&rdev->ddev->struct_mutex);
+ 	}
+ }
+ 
+-int radeon_object_init(struct radeon_device *rdev)
++int radeon_bo_init(struct radeon_device *rdev)
+ {
+ 	/* Add an MTRR for the VRAM */
+ 	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
+@@ -382,13 +267,13 @@ int radeon_object_init(struct radeon_device *rdev)
+ 	return radeon_ttm_init(rdev);
+ }
+ 
+-void radeon_object_fini(struct radeon_device *rdev)
++void radeon_bo_fini(struct radeon_device *rdev)
+ {
+ 	radeon_ttm_fini(rdev);
+ }
+ 
+-void radeon_object_list_add_object(struct radeon_object_list *lobj,
+-				   struct list_head *head)
++void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
++				struct list_head *head)
+ {
+ 	if (lobj->wdomain) {
+ 		list_add(&lobj->list, head);
+@@ -397,125 +282,102 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
+ 	}
+ }
+ 
+-int radeon_object_list_reserve(struct list_head *head)
++int radeon_bo_list_reserve(struct list_head *head)
+ {
+-	struct radeon_object_list *lobj;
++	struct radeon_bo_list *lobj;
+ 	int r;
+ 
+ 	list_for_each_entry(lobj, head, list){
+-		if (!lobj->robj->pin_count) {
+-			r = radeon_object_reserve(lobj->robj, true);
+-			if (unlikely(r != 0)) {
+-				DRM_ERROR("radeon: failed to reserve object.\n");
+-				return r;
+-			}
+-		} else {
+-		}
++		r = radeon_bo_reserve(lobj->bo, false);
++		if (unlikely(r != 0))
++			return r;
+ 	}
+ 	return 0;
+ }
+ 
+-void radeon_object_list_unreserve(struct list_head *head)
++void radeon_bo_list_unreserve(struct list_head *head)
+ {
+-	struct radeon_object_list *lobj;
++	struct radeon_bo_list *lobj;
+ 
+ 	list_for_each_entry(lobj, head, list) {
+-		if (!lobj->robj->pin_count) {
+-			radeon_object_unreserve(lobj->robj);
+-		}
++		/* only unreserve objects we successfully reserved */
++		if (radeon_bo_is_reserved(lobj->bo))
++			radeon_bo_unreserve(lobj->bo);
+ 	}
+ }
+ 
+-int radeon_object_list_validate(struct list_head *head, void *fence)
++int radeon_bo_list_validate(struct list_head *head)
+ {
+-	struct radeon_object_list *lobj;
+-	struct radeon_object *robj;
+-	struct radeon_fence *old_fence = NULL;
++	struct radeon_bo_list *lobj;
++	struct radeon_bo *bo;
+ 	int r;
+ 
+-	r = radeon_object_list_reserve(head);
++	r = radeon_bo_list_reserve(head);
+ 	if (unlikely(r != 0)) {
+-		radeon_object_list_unreserve(head);
+ 		return r;
+ 	}
+ 	list_for_each_entry(lobj, head, list) {
+-		robj = lobj->robj;
+-		if (!robj->pin_count) {
++		bo = lobj->bo;
++		if (!bo->pin_count) {
+ 			if (lobj->wdomain) {
+-				robj->tobj.proposed_placement =
+-					radeon_object_flags_from_domain(lobj->wdomain);
++				radeon_ttm_placement_from_domain(bo,
++								lobj->wdomain);
+ 			} else {
+-				robj->tobj.proposed_placement =
+-					radeon_object_flags_from_domain(lobj->rdomain);
++				radeon_ttm_placement_from_domain(bo,
++								lobj->rdomain);
+ 			}
+-			r = ttm_buffer_object_validate(&robj->tobj,
+-						       robj->tobj.proposed_placement,
+-						       true, false);
+-			if (unlikely(r)) {
+-				DRM_ERROR("radeon: failed to validate.\n");
++			r = ttm_bo_validate(&bo->tbo, &bo->placement,
++						true, false);
++			if (unlikely(r))
+ 				return r;
+-			}
+-			radeon_object_gpu_addr(robj);
+-		}
+-		lobj->gpu_offset = robj->gpu_addr;
+-		lobj->tiling_flags = robj->tiling_flags;
+-		if (fence) {
+-			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
+-			robj->tobj.sync_obj = radeon_fence_ref(fence);
+-			robj->tobj.sync_obj_arg = NULL;
+-		}
+-		if (old_fence) {
+-			radeon_fence_unref(&old_fence);
+ 		}
++		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
++		lobj->tiling_flags = bo->tiling_flags;
+ 	}
+ 	return 0;
+ }
+ 
+-void radeon_object_list_unvalidate(struct list_head *head)
++void radeon_bo_list_fence(struct list_head *head, void *fence)
+ {
+-	struct radeon_object_list *lobj;
++	struct radeon_bo_list *lobj;
++	struct radeon_bo *bo;
+ 	struct radeon_fence *old_fence = NULL;
+ 
+ 	list_for_each_entry(lobj, head, list) {
+-		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
+-		lobj->robj->tobj.sync_obj = NULL;
++		bo = lobj->bo;
++		spin_lock(&bo->tbo.lock);
++		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
++		bo->tbo.sync_obj = radeon_fence_ref(fence);
++		bo->tbo.sync_obj_arg = NULL;
++		spin_unlock(&bo->tbo.lock);
+ 		if (old_fence) {
+ 			radeon_fence_unref(&old_fence);
+ 		}
+ 	}
+-	radeon_object_list_unreserve(head);
+ }
+ 
+-void radeon_object_list_clean(struct list_head *head)
+-{
+-	radeon_object_list_unreserve(head);
+-}
+-
+-int radeon_object_fbdev_mmap(struct radeon_object *robj,
++int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ 			     struct vm_area_struct *vma)
+ {
+-	return ttm_fbdev_mmap(vma, &robj->tobj);
+-}
+-
+-unsigned long radeon_object_size(struct radeon_object *robj)
+-{
+-	return robj->tobj.num_pages << PAGE_SHIFT;
++	return ttm_fbdev_mmap(vma, &bo->tbo);
+ }
+ 
+-int radeon_object_get_surface_reg(struct radeon_object *robj)
++int radeon_bo_get_surface_reg(struct radeon_bo *bo)
+ {
+-	struct radeon_device *rdev = robj->rdev;
++	struct radeon_device *rdev = bo->rdev;
+ 	struct radeon_surface_reg *reg;
+-	struct radeon_object *old_object;
++	struct radeon_bo *old_object;
+ 	int steal;
+ 	int i;
+ 
+-	if (!robj->tiling_flags)
++	BUG_ON(!atomic_read(&bo->tbo.reserved));
++
++	if (!bo->tiling_flags)
+ 		return 0;
+ 
+-	if (robj->surface_reg >= 0) {
+-		reg = &rdev->surface_regs[robj->surface_reg];
+-		i = robj->surface_reg;
++	if (bo->surface_reg >= 0) {
++		reg = &rdev->surface_regs[bo->surface_reg];
++		i = bo->surface_reg;
+ 		goto out;
+ 	}
+ 
+@@ -523,10 +385,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
+ 	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+ 
+ 		reg = &rdev->surface_regs[i];
+-		if (!reg->robj)
++		if (!reg->bo)
+ 			break;
+ 
+-		old_object = reg->robj;
++		old_object = reg->bo;
+ 		if (old_object->pin_count == 0)
+ 			steal = i;
+ 	}
+@@ -537,91 +399,107 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
+ 			return -ENOMEM;
+ 		/* find someone with a surface reg and nuke their BO */
+ 		reg = &rdev->surface_regs[steal];
+-		old_object = reg->robj;
++		old_object = reg->bo;
+ 		/* blow away the mapping */
+ 		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+-		ttm_bo_unmap_virtual(&old_object->tobj);
++		ttm_bo_unmap_virtual(&old_object->tbo);
+ 		old_object->surface_reg = -1;
+ 		i = steal;
+ 	}
+ 
+-	robj->surface_reg = i;
+-	reg->robj = robj;
++	bo->surface_reg = i;
++	reg->bo = bo;
+ 
+ out:
+-	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
+-			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
+-			       robj->tobj.num_pages << PAGE_SHIFT);
++	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
++			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
++			       bo->tbo.num_pages << PAGE_SHIFT);
+ 	return 0;
+ }
+ 
+-void radeon_object_clear_surface_reg(struct radeon_object *robj)
++static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
+ {
+-	struct radeon_device *rdev = robj->rdev;
++	struct radeon_device *rdev = bo->rdev;
+ 	struct radeon_surface_reg *reg;
+ 
+-	if (robj->surface_reg == -1)
++	if (bo->surface_reg == -1)
+ 		return;
+ 
+-	reg = &rdev->surface_regs[robj->surface_reg];
+-	radeon_clear_surface_reg(rdev, robj->surface_reg);
++	reg = &rdev->surface_regs[bo->surface_reg];
++	radeon_clear_surface_reg(rdev, bo->surface_reg);
+ 
+-	reg->robj = NULL;
+-	robj->surface_reg = -1;
++	reg->bo = NULL;
++	bo->surface_reg = -1;
+ }
+ 
+-void radeon_object_set_tiling_flags(struct radeon_object *robj,
+-				    uint32_t tiling_flags, uint32_t pitch)
++int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
++				uint32_t tiling_flags, uint32_t pitch)
+ {
+-	robj->tiling_flags = tiling_flags;
+-	robj->pitch = pitch;
++	int r;
++
++	r = radeon_bo_reserve(bo, false);
++	if (unlikely(r != 0))
++		return r;
++	bo->tiling_flags = tiling_flags;
++	bo->pitch = pitch;
++	radeon_bo_unreserve(bo);
++	return 0;
+ }
+ 
+-void radeon_object_get_tiling_flags(struct radeon_object *robj,
+-				    uint32_t *tiling_flags,
+-				    uint32_t *pitch)
++void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
++				uint32_t *tiling_flags,
++				uint32_t *pitch)
+ {
++	BUG_ON(!atomic_read(&bo->tbo.reserved));
+ 	if (tiling_flags)
+-		*tiling_flags = robj->tiling_flags;
++		*tiling_flags = bo->tiling_flags;
+ 	if (pitch)
+-		*pitch = robj->pitch;
++		*pitch = bo->pitch;
+ }
+ 
+-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
+-			       bool force_drop)
++int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
++				bool force_drop)
+ {
+-	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
++	BUG_ON(!atomic_read(&bo->tbo.reserved));
++
++	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
+ 		return 0;
+ 
+ 	if (force_drop) {
+-		radeon_object_clear_surface_reg(robj);
++		radeon_bo_clear_surface_reg(bo);
+ 		return 0;
+ 	}
+ 
+-	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
++	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+ 		if (!has_moved)
+ 			return 0;
+ 
+-		if (robj->surface_reg >= 0)
+-			radeon_object_clear_surface_reg(robj);
++		if (bo->surface_reg >= 0)
++			radeon_bo_clear_surface_reg(bo);
+ 		return 0;
+ 	}
+ 
+-	if ((robj->surface_reg >= 0) && !has_moved)
++	if ((bo->surface_reg >= 0) && !has_moved)
+ 		return 0;
+ 
+-	return radeon_object_get_surface_reg(robj);
++	return radeon_bo_get_surface_reg(bo);
+ }
+ 
+ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+-			  struct ttm_mem_reg *mem)
++			   struct ttm_mem_reg *mem)
+ {
+-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
+-	radeon_object_check_tiling(robj, 0, 1);
++	struct radeon_bo *rbo;
++	if (!radeon_ttm_bo_is_radeon_bo(bo))
++		return;
++	rbo = container_of(bo, struct radeon_bo, tbo);
++	radeon_bo_check_tiling(rbo, 0, 1);
+ }
+ 
+ void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+ {
+-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
+-	radeon_object_check_tiling(robj, 0, 0);
++	struct radeon_bo *rbo;
++	if (!radeon_ttm_bo_is_radeon_bo(bo))
++		return;
++	rbo = container_of(bo, struct radeon_bo, tbo);
++	radeon_bo_check_tiling(rbo, 0, 0);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
+index 10e8af6..7ab43de 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.h
++++ b/drivers/gpu/drm/radeon/radeon_object.h
+@@ -28,19 +28,146 @@
+ #ifndef __RADEON_OBJECT_H__
+ #define __RADEON_OBJECT_H__
+ 
+-#include <ttm/ttm_bo_api.h>
+-#include <ttm/ttm_bo_driver.h>
+-#include <ttm/ttm_placement.h>
+-#include <ttm/ttm_module.h>
++#include <drm/radeon_drm.h>
++#include "radeon.h"
+ 
+-/*
+- * TTM.
++/**
++ * radeon_mem_type_to_domain - return domain corresponding to mem_type
++ * @mem_type:	ttm memory type
++ *
++ * Returns corresponding domain of the ttm mem_type
++ */
++static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
++{
++	switch (mem_type) {
++	case TTM_PL_VRAM:
++		return RADEON_GEM_DOMAIN_VRAM;
++	case TTM_PL_TT:
++		return RADEON_GEM_DOMAIN_GTT;
++	case TTM_PL_SYSTEM:
++		return RADEON_GEM_DOMAIN_CPU;
++	default:
++		break;
++	}
++	return 0;
++}
++
++/**
++ * radeon_bo_reserve - reserve bo
++ * @bo:		bo structure
++ * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
++ *
++ * Returns:
++ * -EBUSY: buffer is busy and @no_wait is true
++ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
++ * a signal. Release all buffer reservations and return to user-space.
+  */
+-struct radeon_mman {
+-	struct ttm_bo_global_ref        bo_global_ref;
+-	struct ttm_global_reference	mem_global_ref;
+-	bool				mem_global_referenced;
+-	struct ttm_bo_device		bdev;
+-};
++static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
++{
++	int r;
++
++	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
++	if (unlikely(r != 0)) {
++		if (r != -ERESTARTSYS)
++			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
++		return r;
++	}
++	return 0;
++}
++
++static inline void radeon_bo_unreserve(struct radeon_bo *bo)
++{
++	ttm_bo_unreserve(&bo->tbo);
++}
++
++/**
++ * radeon_bo_gpu_offset - return GPU offset of bo
++ * @bo:	radeon object for which we query the offset
++ *
++ * Returns current GPU offset of the object.
++ *
++ * Note: the object should be either pinned or reserved when calling this
++ * function; it might be useful to add a check for this for debugging.
++ */
++static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
++{
++	return bo->tbo.offset;
++}
++
++static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
++{
++	return bo->tbo.num_pages << PAGE_SHIFT;
++}
++
++static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
++{
++	return !!atomic_read(&bo->tbo.reserved);
++}
++
++/**
++ * radeon_bo_mmap_offset - return mmap offset of bo
++ * @bo:	radeon object for which we query the offset
++ *
++ * Returns mmap offset of the object.
++ *
++ * Note: addr_space_offset is constant after ttm bo init and thus isn't
++ * protected by any lock.
++ */
++static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
++{
++	return bo->tbo.addr_space_offset;
++}
++
++static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
++					bool no_wait)
++{
++	int r;
++
++	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
++	if (unlikely(r != 0)) {
++		if (r != -ERESTARTSYS)
++			dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
++		return r;
++	}
++	spin_lock(&bo->tbo.lock);
++	if (mem_type)
++		*mem_type = bo->tbo.mem.mem_type;
++	if (bo->tbo.sync_obj)
++		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
++	spin_unlock(&bo->tbo.lock);
++	ttm_bo_unreserve(&bo->tbo);
++	return r;
++}
+ 
++extern int radeon_bo_create(struct radeon_device *rdev,
++				struct drm_gem_object *gobj, unsigned long size,
++				bool kernel, u32 domain,
++				struct radeon_bo **bo_ptr);
++extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
++extern void radeon_bo_kunmap(struct radeon_bo *bo);
++extern void radeon_bo_unref(struct radeon_bo **bo);
++extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
++extern int radeon_bo_unpin(struct radeon_bo *bo);
++extern int radeon_bo_evict_vram(struct radeon_device *rdev);
++extern void radeon_bo_force_delete(struct radeon_device *rdev);
++extern int radeon_bo_init(struct radeon_device *rdev);
++extern void radeon_bo_fini(struct radeon_device *rdev);
++extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
++				struct list_head *head);
++extern int radeon_bo_list_reserve(struct list_head *head);
++extern void radeon_bo_list_unreserve(struct list_head *head);
++extern int radeon_bo_list_validate(struct list_head *head);
++extern void radeon_bo_list_fence(struct list_head *head, void *fence);
++extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
++				struct vm_area_struct *vma);
++extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
++				u32 tiling_flags, u32 pitch);
++extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
++				u32 *tiling_flags, u32 *pitch);
++extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
++				bool force_drop);
++extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
++					struct ttm_mem_reg *mem);
++extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
++extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+ #endif
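
Note that radeon_bo_wait() fills *mem_type before it waits, so a caller can
probe where a buffer currently lives even while it is busy.  A short sketch
(illustrative, assuming a valid bo pointer):

	u32 mem_type, domain;
	int r;

	r = radeon_bo_wait(bo, &mem_type, true);	/* no_wait probe */
	if (r == 0 || r == -EBUSY)
		/* mem_type is valid either way */
		domain = radeon_mem_type_to_domain(mem_type);
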
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 46146c6..8bce64c 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
+ int radeon_pm_init(struct radeon_device *rdev)
+ {
+ 	if (radeon_debugfs_pm_init(rdev)) {
+-		DRM_ERROR("Failed to register debugfs file for CP !\n");
++		DRM_ERROR("Failed to register debugfs file for PM!\n");
+ 	}
+ 
+ 	return 0;
+@@ -44,8 +44,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
+ 	struct drm_device *dev = node->minor->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+ 
+-	seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
+-	seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
++	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
++	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
++	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
++	if (rdev->asic->get_memory_clock)
++		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ 
+ 	return 0;
+ }
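
The "%u0 kHz" format above is not a typo: radeon keeps clock values in units
of 10 kHz, and the literal '0' appended after %u scales the printed number to
kHz.  For example (illustrative value):

	/* default_sclk == 68000 (10 kHz units, i.e. 680 MHz) prints as: */
	seq_printf(m, "default engine clock: %u0 kHz\n", 68000);
	/* -> "default engine clock: 680000 kHz" */
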
+diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
+index 29ab759..6d0a009 100644
+--- a/drivers/gpu/drm/radeon/radeon_reg.h
++++ b/drivers/gpu/drm/radeon/radeon_reg.h
+@@ -887,6 +887,7 @@
+ #       define RADEON_FP_PANEL_FORMAT          (1 <<  3)
+ #       define RADEON_FP_EN_TMDS               (1 <<  7)
+ #       define RADEON_FP_DETECT_SENSE          (1 <<  8)
++#       define RADEON_FP_DETECT_INT_POL        (1 <<  9)
+ #       define R200_FP_SOURCE_SEL_MASK         (3 <<  10)
+ #       define R200_FP_SOURCE_SEL_CRTC1        (0 <<  10)
+ #       define R200_FP_SOURCE_SEL_CRTC2        (1 <<  10)
+@@ -894,6 +895,7 @@
+ #       define R200_FP_SOURCE_SEL_TRANS        (3 <<  10)
+ #       define RADEON_FP_SEL_CRTC1             (0 << 13)
+ #       define RADEON_FP_SEL_CRTC2             (1 << 13)
++#       define R300_HPD_SEL(x)                 ((x) << 13)
+ #       define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
+ #       define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
+ #       define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
+@@ -909,6 +911,7 @@
+ #       define RADEON_FP2_ON                   (1 <<  2)
+ #       define RADEON_FP2_PANEL_FORMAT         (1 <<  3)
+ #       define RADEON_FP2_DETECT_SENSE         (1 <<  8)
++#       define RADEON_FP2_DETECT_INT_POL       (1 <<  9)
+ #       define R200_FP2_SOURCE_SEL_MASK        (3 << 10)
+ #       define R200_FP2_SOURCE_SEL_CRTC1       (0 << 10)
+ #       define R200_FP2_SOURCE_SEL_CRTC2       (1 << 10)
+@@ -988,14 +991,20 @@
+ 
+ #define RADEON_GEN_INT_CNTL                 0x0040
+ #	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
++#	define RADEON_FP_DETECT_MASK		(1 << 4)
+ #	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
++#	define RADEON_FP2_DETECT_MASK		(1 << 10)
+ #	define RADEON_SW_INT_ENABLE		(1 << 25)
+ #define RADEON_GEN_INT_STATUS               0x0044
+ #	define AVIVO_DISPLAY_INT_STATUS		(1 << 0)
+ #	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
+ #	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
++#	define RADEON_FP_DETECT_STAT		(1 << 4)
++#	define RADEON_FP_DETECT_STAT_ACK	(1 << 4)
+ #	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+ #	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
++#	define RADEON_FP2_DETECT_STAT		(1 << 10)
++#	define RADEON_FP2_DETECT_STAT_ACK	(1 << 10)
+ #	define RADEON_SW_INT_FIRE		(1 << 26)
+ #	define RADEON_SW_INT_TEST		(1 << 25)
+ #	define RADEON_SW_INT_TEST_ACK		(1 << 25)
+@@ -1051,20 +1060,25 @@
+ 
+        /* Multimedia I2C bus */
+ #define RADEON_I2C_CNTL_0		    0x0090
+-#define RADEON_I2C_DONE (1<<0)
+-#define RADEON_I2C_NACK (1<<1)
+-#define RADEON_I2C_HALT (1<<2)
+-#define RADEON_I2C_SOFT_RST (1<<5)
+-#define RADEON_I2C_DRIVE_EN (1<<6)
+-#define RADEON_I2C_DRIVE_SEL (1<<7)
+-#define RADEON_I2C_START (1<<8)
+-#define RADEON_I2C_STOP (1<<9)
+-#define RADEON_I2C_RECEIVE (1<<10)
+-#define RADEON_I2C_ABORT (1<<11)
+-#define RADEON_I2C_GO (1<<12)
++#define RADEON_I2C_DONE                     (1 << 0)
++#define RADEON_I2C_NACK                     (1 << 1)
++#define RADEON_I2C_HALT                     (1 << 2)
++#define RADEON_I2C_SOFT_RST                 (1 << 5)
++#define RADEON_I2C_DRIVE_EN                 (1 << 6)
++#define RADEON_I2C_DRIVE_SEL                (1 << 7)
++#define RADEON_I2C_START                    (1 << 8)
++#define RADEON_I2C_STOP                     (1 << 9)
++#define RADEON_I2C_RECEIVE                  (1 << 10)
++#define RADEON_I2C_ABORT                    (1 << 11)
++#define RADEON_I2C_GO                       (1 << 12)
++#define RADEON_I2C_PRESCALE_SHIFT           16
+ #define RADEON_I2C_CNTL_1                   0x0094
+-#define RADEON_I2C_SEL         (1<<16)
+-#define RADEON_I2C_EN          (1<<17)
++#define RADEON_I2C_DATA_COUNT_SHIFT         0
++#define RADEON_I2C_ADDR_COUNT_SHIFT         4
++#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT   8
++#define RADEON_I2C_SEL                      (1 << 16)
++#define RADEON_I2C_EN                       (1 << 17)
++#define RADEON_I2C_TIME_LIMIT_SHIFT         24
+ #define RADEON_I2C_DATA			    0x0098
+ 
+ #define RADEON_DVI_I2C_CNTL_0		    0x02e0
+@@ -1072,7 +1086,7 @@
+ #       define R200_SEL_DDC1                0 /* 0x60 - VGA_DDC */
+ #       define R200_SEL_DDC2                1 /* 0x64 - DVI_DDC */
+ #       define R200_SEL_DDC3                2 /* 0x68 - MONID_DDC */
+-#define RADEON_DVI_I2C_CNTL_1               0x02e4 /* ? */
++#define RADEON_DVI_I2C_CNTL_1               0x02e4
+ #define RADEON_DVI_I2C_DATA		    0x02e8
+ 
+ #define RADEON_INTERRUPT_LINE               0x0f3c /* PCI */
+@@ -1143,15 +1157,16 @@
+ #       define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
+ #       define RADEON_MC_MCLK_DYN_ENABLE    (1 << 14)
+ #       define RADEON_IO_MCLK_DYN_ENABLE    (1 << 15)
+-#define RADEON_LCD_GPIO_MASK                0x01a0
+-#define RADEON_GPIOPAD_EN                   0x01a0
+-#define RADEON_LCD_GPIO_Y_REG               0x01a4
+-#define RADEON_MDGPIO_A_REG                 0x01ac
+-#define RADEON_MDGPIO_EN_REG                0x01b0
+-#define RADEON_MDGPIO_MASK                  0x0198
++
+ #define RADEON_GPIOPAD_MASK                 0x0198
+ #define RADEON_GPIOPAD_A		    0x019c
+-#define RADEON_MDGPIO_Y_REG                 0x01b4
++#define RADEON_GPIOPAD_EN                   0x01a0
++#define RADEON_GPIOPAD_Y                    0x01a4
++#define RADEON_MDGPIO_MASK                  0x01a8
++#define RADEON_MDGPIO_A                     0x01ac
++#define RADEON_MDGPIO_EN                    0x01b0
++#define RADEON_MDGPIO_Y                     0x01b4
++
+ #define RADEON_MEM_ADDR_CONFIG              0x0148
+ #define RADEON_MEM_BASE                     0x0f10 /* PCI */
+ #define RADEON_MEM_CNTL                     0x0140
+@@ -1360,6 +1375,9 @@
+ #define RADEON_OVR_CLR                      0x0230
+ #define RADEON_OVR_WID_LEFT_RIGHT           0x0234
+ #define RADEON_OVR_WID_TOP_BOTTOM           0x0238
++#define RADEON_OVR2_CLR                     0x0330
++#define RADEON_OVR2_WID_LEFT_RIGHT          0x0334
++#define RADEON_OVR2_WID_TOP_BOTTOM          0x0338
+ 
+ /* first capture unit */
+ 
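
The new FP/FP2 detect bits wire legacy hot-plug detection into the interrupt
path: unmask the source in GEN_INT_CNTL, then test and ack it in
GEN_INT_STATUS.  A sketch, assuming the driver's RREG32()/WREG32() accessors
(not a quote from the patch):

	uint32_t tmp, status;

	/* unmask hot-plug detect for the first DVI port */
	tmp = RREG32(RADEON_GEN_INT_CNTL);
	WREG32(RADEON_GEN_INT_CNTL, tmp | RADEON_FP_DETECT_MASK);

	/* in the interrupt handler: test, then ack */
	status = RREG32(RADEON_GEN_INT_STATUS);
	if (status & RADEON_FP_DETECT_STAT)
		WREG32(RADEON_GEN_INT_STATUS, RADEON_FP_DETECT_STAT_ACK);
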
+diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
+index 747b4bf..6579eb4 100644
+--- a/drivers/gpu/drm/radeon/radeon_ring.c
++++ b/drivers/gpu/drm/radeon/radeon_ring.c
+@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+ {
+ 	struct radeon_fence *fence;
+ 	struct radeon_ib *nib;
+-	unsigned long i;
+-	int r = 0;
++	int r = 0, i, c;
+ 
+ 	*ib = NULL;
+ 	r = radeon_fence_create(rdev, &fence);
+ 	if (r) {
+-		DRM_ERROR("failed to create fence for new IB\n");
++		dev_err(rdev->dev, "failed to create fence for new IB\n");
+ 		return r;
+ 	}
+ 	mutex_lock(&rdev->ib_pool.mutex);
+-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+-	if (i < RADEON_IB_POOL_SIZE) {
+-		set_bit(i, rdev->ib_pool.alloc_bm);
+-		rdev->ib_pool.ibs[i].length_dw = 0;
+-		*ib = &rdev->ib_pool.ibs[i];
+-		mutex_unlock(&rdev->ib_pool.mutex);
+-		goto out;
++	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
++		i &= (RADEON_IB_POOL_SIZE - 1);
++		if (rdev->ib_pool.ibs[i].free) {
++			nib = &rdev->ib_pool.ibs[i];
++			break;
++		}
+ 	}
+-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
+-		/* we go do nothings here */
++	if (nib == NULL) {
++		/* This should never happen: it means we allocated all
++		 * IBs and haven't scheduled any yet.  Return -EBUSY to
++		 * userspace in the hope that a retry of the ioctl
++		 * gets better luck.
++		 */
++		dev_err(rdev->dev, "no free indirect buffer!\n");
+ 		mutex_unlock(&rdev->ib_pool.mutex);
+-		DRM_ERROR("all IB allocated none scheduled.\n");
+-		r = -EINVAL;
+-		goto out;
++		radeon_fence_unref(&fence);
++		return -EBUSY;
+ 	}
+-	/* get the first ib on the scheduled list */
+-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
+-			 struct radeon_ib, list);
+-	if (nib->fence == NULL) {
+-		/* we go do nothings here */
++	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
++	nib->free = false;
++	if (nib->fence) {
+ 		mutex_unlock(&rdev->ib_pool.mutex);
+-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
+-		r = -EINVAL;
+-		goto out;
+-	}
+-	mutex_unlock(&rdev->ib_pool.mutex);
+-
+-	r = radeon_fence_wait(nib->fence, false);
+-	if (r) {
+-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
+-			  (unsigned long)nib->gpu_addr, nib->length_dw);
+-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
+-		goto out;
++		r = radeon_fence_wait(nib->fence, false);
++		if (r) {
++			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
++				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
++			mutex_lock(&rdev->ib_pool.mutex);
++			nib->free = true;
++			mutex_unlock(&rdev->ib_pool.mutex);
++			radeon_fence_unref(&fence);
++			return r;
++		}
++		mutex_lock(&rdev->ib_pool.mutex);
+ 	}
+ 	radeon_fence_unref(&nib->fence);
+-
++	nib->fence = fence;
+ 	nib->length_dw = 0;
+-
+-	/* scheduled list is accessed here */
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	list_del(&nib->list);
+-	INIT_LIST_HEAD(&nib->list);
+ 	mutex_unlock(&rdev->ib_pool.mutex);
+-
+ 	*ib = nib;
+-out:
+-	if (r) {
+-		radeon_fence_unref(&fence);
+-	} else {
+-		(*ib)->fence = fence;
+-	}
+-	return r;
++	return 0;
+ }
+ 
+ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
+ 	if (tmp == NULL) {
+ 		return;
+ 	}
+-	mutex_lock(&rdev->ib_pool.mutex);
+-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
+-		/* IB is scheduled & not signaled don't do anythings */
+-		mutex_unlock(&rdev->ib_pool.mutex);
+-		return;
+-	}
+-	list_del(&tmp->list);
+-	INIT_LIST_HEAD(&tmp->list);
+-	if (tmp->fence)
++	if (!tmp->fence->emited)
+ 		radeon_fence_unref(&tmp->fence);
+-
+-	tmp->length_dw = 0;
+-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
++	mutex_lock(&rdev->ib_pool.mutex);
++	tmp->free = true;
+ 	mutex_unlock(&rdev->ib_pool.mutex);
+ }
+ 
+@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+ 
+ 	if (!ib->length_dw || !rdev->cp.ready) {
+ 		/* TODO: Nothings in the ib we should report. */
+-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
++		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
+ 		return -EINVAL;
+ 	}
+ 
+@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
+ 	radeon_ring_ib_execute(rdev, ib);
+ 	radeon_fence_emit(rdev, ib->fence);
+ 	mutex_lock(&rdev->ib_pool.mutex);
+-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
++	/* once scheduled, the IB is considered free and is protected by its fence */
++	ib->free = true;
+ 	mutex_unlock(&rdev->ib_pool.mutex);
+ 	radeon_ring_unlock_commit(rdev);
+ 	return 0;
+@@ -164,20 +143,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
+ 	if (rdev->ib_pool.robj)
+ 		return 0;
+ 	/* Allocate 1M object buffer */
+-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
+-	r = radeon_object_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
+-				 true, RADEON_GEM_DOMAIN_GTT,
+-				 false, &rdev->ib_pool.robj);
++	r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
++				true, RADEON_GEM_DOMAIN_GTT,
++				&rdev->ib_pool.robj);
+ 	if (r) {
+ 		DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
+ 		return r;
+ 	}
+-	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
++	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
++	if (unlikely(r != 0))
++		return r;
++	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+ 	if (r) {
++		radeon_bo_unreserve(rdev->ib_pool.robj);
+ 		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
+ 		return r;
+ 	}
+-	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
++	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
++	radeon_bo_unreserve(rdev->ib_pool.robj);
+ 	if (r) {
+ 		DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
+ 		return r;
+@@ -190,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
+ 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
+ 		rdev->ib_pool.ibs[i].idx = i;
+ 		rdev->ib_pool.ibs[i].length_dw = 0;
+-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
++		rdev->ib_pool.ibs[i].free = true;
+ 	}
+-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
++	rdev->ib_pool.head_id = 0;
+ 	rdev->ib_pool.ready = true;
+ 	DRM_INFO("radeon: ib pool ready.\n");
+ 	if (radeon_debugfs_ib_init(rdev)) {
+@@ -203,14 +186,20 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
+ 
+ void radeon_ib_pool_fini(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	if (!rdev->ib_pool.ready) {
+ 		return;
+ 	}
+ 	mutex_lock(&rdev->ib_pool.mutex);
+-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ 	if (rdev->ib_pool.robj) {
+-		radeon_object_kunmap(rdev->ib_pool.robj);
+-		radeon_object_unref(&rdev->ib_pool.robj);
++		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
++		if (likely(r == 0)) {
++			radeon_bo_kunmap(rdev->ib_pool.robj);
++			radeon_bo_unpin(rdev->ib_pool.robj);
++			radeon_bo_unreserve(rdev->ib_pool.robj);
++		}
++		radeon_bo_unref(&rdev->ib_pool.robj);
+ 		rdev->ib_pool.robj = NULL;
+ 	}
+ 	mutex_unlock(&rdev->ib_pool.mutex);
+@@ -288,29 +277,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+ 	rdev->cp.ring_size = ring_size;
+ 	/* Allocate ring buffer */
+ 	if (rdev->cp.ring_obj == NULL) {
+-		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
+-					 true,
+-					 RADEON_GEM_DOMAIN_GTT,
+-					 false,
+-					 &rdev->cp.ring_obj);
++		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
++					RADEON_GEM_DOMAIN_GTT,
++					&rdev->cp.ring_obj);
+ 		if (r) {
+-			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
+-			mutex_unlock(&rdev->cp.mutex);
++			dev_err(rdev->dev, "(%d) ring create failed\n", r);
+ 			return r;
+ 		}
+-		r = radeon_object_pin(rdev->cp.ring_obj,
+-				      RADEON_GEM_DOMAIN_GTT,
+-				      &rdev->cp.gpu_addr);
++		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
++		if (unlikely(r != 0))
++			return r;
++		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
++					&rdev->cp.gpu_addr);
+ 		if (r) {
+-			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
+-			mutex_unlock(&rdev->cp.mutex);
++			radeon_bo_unreserve(rdev->cp.ring_obj);
++			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
+ 			return r;
+ 		}
+-		r = radeon_object_kmap(rdev->cp.ring_obj,
++		r = radeon_bo_kmap(rdev->cp.ring_obj,
+ 				       (void **)&rdev->cp.ring);
++		radeon_bo_unreserve(rdev->cp.ring_obj);
+ 		if (r) {
+-			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
+-			mutex_unlock(&rdev->cp.mutex);
++			dev_err(rdev->dev, "(%d) ring map failed\n", r);
+ 			return r;
+ 		}
+ 	}
+@@ -321,11 +309,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+ 
+ void radeon_ring_fini(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	mutex_lock(&rdev->cp.mutex);
+ 	if (rdev->cp.ring_obj) {
+-		radeon_object_kunmap(rdev->cp.ring_obj);
+-		radeon_object_unpin(rdev->cp.ring_obj);
+-		radeon_object_unref(&rdev->cp.ring_obj);
++		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
++		if (likely(r == 0)) {
++			radeon_bo_kunmap(rdev->cp.ring_obj);
++			radeon_bo_unpin(rdev->cp.ring_obj);
++			radeon_bo_unreserve(rdev->cp.ring_obj);
++		}
++		radeon_bo_unref(&rdev->cp.ring_obj);
+ 		rdev->cp.ring = NULL;
+ 		rdev->cp.ring_obj = NULL;
+ 	}
+@@ -346,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+ 	if (ib == NULL) {
+ 		return 0;
+ 	}
+-	seq_printf(m, "IB %04lu\n", ib->idx);
++	seq_printf(m, "IB %04u\n", ib->idx);
+ 	seq_printf(m, "IB fence %p\n", ib->fence);
+ 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ 	for (i = 0; i < ib->length_dw; i++) {
+diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
+index 38537d9..067167c 100644
+--- a/drivers/gpu/drm/radeon/radeon_state.c
++++ b/drivers/gpu/drm/radeon/radeon_state.c
+@@ -1950,7 +1950,7 @@ static void radeon_apply_surface_regs(int surf_index,
+  * Note that refcount can be at most 2, since during a free refcount=3
+  * might mean we have to allocate a new surface which might not always
+  * be available.
+- * For example : we allocate three contigous surfaces ABC. If B is
++ * For example : we allocate three contiguous surfaces ABC. If B is
+  * freed, we suddenly need two surfaces to store A and C, which might
+  * not always be available.
+  */
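
Stepping back to the radeon_ring.c changes: the ib_pool no longer uses an
allocation bitmap plus a scheduled list, but a circular scan from head_id
(the i &= (RADEON_IB_POOL_SIZE - 1) wrap assumes the pool size is a power of
two) with a free flag that the fence protects.  From a caller's point of
view the lifecycle looks roughly like this (PACKET0 and the emitted values
are illustrative):

	struct radeon_ib *ib;
	int r;

	r = radeon_ib_get(rdev, &ib);	/* may wait on the oldest fence */
	if (r)
		return r;
	ib->ptr[ib->length_dw++] = PACKET0(reg, 0);	/* emit commands */
	ib->ptr[ib->length_dw++] = val;
	r = radeon_ib_schedule(rdev, ib);	/* fences it, marks it free */
	radeon_ib_free(rdev, &ib);	/* drops the fence if never emitted */
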
+diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
+index c8942ca..9f5e2f9 100644
+--- a/drivers/gpu/drm/radeon/radeon_test.c
++++ b/drivers/gpu/drm/radeon/radeon_test.c
+@@ -30,8 +30,8 @@
+ /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
+ void radeon_test_moves(struct radeon_device *rdev)
+ {
+-	struct radeon_object *vram_obj = NULL;
+-	struct radeon_object **gtt_obj = NULL;
++	struct radeon_bo *vram_obj = NULL;
++	struct radeon_bo **gtt_obj = NULL;
+ 	struct radeon_fence *fence = NULL;
+ 	uint64_t gtt_addr, vram_addr;
+ 	unsigned i, n, size;
+@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 		goto out_cleanup;
+ 	}
+ 
+-	r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+-				 false, &vram_obj);
++	r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
++				&vram_obj);
+ 	if (r) {
+ 		DRM_ERROR("Failed to create VRAM object\n");
+ 		goto out_cleanup;
+ 	}
+-
+-	r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
++	r = radeon_bo_reserve(vram_obj, false);
++	if (unlikely(r != 0))
++		goto out_cleanup;
++	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+ 	if (r) {
+ 		DRM_ERROR("Failed to pin VRAM object\n");
+ 		goto out_cleanup;
+ 	}
+-
+ 	for (i = 0; i < n; i++) {
+ 		void *gtt_map, *vram_map;
+ 		void **gtt_start, **gtt_end;
+ 		void **vram_start, **vram_end;
+ 
+-		r = radeon_object_create(rdev, NULL, size, true,
+-					 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
++		r = radeon_bo_create(rdev, NULL, size, true,
++					 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
+ 		if (r) {
+ 			DRM_ERROR("Failed to create GTT object %d\n", i);
+ 			goto out_cleanup;
+ 		}
+ 
+-		r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
++		r = radeon_bo_reserve(gtt_obj[i], false);
++		if (unlikely(r != 0))
++			goto out_cleanup;
++		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+ 		if (r) {
+ 			DRM_ERROR("Failed to pin GTT object %d\n", i);
+ 			goto out_cleanup;
+ 		}
+ 
+-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
++		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+ 		if (r) {
+ 			DRM_ERROR("Failed to map GTT object %d\n", i);
+ 			goto out_cleanup;
+@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 		     gtt_start++)
+ 			*gtt_start = gtt_start;
+ 
+-		radeon_object_kunmap(gtt_obj[i]);
++		radeon_bo_kunmap(gtt_obj[i]);
+ 
+ 		r = radeon_fence_create(rdev, &fence);
+ 		if (r) {
+@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 
+ 		radeon_fence_unref(&fence);
+ 
+-		r = radeon_object_kmap(vram_obj, &vram_map);
++		r = radeon_bo_kmap(vram_obj, &vram_map);
+ 		if (r) {
+ 			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
+ 			goto out_cleanup;
+@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 					  "expected 0x%p (GTT map 0x%p-0x%p)\n",
+ 					  i, *vram_start, gtt_start, gtt_map,
+ 					  gtt_end);
+-				radeon_object_kunmap(vram_obj);
++				radeon_bo_kunmap(vram_obj);
+ 				goto out_cleanup;
+ 			}
+ 			*vram_start = vram_start;
+ 		}
+ 
+-		radeon_object_kunmap(vram_obj);
++		radeon_bo_kunmap(vram_obj);
+ 
+ 		r = radeon_fence_create(rdev, &fence);
+ 		if (r) {
+@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 
+ 		radeon_fence_unref(&fence);
+ 
+-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
++		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
+ 		if (r) {
+ 			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
+ 			goto out_cleanup;
+@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 					  "expected 0x%p (VRAM map 0x%p-0x%p)\n",
+ 					  i, *gtt_start, vram_start, vram_map,
+ 					  vram_end);
+-				radeon_object_kunmap(gtt_obj[i]);
++				radeon_bo_kunmap(gtt_obj[i]);
+ 				goto out_cleanup;
+ 			}
+ 		}
+ 
+-		radeon_object_kunmap(gtt_obj[i]);
++		radeon_bo_kunmap(gtt_obj[i]);
+ 
+ 		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
+ 			 gtt_addr - rdev->mc.gtt_location);
+@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
+ 
+ out_cleanup:
+ 	if (vram_obj) {
+-		radeon_object_unpin(vram_obj);
+-		radeon_object_unref(&vram_obj);
++		if (radeon_bo_is_reserved(vram_obj)) {
++			radeon_bo_unpin(vram_obj);
++			radeon_bo_unreserve(vram_obj);
++		}
++		radeon_bo_unref(&vram_obj);
+ 	}
+ 	if (gtt_obj) {
+ 		for (i = 0; i < n; i++) {
+ 			if (gtt_obj[i]) {
+-				radeon_object_unpin(gtt_obj[i]);
+-				radeon_object_unref(&gtt_obj[i]);
++				if (radeon_bo_is_reserved(gtt_obj[i])) {
++					radeon_bo_unpin(gtt_obj[i]);
++					radeon_bo_unreserve(gtt_obj[i]);
++				}
++				radeon_bo_unref(&gtt_obj[i]);
+ 			}
+ 		}
+ 		kfree(gtt_obj);
+@@ -206,4 +216,3 @@ out_cleanup:
+ 		printk(KERN_WARNING "Error while testing BO move.\n");
+ 	}
+ }
+-
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 1381e06..58b5adf 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ 		man->default_caching = TTM_PL_FLAG_CACHED;
+ 		break;
+ 	case TTM_PL_TT:
+-		man->gpu_offset = 0;
++		man->gpu_offset = rdev->mc.gtt_location;
+ 		man->available_caching = TTM_PL_MASK_CACHING;
+ 		man->default_caching = TTM_PL_FLAG_CACHED;
+ 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
+@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ 		break;
+ 	case TTM_PL_VRAM:
+ 		/* "On-card" video ram */
+-		man->gpu_offset = 0;
++		man->gpu_offset = rdev->mc.vram_location;
+ 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ 			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
+ 			     TTM_MEMTYPE_FLAG_MAPPABLE;
+@@ -197,16 +197,34 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+ 	return 0;
+ }
+ 
+-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
++static void radeon_evict_flags(struct ttm_buffer_object *bo,
++				struct ttm_placement *placement)
+ {
+-	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
++	struct radeon_bo *rbo;
++	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ 
++	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
++		placement->fpfn = 0;
++		placement->lpfn = 0;
++		placement->placement = &placements;
++		placement->busy_placement = &placements;
++		placement->num_placement = 1;
++		placement->num_busy_placement = 1;
++		return;
++	}
++	rbo = container_of(bo, struct radeon_bo, tbo);
+ 	switch (bo->mem.mem_type) {
++	case TTM_PL_VRAM:
++		if (rbo->rdev->cp.ready == false)
++			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
++		else
++			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
++		break;
++	case TTM_PL_TT:
+ 	default:
+-		return (cur_placement & ~TTM_PL_MASK_CACHING) |
+-			TTM_PL_FLAG_SYSTEM |
+-			TTM_PL_FLAG_CACHED;
++		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
+ 	}
++	*placement = rbo->placement;
+ }
+ 
+ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+@@ -283,14 +301,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
+ 	struct radeon_device *rdev;
+ 	struct ttm_mem_reg *old_mem = &bo->mem;
+ 	struct ttm_mem_reg tmp_mem;
+-	uint32_t proposed_placement;
++	u32 placements;
++	struct ttm_placement placement;
+ 	int r;
+ 
+ 	rdev = radeon_get_rdev(bo->bdev);
+ 	tmp_mem = *new_mem;
+ 	tmp_mem.mm_node = NULL;
+-	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+-	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
++	placement.fpfn = 0;
++	placement.lpfn = 0;
++	placement.num_placement = 1;
++	placement.placement = &placements;
++	placement.num_busy_placement = 1;
++	placement.busy_placement = &placements;
++	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
++	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+ 			     interruptible, no_wait);
+ 	if (unlikely(r)) {
+ 		return r;
+@@ -329,15 +354,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
+ 	struct radeon_device *rdev;
+ 	struct ttm_mem_reg *old_mem = &bo->mem;
+ 	struct ttm_mem_reg tmp_mem;
+-	uint32_t proposed_flags;
++	struct ttm_placement placement;
++	u32 placements;
+ 	int r;
+ 
+ 	rdev = radeon_get_rdev(bo->bdev);
+ 	tmp_mem = *new_mem;
+ 	tmp_mem.mm_node = NULL;
+-	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
+-	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
+-			     interruptible, no_wait);
++	placement.fpfn = 0;
++	placement.lpfn = 0;
++	placement.num_placement = 1;
++	placement.placement = &placements;
++	placement.num_busy_placement = 1;
++	placement.busy_placement = &placements;
++	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
++	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+ 	if (unlikely(r)) {
+ 		return r;
+ 	}
+@@ -378,7 +409,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
+ 	     new_mem->mem_type == TTM_PL_SYSTEM) ||
+ 	    (old_mem->mem_type == TTM_PL_SYSTEM &&
+ 	     new_mem->mem_type == TTM_PL_TT)) {
+-		/* bind is enought */
++		/* bind is enough */
+ 		radeon_move_null(bo, new_mem);
+ 		return 0;
+ 	}
+@@ -407,18 +438,6 @@ memcpy:
+ 	return r;
+ }
+ 
+-const uint32_t radeon_mem_prios[] = {
+-	TTM_PL_VRAM,
+-	TTM_PL_TT,
+-	TTM_PL_SYSTEM,
+-};
+-
+-const uint32_t radeon_busy_prios[] = {
+-	TTM_PL_TT,
+-	TTM_PL_VRAM,
+-	TTM_PL_SYSTEM,
+-};
+-
+ static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
+ 				bool lazy, bool interruptible)
+ {
+@@ -446,10 +465,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
+ }
+ 
+ static struct ttm_bo_driver radeon_bo_driver = {
+-	.mem_type_prio = radeon_mem_prios,
+-	.mem_busy_prio = radeon_busy_prios,
+-	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
+-	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
+ 	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+ 	.invalidate_caches = &radeon_invalidate_caches,
+ 	.init_mem_type = &radeon_init_mem_type,
+@@ -482,27 +497,32 @@ int radeon_ttm_init(struct radeon_device *rdev)
+ 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ 		return r;
+ 	}
+-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
+-			   ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
++	rdev->mman.initialized = true;
++	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
++				rdev->mc.real_vram_size >> PAGE_SHIFT);
+ 	if (r) {
+ 		DRM_ERROR("Failed initializing VRAM heap.\n");
+ 		return r;
+ 	}
+-	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
+-				 RADEON_GEM_DOMAIN_VRAM, false,
+-				 &rdev->stollen_vga_memory);
++	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
++				RADEON_GEM_DOMAIN_VRAM,
++				&rdev->stollen_vga_memory);
+ 	if (r) {
+ 		return r;
+ 	}
+-	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
++	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
++	if (r)
++		return r;
++	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
++	radeon_bo_unreserve(rdev->stollen_vga_memory);
+ 	if (r) {
+-		radeon_object_unref(&rdev->stollen_vga_memory);
++		radeon_bo_unref(&rdev->stollen_vga_memory);
+ 		return r;
+ 	}
+ 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
+ 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
+-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
+-			   ((rdev->mc.gtt_size) >> PAGE_SHIFT));
++	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
++				rdev->mc.gtt_size >> PAGE_SHIFT);
+ 	if (r) {
+ 		DRM_ERROR("Failed initializing GTT heap.\n");
+ 		return r;
+@@ -523,15 +543,24 @@ int radeon_ttm_init(struct radeon_device *rdev)
+ 
+ void radeon_ttm_fini(struct radeon_device *rdev)
+ {
++	int r;
++
++	if (!rdev->mman.initialized)
++		return;
+ 	if (rdev->stollen_vga_memory) {
+-		radeon_object_unpin(rdev->stollen_vga_memory);
+-		radeon_object_unref(&rdev->stollen_vga_memory);
++		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
++		if (r == 0) {
++			radeon_bo_unpin(rdev->stollen_vga_memory);
++			radeon_bo_unreserve(rdev->stollen_vga_memory);
++		}
++		radeon_bo_unref(&rdev->stollen_vga_memory);
+ 	}
+ 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+ 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
+ 	ttm_bo_device_release(&rdev->mman.bdev);
+ 	radeon_gart_fini(rdev);
+ 	radeon_ttm_global_fini(rdev);
++	rdev->mman.initialized = false;
+ 	DRM_INFO("radeon: ttm finalized\n");
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/r200 b/drivers/gpu/drm/radeon/reg_srcs/r200
+index 6021c88..c29ac43 100644
+--- a/drivers/gpu/drm/radeon/reg_srcs/r200
++++ b/drivers/gpu/drm/radeon/reg_srcs/r200
+@@ -91,6 +91,8 @@ r200 0x3294
+ 0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
+ 0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
+ 0x22c4 SE_TCL_POINT_SPRITE_CNTL
++0x22d0 SE_PVS_CNTL
++0x22d4 SE_PVS_CONST_CNTL
+ 0x2648 RE_POINTSIZE
+ 0x26c0 RE_TOP_LEFT
+ 0x26c4 RE_MISC
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
+new file mode 100644
+index 0000000..989f7a0
+--- /dev/null
++++ b/drivers/gpu/drm/radeon/reg_srcs/r420
+@@ -0,0 +1,795 @@
++r420 0x4f60
++0x1434 SRC_Y_X
++0x1438 DST_Y_X
++0x143C DST_HEIGHT_WIDTH
++0x146C DP_GUI_MASTER_CNTL
++0x1474 BRUSH_Y_X
++0x1478 DP_BRUSH_BKGD_CLR
++0x147C DP_BRUSH_FRGD_CLR
++0x1480 BRUSH_DATA0
++0x1484 BRUSH_DATA1
++0x1598 DST_WIDTH_HEIGHT
++0x15C0 CLR_CMP_CNTL
++0x15C4 CLR_CMP_CLR_SRC
++0x15C8 CLR_CMP_CLR_DST
++0x15CC CLR_CMP_MSK
++0x15D8 DP_SRC_FRGD_CLR
++0x15DC DP_SRC_BKGD_CLR
++0x1600 DST_LINE_START
++0x1604 DST_LINE_END
++0x1608 DST_LINE_PATCOUNT
++0x16C0 DP_CNTL
++0x16CC DP_WRITE_MSK
++0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
++0x16E8 DEFAULT_SC_BOTTOM_RIGHT
++0x16EC SC_TOP_LEFT
++0x16F0 SC_BOTTOM_RIGHT
++0x16F4 SRC_SC_BOTTOM_RIGHT
++0x1714 DSTCACHE_CTLSTAT
++0x1720 WAIT_UNTIL
++0x172C RBBM_GUICNTL
++0x1D98 VAP_VPORT_XSCALE
++0x1D9C VAP_VPORT_XOFFSET
++0x1DA0 VAP_VPORT_YSCALE
++0x1DA4 VAP_VPORT_YOFFSET
++0x1DA8 VAP_VPORT_ZSCALE
++0x1DAC VAP_VPORT_ZOFFSET
++0x2080 VAP_CNTL
++0x2090 VAP_OUT_VTX_FMT_0
++0x2094 VAP_OUT_VTX_FMT_1
++0x20B0 VAP_VTE_CNTL
++0x2138 VAP_VF_MIN_VTX_INDX
++0x2140 VAP_CNTL_STATUS
++0x2150 VAP_PROG_STREAM_CNTL_0
++0x2154 VAP_PROG_STREAM_CNTL_1
++0x2158 VAP_PROG_STREAM_CNTL_2
++0x215C VAP_PROG_STREAM_CNTL_3
++0x2160 VAP_PROG_STREAM_CNTL_4
++0x2164 VAP_PROG_STREAM_CNTL_5
++0x2168 VAP_PROG_STREAM_CNTL_6
++0x216C VAP_PROG_STREAM_CNTL_7
++0x2180 VAP_VTX_STATE_CNTL
++0x2184 VAP_VSM_VTX_ASSM
++0x2188 VAP_VTX_STATE_IND_REG_0
++0x218C VAP_VTX_STATE_IND_REG_1
++0x2190 VAP_VTX_STATE_IND_REG_2
++0x2194 VAP_VTX_STATE_IND_REG_3
++0x2198 VAP_VTX_STATE_IND_REG_4
++0x219C VAP_VTX_STATE_IND_REG_5
++0x21A0 VAP_VTX_STATE_IND_REG_6
++0x21A4 VAP_VTX_STATE_IND_REG_7
++0x21A8 VAP_VTX_STATE_IND_REG_8
++0x21AC VAP_VTX_STATE_IND_REG_9
++0x21B0 VAP_VTX_STATE_IND_REG_10
++0x21B4 VAP_VTX_STATE_IND_REG_11
++0x21B8 VAP_VTX_STATE_IND_REG_12
++0x21BC VAP_VTX_STATE_IND_REG_13
++0x21C0 VAP_VTX_STATE_IND_REG_14
++0x21C4 VAP_VTX_STATE_IND_REG_15
++0x21DC VAP_PSC_SGN_NORM_CNTL
++0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
++0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
++0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
++0x21EC VAP_PROG_STREAM_CNTL_EXT_3
++0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
++0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
++0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
++0x21FC VAP_PROG_STREAM_CNTL_EXT_7
++0x2200 VAP_PVS_VECTOR_INDX_REG
++0x2204 VAP_PVS_VECTOR_DATA_REG
++0x2208 VAP_PVS_VECTOR_DATA_REG_128
++0x221C VAP_CLIP_CNTL
++0x2220 VAP_GB_VERT_CLIP_ADJ
++0x2224 VAP_GB_VERT_DISC_ADJ
++0x2228 VAP_GB_HORZ_CLIP_ADJ
++0x222C VAP_GB_HORZ_DISC_ADJ
++0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
++0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
++0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
++0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
++0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
++0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
++0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
++0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
++0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
++0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
++0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
++0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
++0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
++0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
++0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
++0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
++0x2284 VAP_PVS_STATE_FLUSH_REG
++0x2288 VAP_PVS_VTX_TIMEOUT_REG
++0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
++0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
++0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
++0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
++0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
++0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
++0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
++0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
++0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
++0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
++0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
++0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
++0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
++0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
++0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
++0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
++0x22D0 VAP_PVS_CODE_CNTL_0
++0x22D4 VAP_PVS_CONST_CNTL
++0x22D8 VAP_PVS_CODE_CNTL_1
++0x22DC VAP_PVS_FLOW_CNTL_OPC
++0x342C RB2D_DSTCACHE_CTLSTAT
++0x4000 GB_VAP_RASTER_VTX_FMT_0
++0x4004 GB_VAP_RASTER_VTX_FMT_1
++0x4008 GB_ENABLE
++0x401C GB_SELECT
++0x4020 GB_AA_CONFIG
++0x4024 GB_FIFO_SIZE
++0x4100 TX_INVALTAGS
++0x4200 GA_POINT_S0
++0x4204 GA_POINT_T0
++0x4208 GA_POINT_S1
++0x420C GA_POINT_T1
++0x4214 GA_TRIANGLE_STIPPLE
++0x421C GA_POINT_SIZE
++0x4230 GA_POINT_MINMAX
++0x4234 GA_LINE_CNTL
++0x4238 GA_LINE_STIPPLE_CONFIG
++0x4260 GA_LINE_STIPPLE_VALUE
++0x4264 GA_LINE_S0
++0x4268 GA_LINE_S1
++0x4278 GA_COLOR_CONTROL
++0x427C GA_SOLID_RG
++0x4280 GA_SOLID_BA
++0x4288 GA_POLY_MODE
++0x428C GA_ROUND_MODE
++0x4290 GA_OFFSET
++0x4294 GA_FOG_SCALE
++0x4298 GA_FOG_OFFSET
++0x42A0 SU_TEX_WRAP
++0x42A4 SU_POLY_OFFSET_FRONT_SCALE
++0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
++0x42AC SU_POLY_OFFSET_BACK_SCALE
++0x42B0 SU_POLY_OFFSET_BACK_OFFSET
++0x42B4 SU_POLY_OFFSET_ENABLE
++0x42B8 SU_CULL_MODE
++0x42C0 SU_DEPTH_SCALE
++0x42C4 SU_DEPTH_OFFSET
++0x42C8 SU_REG_DEST
++0x4300 RS_COUNT
++0x4304 RS_INST_COUNT
++0x4310 RS_IP_0
++0x4314 RS_IP_1
++0x4318 RS_IP_2
++0x431C RS_IP_3
++0x4320 RS_IP_4
++0x4324 RS_IP_5
++0x4328 RS_IP_6
++0x432C RS_IP_7
++0x4330 RS_INST_0
++0x4334 RS_INST_1
++0x4338 RS_INST_2
++0x433C RS_INST_3
++0x4340 RS_INST_4
++0x4344 RS_INST_5
++0x4348 RS_INST_6
++0x434C RS_INST_7
++0x4350 RS_INST_8
++0x4354 RS_INST_9
++0x4358 RS_INST_10
++0x435C RS_INST_11
++0x4360 RS_INST_12
++0x4364 RS_INST_13
++0x4368 RS_INST_14
++0x436C RS_INST_15
++0x43A4 SC_HYPERZ_EN
++0x43A8 SC_EDGERULE
++0x43B0 SC_CLIP_0_A
++0x43B4 SC_CLIP_0_B
++0x43B8 SC_CLIP_1_A
++0x43BC SC_CLIP_1_B
++0x43C0 SC_CLIP_2_A
++0x43C4 SC_CLIP_2_B
++0x43C8 SC_CLIP_3_A
++0x43CC SC_CLIP_3_B
++0x43D0 SC_CLIP_RULE
++0x43E0 SC_SCISSOR0
++0x43E8 SC_SCREENDOOR
++0x4440 TX_FILTER1_0
++0x4444 TX_FILTER1_1
++0x4448 TX_FILTER1_2
++0x444C TX_FILTER1_3
++0x4450 TX_FILTER1_4
++0x4454 TX_FILTER1_5
++0x4458 TX_FILTER1_6
++0x445C TX_FILTER1_7
++0x4460 TX_FILTER1_8
++0x4464 TX_FILTER1_9
++0x4468 TX_FILTER1_10
++0x446C TX_FILTER1_11
++0x4470 TX_FILTER1_12
++0x4474 TX_FILTER1_13
++0x4478 TX_FILTER1_14
++0x447C TX_FILTER1_15
++0x4580 TX_CHROMA_KEY_0
++0x4584 TX_CHROMA_KEY_1
++0x4588 TX_CHROMA_KEY_2
++0x458C TX_CHROMA_KEY_3
++0x4590 TX_CHROMA_KEY_4
++0x4594 TX_CHROMA_KEY_5
++0x4598 TX_CHROMA_KEY_6
++0x459C TX_CHROMA_KEY_7
++0x45A0 TX_CHROMA_KEY_8
++0x45A4 TX_CHROMA_KEY_9
++0x45A8 TX_CHROMA_KEY_10
++0x45AC TX_CHROMA_KEY_11
++0x45B0 TX_CHROMA_KEY_12
++0x45B4 TX_CHROMA_KEY_13
++0x45B8 TX_CHROMA_KEY_14
++0x45BC TX_CHROMA_KEY_15
++0x45C0 TX_BORDER_COLOR_0
++0x45C4 TX_BORDER_COLOR_1
++0x45C8 TX_BORDER_COLOR_2
++0x45CC TX_BORDER_COLOR_3
++0x45D0 TX_BORDER_COLOR_4
++0x45D4 TX_BORDER_COLOR_5
++0x45D8 TX_BORDER_COLOR_6
++0x45DC TX_BORDER_COLOR_7
++0x45E0 TX_BORDER_COLOR_8
++0x45E4 TX_BORDER_COLOR_9
++0x45E8 TX_BORDER_COLOR_10
++0x45EC TX_BORDER_COLOR_11
++0x45F0 TX_BORDER_COLOR_12
++0x45F4 TX_BORDER_COLOR_13
++0x45F8 TX_BORDER_COLOR_14
++0x45FC TX_BORDER_COLOR_15
++0x4600 US_CONFIG
++0x4604 US_PIXSIZE
++0x4608 US_CODE_OFFSET
++0x460C US_RESET
++0x4610 US_CODE_ADDR_0
++0x4614 US_CODE_ADDR_1
++0x4618 US_CODE_ADDR_2
++0x461C US_CODE_ADDR_3
++0x4620 US_TEX_INST_0
++0x4624 US_TEX_INST_1
++0x4628 US_TEX_INST_2
++0x462C US_TEX_INST_3
++0x4630 US_TEX_INST_4
++0x4634 US_TEX_INST_5
++0x4638 US_TEX_INST_6
++0x463C US_TEX_INST_7
++0x4640 US_TEX_INST_8
++0x4644 US_TEX_INST_9
++0x4648 US_TEX_INST_10
++0x464C US_TEX_INST_11
++0x4650 US_TEX_INST_12
++0x4654 US_TEX_INST_13
++0x4658 US_TEX_INST_14
++0x465C US_TEX_INST_15
++0x4660 US_TEX_INST_16
++0x4664 US_TEX_INST_17
++0x4668 US_TEX_INST_18
++0x466C US_TEX_INST_19
++0x4670 US_TEX_INST_20
++0x4674 US_TEX_INST_21
++0x4678 US_TEX_INST_22
++0x467C US_TEX_INST_23
++0x4680 US_TEX_INST_24
++0x4684 US_TEX_INST_25
++0x4688 US_TEX_INST_26
++0x468C US_TEX_INST_27
++0x4690 US_TEX_INST_28
++0x4694 US_TEX_INST_29
++0x4698 US_TEX_INST_30
++0x469C US_TEX_INST_31
++0x46A4 US_OUT_FMT_0
++0x46A8 US_OUT_FMT_1
++0x46AC US_OUT_FMT_2
++0x46B0 US_OUT_FMT_3
++0x46B4 US_W_FMT
++0x46B8 US_CODE_BANK
++0x46BC US_CODE_EXT
++0x46C0 US_ALU_RGB_ADDR_0
++0x46C4 US_ALU_RGB_ADDR_1
++0x46C8 US_ALU_RGB_ADDR_2
++0x46CC US_ALU_RGB_ADDR_3
++0x46D0 US_ALU_RGB_ADDR_4
++0x46D4 US_ALU_RGB_ADDR_5
++0x46D8 US_ALU_RGB_ADDR_6
++0x46DC US_ALU_RGB_ADDR_7
++0x46E0 US_ALU_RGB_ADDR_8
++0x46E4 US_ALU_RGB_ADDR_9
++0x46E8 US_ALU_RGB_ADDR_10
++0x46EC US_ALU_RGB_ADDR_11
++0x46F0 US_ALU_RGB_ADDR_12
++0x46F4 US_ALU_RGB_ADDR_13
++0x46F8 US_ALU_RGB_ADDR_14
++0x46FC US_ALU_RGB_ADDR_15
++0x4700 US_ALU_RGB_ADDR_16
++0x4704 US_ALU_RGB_ADDR_17
++0x4708 US_ALU_RGB_ADDR_18
++0x470C US_ALU_RGB_ADDR_19
++0x4710 US_ALU_RGB_ADDR_20
++0x4714 US_ALU_RGB_ADDR_21
++0x4718 US_ALU_RGB_ADDR_22
++0x471C US_ALU_RGB_ADDR_23
++0x4720 US_ALU_RGB_ADDR_24
++0x4724 US_ALU_RGB_ADDR_25
++0x4728 US_ALU_RGB_ADDR_26
++0x472C US_ALU_RGB_ADDR_27
++0x4730 US_ALU_RGB_ADDR_28
++0x4734 US_ALU_RGB_ADDR_29
++0x4738 US_ALU_RGB_ADDR_30
++0x473C US_ALU_RGB_ADDR_31
++0x4740 US_ALU_RGB_ADDR_32
++0x4744 US_ALU_RGB_ADDR_33
++0x4748 US_ALU_RGB_ADDR_34
++0x474C US_ALU_RGB_ADDR_35
++0x4750 US_ALU_RGB_ADDR_36
++0x4754 US_ALU_RGB_ADDR_37
++0x4758 US_ALU_RGB_ADDR_38
++0x475C US_ALU_RGB_ADDR_39
++0x4760 US_ALU_RGB_ADDR_40
++0x4764 US_ALU_RGB_ADDR_41
++0x4768 US_ALU_RGB_ADDR_42
++0x476C US_ALU_RGB_ADDR_43
++0x4770 US_ALU_RGB_ADDR_44
++0x4774 US_ALU_RGB_ADDR_45
++0x4778 US_ALU_RGB_ADDR_46
++0x477C US_ALU_RGB_ADDR_47
++0x4780 US_ALU_RGB_ADDR_48
++0x4784 US_ALU_RGB_ADDR_49
++0x4788 US_ALU_RGB_ADDR_50
++0x478C US_ALU_RGB_ADDR_51
++0x4790 US_ALU_RGB_ADDR_52
++0x4794 US_ALU_RGB_ADDR_53
++0x4798 US_ALU_RGB_ADDR_54
++0x479C US_ALU_RGB_ADDR_55
++0x47A0 US_ALU_RGB_ADDR_56
++0x47A4 US_ALU_RGB_ADDR_57
++0x47A8 US_ALU_RGB_ADDR_58
++0x47AC US_ALU_RGB_ADDR_59
++0x47B0 US_ALU_RGB_ADDR_60
++0x47B4 US_ALU_RGB_ADDR_61
++0x47B8 US_ALU_RGB_ADDR_62
++0x47BC US_ALU_RGB_ADDR_63
++0x47C0 US_ALU_ALPHA_ADDR_0
++0x47C4 US_ALU_ALPHA_ADDR_1
++0x47C8 US_ALU_ALPHA_ADDR_2
++0x47CC US_ALU_ALPHA_ADDR_3
++0x47D0 US_ALU_ALPHA_ADDR_4
++0x47D4 US_ALU_ALPHA_ADDR_5
++0x47D8 US_ALU_ALPHA_ADDR_6
++0x47DC US_ALU_ALPHA_ADDR_7
++0x47E0 US_ALU_ALPHA_ADDR_8
++0x47E4 US_ALU_ALPHA_ADDR_9
++0x47E8 US_ALU_ALPHA_ADDR_10
++0x47EC US_ALU_ALPHA_ADDR_11
++0x47F0 US_ALU_ALPHA_ADDR_12
++0x47F4 US_ALU_ALPHA_ADDR_13
++0x47F8 US_ALU_ALPHA_ADDR_14
++0x47FC US_ALU_ALPHA_ADDR_15
++0x4800 US_ALU_ALPHA_ADDR_16
++0x4804 US_ALU_ALPHA_ADDR_17
++0x4808 US_ALU_ALPHA_ADDR_18
++0x480C US_ALU_ALPHA_ADDR_19
++0x4810 US_ALU_ALPHA_ADDR_20
++0x4814 US_ALU_ALPHA_ADDR_21
++0x4818 US_ALU_ALPHA_ADDR_22
++0x481C US_ALU_ALPHA_ADDR_23
++0x4820 US_ALU_ALPHA_ADDR_24
++0x4824 US_ALU_ALPHA_ADDR_25
++0x4828 US_ALU_ALPHA_ADDR_26
++0x482C US_ALU_ALPHA_ADDR_27
++0x4830 US_ALU_ALPHA_ADDR_28
++0x4834 US_ALU_ALPHA_ADDR_29
++0x4838 US_ALU_ALPHA_ADDR_30
++0x483C US_ALU_ALPHA_ADDR_31
++0x4840 US_ALU_ALPHA_ADDR_32
++0x4844 US_ALU_ALPHA_ADDR_33
++0x4848 US_ALU_ALPHA_ADDR_34
++0x484C US_ALU_ALPHA_ADDR_35
++0x4850 US_ALU_ALPHA_ADDR_36
++0x4854 US_ALU_ALPHA_ADDR_37
++0x4858 US_ALU_ALPHA_ADDR_38
++0x485C US_ALU_ALPHA_ADDR_39
++0x4860 US_ALU_ALPHA_ADDR_40
++0x4864 US_ALU_ALPHA_ADDR_41
++0x4868 US_ALU_ALPHA_ADDR_42
++0x486C US_ALU_ALPHA_ADDR_43
++0x4870 US_ALU_ALPHA_ADDR_44
++0x4874 US_ALU_ALPHA_ADDR_45
++0x4878 US_ALU_ALPHA_ADDR_46
++0x487C US_ALU_ALPHA_ADDR_47
++0x4880 US_ALU_ALPHA_ADDR_48
++0x4884 US_ALU_ALPHA_ADDR_49
++0x4888 US_ALU_ALPHA_ADDR_50
++0x488C US_ALU_ALPHA_ADDR_51
++0x4890 US_ALU_ALPHA_ADDR_52
++0x4894 US_ALU_ALPHA_ADDR_53
++0x4898 US_ALU_ALPHA_ADDR_54
++0x489C US_ALU_ALPHA_ADDR_55
++0x48A0 US_ALU_ALPHA_ADDR_56
++0x48A4 US_ALU_ALPHA_ADDR_57
++0x48A8 US_ALU_ALPHA_ADDR_58
++0x48AC US_ALU_ALPHA_ADDR_59
++0x48B0 US_ALU_ALPHA_ADDR_60
++0x48B4 US_ALU_ALPHA_ADDR_61
++0x48B8 US_ALU_ALPHA_ADDR_62
++0x48BC US_ALU_ALPHA_ADDR_63
++0x48C0 US_ALU_RGB_INST_0
++0x48C4 US_ALU_RGB_INST_1
++0x48C8 US_ALU_RGB_INST_2
++0x48CC US_ALU_RGB_INST_3
++0x48D0 US_ALU_RGB_INST_4
++0x48D4 US_ALU_RGB_INST_5
++0x48D8 US_ALU_RGB_INST_6
++0x48DC US_ALU_RGB_INST_7
++0x48E0 US_ALU_RGB_INST_8
++0x48E4 US_ALU_RGB_INST_9
++0x48E8 US_ALU_RGB_INST_10
++0x48EC US_ALU_RGB_INST_11
++0x48F0 US_ALU_RGB_INST_12
++0x48F4 US_ALU_RGB_INST_13
++0x48F8 US_ALU_RGB_INST_14
++0x48FC US_ALU_RGB_INST_15
++0x4900 US_ALU_RGB_INST_16
++0x4904 US_ALU_RGB_INST_17
++0x4908 US_ALU_RGB_INST_18
++0x490C US_ALU_RGB_INST_19
++0x4910 US_ALU_RGB_INST_20
++0x4914 US_ALU_RGB_INST_21
++0x4918 US_ALU_RGB_INST_22
++0x491C US_ALU_RGB_INST_23
++0x4920 US_ALU_RGB_INST_24
++0x4924 US_ALU_RGB_INST_25
++0x4928 US_ALU_RGB_INST_26
++0x492C US_ALU_RGB_INST_27
++0x4930 US_ALU_RGB_INST_28
++0x4934 US_ALU_RGB_INST_29
++0x4938 US_ALU_RGB_INST_30
++0x493C US_ALU_RGB_INST_31
++0x4940 US_ALU_RGB_INST_32
++0x4944 US_ALU_RGB_INST_33
++0x4948 US_ALU_RGB_INST_34
++0x494C US_ALU_RGB_INST_35
++0x4950 US_ALU_RGB_INST_36
++0x4954 US_ALU_RGB_INST_37
++0x4958 US_ALU_RGB_INST_38
++0x495C US_ALU_RGB_INST_39
++0x4960 US_ALU_RGB_INST_40
++0x4964 US_ALU_RGB_INST_41
++0x4968 US_ALU_RGB_INST_42
++0x496C US_ALU_RGB_INST_43
++0x4970 US_ALU_RGB_INST_44
++0x4974 US_ALU_RGB_INST_45
++0x4978 US_ALU_RGB_INST_46
++0x497C US_ALU_RGB_INST_47
++0x4980 US_ALU_RGB_INST_48
++0x4984 US_ALU_RGB_INST_49
++0x4988 US_ALU_RGB_INST_50
++0x498C US_ALU_RGB_INST_51
++0x4990 US_ALU_RGB_INST_52
++0x4994 US_ALU_RGB_INST_53
++0x4998 US_ALU_RGB_INST_54
++0x499C US_ALU_RGB_INST_55
++0x49A0 US_ALU_RGB_INST_56
++0x49A4 US_ALU_RGB_INST_57
++0x49A8 US_ALU_RGB_INST_58
++0x49AC US_ALU_RGB_INST_59
++0x49B0 US_ALU_RGB_INST_60
++0x49B4 US_ALU_RGB_INST_61
++0x49B8 US_ALU_RGB_INST_62
++0x49BC US_ALU_RGB_INST_63
++0x49C0 US_ALU_ALPHA_INST_0
++0x49C4 US_ALU_ALPHA_INST_1
++0x49C8 US_ALU_ALPHA_INST_2
++0x49CC US_ALU_ALPHA_INST_3
++0x49D0 US_ALU_ALPHA_INST_4
++0x49D4 US_ALU_ALPHA_INST_5
++0x49D8 US_ALU_ALPHA_INST_6
++0x49DC US_ALU_ALPHA_INST_7
++0x49E0 US_ALU_ALPHA_INST_8
++0x49E4 US_ALU_ALPHA_INST_9
++0x49E8 US_ALU_ALPHA_INST_10
++0x49EC US_ALU_ALPHA_INST_11
++0x49F0 US_ALU_ALPHA_INST_12
++0x49F4 US_ALU_ALPHA_INST_13
++0x49F8 US_ALU_ALPHA_INST_14
++0x49FC US_ALU_ALPHA_INST_15
++0x4A00 US_ALU_ALPHA_INST_16
++0x4A04 US_ALU_ALPHA_INST_17
++0x4A08 US_ALU_ALPHA_INST_18
++0x4A0C US_ALU_ALPHA_INST_19
++0x4A10 US_ALU_ALPHA_INST_20
++0x4A14 US_ALU_ALPHA_INST_21
++0x4A18 US_ALU_ALPHA_INST_22
++0x4A1C US_ALU_ALPHA_INST_23
++0x4A20 US_ALU_ALPHA_INST_24
++0x4A24 US_ALU_ALPHA_INST_25
++0x4A28 US_ALU_ALPHA_INST_26
++0x4A2C US_ALU_ALPHA_INST_27
++0x4A30 US_ALU_ALPHA_INST_28
++0x4A34 US_ALU_ALPHA_INST_29
++0x4A38 US_ALU_ALPHA_INST_30
++0x4A3C US_ALU_ALPHA_INST_31
++0x4A40 US_ALU_ALPHA_INST_32
++0x4A44 US_ALU_ALPHA_INST_33
++0x4A48 US_ALU_ALPHA_INST_34
++0x4A4C US_ALU_ALPHA_INST_35
++0x4A50 US_ALU_ALPHA_INST_36
++0x4A54 US_ALU_ALPHA_INST_37
++0x4A58 US_ALU_ALPHA_INST_38
++0x4A5C US_ALU_ALPHA_INST_39
++0x4A60 US_ALU_ALPHA_INST_40
++0x4A64 US_ALU_ALPHA_INST_41
++0x4A68 US_ALU_ALPHA_INST_42
++0x4A6C US_ALU_ALPHA_INST_43
++0x4A70 US_ALU_ALPHA_INST_44
++0x4A74 US_ALU_ALPHA_INST_45
++0x4A78 US_ALU_ALPHA_INST_46
++0x4A7C US_ALU_ALPHA_INST_47
++0x4A80 US_ALU_ALPHA_INST_48
++0x4A84 US_ALU_ALPHA_INST_49
++0x4A88 US_ALU_ALPHA_INST_50
++0x4A8C US_ALU_ALPHA_INST_51
++0x4A90 US_ALU_ALPHA_INST_52
++0x4A94 US_ALU_ALPHA_INST_53
++0x4A98 US_ALU_ALPHA_INST_54
++0x4A9C US_ALU_ALPHA_INST_55
++0x4AA0 US_ALU_ALPHA_INST_56
++0x4AA4 US_ALU_ALPHA_INST_57
++0x4AA8 US_ALU_ALPHA_INST_58
++0x4AAC US_ALU_ALPHA_INST_59
++0x4AB0 US_ALU_ALPHA_INST_60
++0x4AB4 US_ALU_ALPHA_INST_61
++0x4AB8 US_ALU_ALPHA_INST_62
++0x4ABC US_ALU_ALPHA_INST_63
++0x4AC0 US_ALU_EXT_ADDR_0
++0x4AC4 US_ALU_EXT_ADDR_1
++0x4AC8 US_ALU_EXT_ADDR_2
++0x4ACC US_ALU_EXT_ADDR_3
++0x4AD0 US_ALU_EXT_ADDR_4
++0x4AD4 US_ALU_EXT_ADDR_5
++0x4AD8 US_ALU_EXT_ADDR_6
++0x4ADC US_ALU_EXT_ADDR_7
++0x4AE0 US_ALU_EXT_ADDR_8
++0x4AE4 US_ALU_EXT_ADDR_9
++0x4AE8 US_ALU_EXT_ADDR_10
++0x4AEC US_ALU_EXT_ADDR_11
++0x4AF0 US_ALU_EXT_ADDR_12
++0x4AF4 US_ALU_EXT_ADDR_13
++0x4AF8 US_ALU_EXT_ADDR_14
++0x4AFC US_ALU_EXT_ADDR_15
++0x4B00 US_ALU_EXT_ADDR_16
++0x4B04 US_ALU_EXT_ADDR_17
++0x4B08 US_ALU_EXT_ADDR_18
++0x4B0C US_ALU_EXT_ADDR_19
++0x4B10 US_ALU_EXT_ADDR_20
++0x4B14 US_ALU_EXT_ADDR_21
++0x4B18 US_ALU_EXT_ADDR_22
++0x4B1C US_ALU_EXT_ADDR_23
++0x4B20 US_ALU_EXT_ADDR_24
++0x4B24 US_ALU_EXT_ADDR_25
++0x4B28 US_ALU_EXT_ADDR_26
++0x4B2C US_ALU_EXT_ADDR_27
++0x4B30 US_ALU_EXT_ADDR_28
++0x4B34 US_ALU_EXT_ADDR_29
++0x4B38 US_ALU_EXT_ADDR_30
++0x4B3C US_ALU_EXT_ADDR_31
++0x4B40 US_ALU_EXT_ADDR_32
++0x4B44 US_ALU_EXT_ADDR_33
++0x4B48 US_ALU_EXT_ADDR_34
++0x4B4C US_ALU_EXT_ADDR_35
++0x4B50 US_ALU_EXT_ADDR_36
++0x4B54 US_ALU_EXT_ADDR_37
++0x4B58 US_ALU_EXT_ADDR_38
++0x4B5C US_ALU_EXT_ADDR_39
++0x4B60 US_ALU_EXT_ADDR_40
++0x4B64 US_ALU_EXT_ADDR_41
++0x4B68 US_ALU_EXT_ADDR_42
++0x4B6C US_ALU_EXT_ADDR_43
++0x4B70 US_ALU_EXT_ADDR_44
++0x4B74 US_ALU_EXT_ADDR_45
++0x4B78 US_ALU_EXT_ADDR_46
++0x4B7C US_ALU_EXT_ADDR_47
++0x4B80 US_ALU_EXT_ADDR_48
++0x4B84 US_ALU_EXT_ADDR_49
++0x4B88 US_ALU_EXT_ADDR_50
++0x4B8C US_ALU_EXT_ADDR_51
++0x4B90 US_ALU_EXT_ADDR_52
++0x4B94 US_ALU_EXT_ADDR_53
++0x4B98 US_ALU_EXT_ADDR_54
++0x4B9C US_ALU_EXT_ADDR_55
++0x4BA0 US_ALU_EXT_ADDR_56
++0x4BA4 US_ALU_EXT_ADDR_57
++0x4BA8 US_ALU_EXT_ADDR_58
++0x4BAC US_ALU_EXT_ADDR_59
++0x4BB0 US_ALU_EXT_ADDR_60
++0x4BB4 US_ALU_EXT_ADDR_61
++0x4BB8 US_ALU_EXT_ADDR_62
++0x4BBC US_ALU_EXT_ADDR_63
++0x4BC0 FG_FOG_BLEND
++0x4BC4 FG_FOG_FACTOR
++0x4BC8 FG_FOG_COLOR_R
++0x4BCC FG_FOG_COLOR_G
++0x4BD0 FG_FOG_COLOR_B
++0x4BD4 FG_ALPHA_FUNC
++0x4BD8 FG_DEPTH_SRC
++0x4C00 US_ALU_CONST_R_0
++0x4C04 US_ALU_CONST_G_0
++0x4C08 US_ALU_CONST_B_0
++0x4C0C US_ALU_CONST_A_0
++0x4C10 US_ALU_CONST_R_1
++0x4C14 US_ALU_CONST_G_1
++0x4C18 US_ALU_CONST_B_1
++0x4C1C US_ALU_CONST_A_1
++0x4C20 US_ALU_CONST_R_2
++0x4C24 US_ALU_CONST_G_2
++0x4C28 US_ALU_CONST_B_2
++0x4C2C US_ALU_CONST_A_2
++0x4C30 US_ALU_CONST_R_3
++0x4C34 US_ALU_CONST_G_3
++0x4C38 US_ALU_CONST_B_3
++0x4C3C US_ALU_CONST_A_3
++0x4C40 US_ALU_CONST_R_4
++0x4C44 US_ALU_CONST_G_4
++0x4C48 US_ALU_CONST_B_4
++0x4C4C US_ALU_CONST_A_4
++0x4C50 US_ALU_CONST_R_5
++0x4C54 US_ALU_CONST_G_5
++0x4C58 US_ALU_CONST_B_5
++0x4C5C US_ALU_CONST_A_5
++0x4C60 US_ALU_CONST_R_6
++0x4C64 US_ALU_CONST_G_6
++0x4C68 US_ALU_CONST_B_6
++0x4C6C US_ALU_CONST_A_6
++0x4C70 US_ALU_CONST_R_7
++0x4C74 US_ALU_CONST_G_7
++0x4C78 US_ALU_CONST_B_7
++0x4C7C US_ALU_CONST_A_7
++0x4C80 US_ALU_CONST_R_8
++0x4C84 US_ALU_CONST_G_8
++0x4C88 US_ALU_CONST_B_8
++0x4C8C US_ALU_CONST_A_8
++0x4C90 US_ALU_CONST_R_9
++0x4C94 US_ALU_CONST_G_9
++0x4C98 US_ALU_CONST_B_9
++0x4C9C US_ALU_CONST_A_9
++0x4CA0 US_ALU_CONST_R_10
++0x4CA4 US_ALU_CONST_G_10
++0x4CA8 US_ALU_CONST_B_10
++0x4CAC US_ALU_CONST_A_10
++0x4CB0 US_ALU_CONST_R_11
++0x4CB4 US_ALU_CONST_G_11
++0x4CB8 US_ALU_CONST_B_11
++0x4CBC US_ALU_CONST_A_11
++0x4CC0 US_ALU_CONST_R_12
++0x4CC4 US_ALU_CONST_G_12
++0x4CC8 US_ALU_CONST_B_12
++0x4CCC US_ALU_CONST_A_12
++0x4CD0 US_ALU_CONST_R_13
++0x4CD4 US_ALU_CONST_G_13
++0x4CD8 US_ALU_CONST_B_13
++0x4CDC US_ALU_CONST_A_13
++0x4CE0 US_ALU_CONST_R_14
++0x4CE4 US_ALU_CONST_G_14
++0x4CE8 US_ALU_CONST_B_14
++0x4CEC US_ALU_CONST_A_14
++0x4CF0 US_ALU_CONST_R_15
++0x4CF4 US_ALU_CONST_G_15
++0x4CF8 US_ALU_CONST_B_15
++0x4CFC US_ALU_CONST_A_15
++0x4D00 US_ALU_CONST_R_16
++0x4D04 US_ALU_CONST_G_16
++0x4D08 US_ALU_CONST_B_16
++0x4D0C US_ALU_CONST_A_16
++0x4D10 US_ALU_CONST_R_17
++0x4D14 US_ALU_CONST_G_17
++0x4D18 US_ALU_CONST_B_17
++0x4D1C US_ALU_CONST_A_17
++0x4D20 US_ALU_CONST_R_18
++0x4D24 US_ALU_CONST_G_18
++0x4D28 US_ALU_CONST_B_18
++0x4D2C US_ALU_CONST_A_18
++0x4D30 US_ALU_CONST_R_19
++0x4D34 US_ALU_CONST_G_19
++0x4D38 US_ALU_CONST_B_19
++0x4D3C US_ALU_CONST_A_19
++0x4D40 US_ALU_CONST_R_20
++0x4D44 US_ALU_CONST_G_20
++0x4D48 US_ALU_CONST_B_20
++0x4D4C US_ALU_CONST_A_20
++0x4D50 US_ALU_CONST_R_21
++0x4D54 US_ALU_CONST_G_21
++0x4D58 US_ALU_CONST_B_21
++0x4D5C US_ALU_CONST_A_21
++0x4D60 US_ALU_CONST_R_22
++0x4D64 US_ALU_CONST_G_22
++0x4D68 US_ALU_CONST_B_22
++0x4D6C US_ALU_CONST_A_22
++0x4D70 US_ALU_CONST_R_23
++0x4D74 US_ALU_CONST_G_23
++0x4D78 US_ALU_CONST_B_23
++0x4D7C US_ALU_CONST_A_23
++0x4D80 US_ALU_CONST_R_24
++0x4D84 US_ALU_CONST_G_24
++0x4D88 US_ALU_CONST_B_24
++0x4D8C US_ALU_CONST_A_24
++0x4D90 US_ALU_CONST_R_25
++0x4D94 US_ALU_CONST_G_25
++0x4D98 US_ALU_CONST_B_25
++0x4D9C US_ALU_CONST_A_25
++0x4DA0 US_ALU_CONST_R_26
++0x4DA4 US_ALU_CONST_G_26
++0x4DA8 US_ALU_CONST_B_26
++0x4DAC US_ALU_CONST_A_26
++0x4DB0 US_ALU_CONST_R_27
++0x4DB4 US_ALU_CONST_G_27
++0x4DB8 US_ALU_CONST_B_27
++0x4DBC US_ALU_CONST_A_27
++0x4DC0 US_ALU_CONST_R_28
++0x4DC4 US_ALU_CONST_G_28
++0x4DC8 US_ALU_CONST_B_28
++0x4DCC US_ALU_CONST_A_28
++0x4DD0 US_ALU_CONST_R_29
++0x4DD4 US_ALU_CONST_G_29
++0x4DD8 US_ALU_CONST_B_29
++0x4DDC US_ALU_CONST_A_29
++0x4DE0 US_ALU_CONST_R_30
++0x4DE4 US_ALU_CONST_G_30
++0x4DE8 US_ALU_CONST_B_30
++0x4DEC US_ALU_CONST_A_30
++0x4DF0 US_ALU_CONST_R_31
++0x4DF4 US_ALU_CONST_G_31
++0x4DF8 US_ALU_CONST_B_31
++0x4DFC US_ALU_CONST_A_31
++0x4E04 RB3D_BLENDCNTL_R3
++0x4E08 RB3D_ABLENDCNTL_R3
++0x4E0C RB3D_COLOR_CHANNEL_MASK
++0x4E10 RB3D_CONSTANT_COLOR
++0x4E14 RB3D_COLOR_CLEAR_VALUE
++0x4E18 RB3D_ROPCNTL_R3
++0x4E1C RB3D_CLRCMP_FLIPE_R3
++0x4E20 RB3D_CLRCMP_CLR_R3
++0x4E24 RB3D_CLRCMP_MSK_R3
++0x4E48 RB3D_DEBUG_CTL
++0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
++0x4E50 RB3D_DITHER_CTL
++0x4E54 RB3D_CMASK_OFFSET0
++0x4E58 RB3D_CMASK_OFFSET1
++0x4E5C RB3D_CMASK_OFFSET2
++0x4E60 RB3D_CMASK_OFFSET3
++0x4E64 RB3D_CMASK_PITCH0
++0x4E68 RB3D_CMASK_PITCH1
++0x4E6C RB3D_CMASK_PITCH2
++0x4E70 RB3D_CMASK_PITCH3
++0x4E74 RB3D_CMASK_WRINDEX
++0x4E78 RB3D_CMASK_DWORD
++0x4E7C RB3D_CMASK_RDINDEX
++0x4E80 RB3D_AARESOLVE_OFFSET
++0x4E84 RB3D_AARESOLVE_PITCH
++0x4E88 RB3D_AARESOLVE_CTL
++0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
++0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
++0x4F04 ZB_ZSTENCILCNTL
++0x4F08 ZB_STENCILREFMASK
++0x4F14 ZB_ZTOP
++0x4F18 ZB_ZCACHE_CTLSTAT
++0x4F1C ZB_BW_CNTL
++0x4F28 ZB_DEPTHCLEARVALUE
++0x4F30 ZB_ZMASK_OFFSET
++0x4F34 ZB_ZMASK_PITCH
++0x4F38 ZB_ZMASK_WRINDEX
++0x4F3C ZB_ZMASK_DWORD
++0x4F40 ZB_ZMASK_RDINDEX
++0x4F44 ZB_HIZ_OFFSET
++0x4F48 ZB_HIZ_WRINDEX
++0x4F4C ZB_HIZ_DWORD
++0x4F50 ZB_HIZ_RDINDEX
++0x4F54 ZB_HIZ_PITCH
++0x4F58 ZB_ZPASS_DATA
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
+index 8e3c0b8..6801b86 100644
+--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
++++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
+@@ -153,7 +153,7 @@ rs600 0x6d40
+ 0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+ 0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+ 0x42AC SU_POLY_OFFSET_BACK_SCALE
+-0x42B0 SU_POLY_OFFSET_BACK_OFFSET 
++0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+ 0x42B4 SU_POLY_OFFSET_ENABLE
+ 0x42B8 SU_CULL_MODE
+ 0x42C0 SU_DEPTH_SCALE
+@@ -291,6 +291,8 @@ rs600 0x6d40
+ 0x46AC US_OUT_FMT_2
+ 0x46B0 US_OUT_FMT_3
+ 0x46B4 US_W_FMT
++0x46B8 US_CODE_BANK
++0x46BC US_CODE_EXT
+ 0x46C0 US_ALU_RGB_ADDR_0
+ 0x46C4 US_ALU_RGB_ADDR_1
+ 0x46C8 US_ALU_RGB_ADDR_2
+@@ -547,6 +549,70 @@ rs600 0x6d40
+ 0x4AB4 US_ALU_ALPHA_INST_61
+ 0x4AB8 US_ALU_ALPHA_INST_62
+ 0x4ABC US_ALU_ALPHA_INST_63
++0x4AC0 US_ALU_EXT_ADDR_0
++0x4AC4 US_ALU_EXT_ADDR_1
++0x4AC8 US_ALU_EXT_ADDR_2
++0x4ACC US_ALU_EXT_ADDR_3
++0x4AD0 US_ALU_EXT_ADDR_4
++0x4AD4 US_ALU_EXT_ADDR_5
++0x4AD8 US_ALU_EXT_ADDR_6
++0x4ADC US_ALU_EXT_ADDR_7
++0x4AE0 US_ALU_EXT_ADDR_8
++0x4AE4 US_ALU_EXT_ADDR_9
++0x4AE8 US_ALU_EXT_ADDR_10
++0x4AEC US_ALU_EXT_ADDR_11
++0x4AF0 US_ALU_EXT_ADDR_12
++0x4AF4 US_ALU_EXT_ADDR_13
++0x4AF8 US_ALU_EXT_ADDR_14
++0x4AFC US_ALU_EXT_ADDR_15
++0x4B00 US_ALU_EXT_ADDR_16
++0x4B04 US_ALU_EXT_ADDR_17
++0x4B08 US_ALU_EXT_ADDR_18
++0x4B0C US_ALU_EXT_ADDR_19
++0x4B10 US_ALU_EXT_ADDR_20
++0x4B14 US_ALU_EXT_ADDR_21
++0x4B18 US_ALU_EXT_ADDR_22
++0x4B1C US_ALU_EXT_ADDR_23
++0x4B20 US_ALU_EXT_ADDR_24
++0x4B24 US_ALU_EXT_ADDR_25
++0x4B28 US_ALU_EXT_ADDR_26
++0x4B2C US_ALU_EXT_ADDR_27
++0x4B30 US_ALU_EXT_ADDR_28
++0x4B34 US_ALU_EXT_ADDR_29
++0x4B38 US_ALU_EXT_ADDR_30
++0x4B3C US_ALU_EXT_ADDR_31
++0x4B40 US_ALU_EXT_ADDR_32
++0x4B44 US_ALU_EXT_ADDR_33
++0x4B48 US_ALU_EXT_ADDR_34
++0x4B4C US_ALU_EXT_ADDR_35
++0x4B50 US_ALU_EXT_ADDR_36
++0x4B54 US_ALU_EXT_ADDR_37
++0x4B58 US_ALU_EXT_ADDR_38
++0x4B5C US_ALU_EXT_ADDR_39
++0x4B60 US_ALU_EXT_ADDR_40
++0x4B64 US_ALU_EXT_ADDR_41
++0x4B68 US_ALU_EXT_ADDR_42
++0x4B6C US_ALU_EXT_ADDR_43
++0x4B70 US_ALU_EXT_ADDR_44
++0x4B74 US_ALU_EXT_ADDR_45
++0x4B78 US_ALU_EXT_ADDR_46
++0x4B7C US_ALU_EXT_ADDR_47
++0x4B80 US_ALU_EXT_ADDR_48
++0x4B84 US_ALU_EXT_ADDR_49
++0x4B88 US_ALU_EXT_ADDR_50
++0x4B8C US_ALU_EXT_ADDR_51
++0x4B90 US_ALU_EXT_ADDR_52
++0x4B94 US_ALU_EXT_ADDR_53
++0x4B98 US_ALU_EXT_ADDR_54
++0x4B9C US_ALU_EXT_ADDR_55
++0x4BA0 US_ALU_EXT_ADDR_56
++0x4BA4 US_ALU_EXT_ADDR_57
++0x4BA8 US_ALU_EXT_ADDR_58
++0x4BAC US_ALU_EXT_ADDR_59
++0x4BB0 US_ALU_EXT_ADDR_60
++0x4BB4 US_ALU_EXT_ADDR_61
++0x4BB8 US_ALU_EXT_ADDR_62
++0x4BBC US_ALU_EXT_ADDR_63
+ 0x4BC0 FG_FOG_BLEND
+ 0x4BC4 FG_FOG_FACTOR
+ 0x4BC8 FG_FOG_COLOR_R
+diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
+index 0102a0d..38abf63 100644
+--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
++++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
+@@ -161,7 +161,12 @@ rv515 0x6d40
+ 0x401C GB_SELECT
+ 0x4020 GB_AA_CONFIG
+ 0x4024 GB_FIFO_SIZE
++0x4028 GB_Z_PEQ_CONFIG
+ 0x4100 TX_INVALTAGS
++0x4114 SU_TEX_WRAP_PS3
++0x4118 PS3_ENABLE
++0x411c PS3_VTX_FMT
++0x4120 PS3_TEX_SOURCE
+ 0x4200 GA_POINT_S0
+ 0x4204 GA_POINT_T0
+ 0x4208 GA_POINT_S1
+@@ -171,6 +176,7 @@ rv515 0x6d40
+ 0x4230 GA_POINT_MINMAX
+ 0x4234 GA_LINE_CNTL
+ 0x4238 GA_LINE_STIPPLE_CONFIG
++0x4258 GA_COLOR_CONTROL_PS3
+ 0x4260 GA_LINE_STIPPLE_VALUE
+ 0x4264 GA_LINE_S0
+ 0x4268 GA_LINE_S1
+diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
+index ca03716..287fceb 100644
+--- a/drivers/gpu/drm/radeon/rs400.c
++++ b/drivers/gpu/drm/radeon/rs400.c
+@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+ 	return 0;
+ }
+ 
++int rs400_mc_wait_for_idle(struct radeon_device *rdev)
++{
++	unsigned i;
++	uint32_t tmp;
++
++	for (i = 0; i < rdev->usec_timeout; i++) {
++		/* read MC_STATUS */
++		tmp = RREG32(0x0150);
++		if (tmp & (1 << 2)) {
++			return 0;
++		}
++		DRM_UDELAY(1);
++	}
++	return -1;
++}
++
+ void rs400_gpu_init(struct radeon_device *rdev)
+ {
+ 	/* FIXME: HDP same place on rs400 ? */
+ 	r100_hdp_reset(rdev);
+ 	/* FIXME: is this correct ? */
+ 	r420_pipes_init(rdev);
+-	if (r300_mc_wait_for_idle(rdev)) {
+-		printk(KERN_WARNING "Failed to wait MC idle while "
+-		       "programming pipes. Bad things might happen.\n");
++	if (rs400_mc_wait_for_idle(rdev)) {
++		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
++		       "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
+ 	}
+ }
+ 
+@@ -352,10 +368,11 @@ static int rs400_mc_init(struct radeon_device *rdev)
+ 	u32 tmp;
+ 
+ 	/* Setup GPU memory space */
+-	tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
++	tmp = RREG32(R_00015C_NB_TOM);
+ 	rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
+ 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
+ 	r = radeon_mc_setup(rdev);
++	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
+ 	if (r)
+ 		return r;
+ 	return 0;
+@@ -369,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
+ 	r100_mc_stop(rdev, &save);
+ 
+ 	/* Wait for mc idle */
+-	if (r300_mc_wait_for_idle(rdev))
+-		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
++	if (rs400_mc_wait_for_idle(rdev))
++		dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
+ 	WREG32(R_000148_MC_FB_LOCATION,
+ 		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
+ 		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
+@@ -387,14 +404,15 @@ static int rs400_startup(struct radeon_device *rdev)
+ 	r300_clock_startup(rdev);
+ 	/* Initialize GPU configuration (# pipes, ...) */
+ 	rs400_gpu_init(rdev);
++	r100_enable_bm(rdev);
+ 	/* Initialize GART (initialize after TTM so we can allocate
+ 	 * memory through TTM but finalize after TTM) */
+ 	r = rs400_gart_enable(rdev);
+ 	if (r)
+ 		return r;
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	r100_irq_set(rdev);
++	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+@@ -430,6 +448,8 @@ int rs400_resume(struct radeon_device *rdev)
+ 	radeon_combios_asic_init(rdev->ddev);
+ 	/* Resume clock after posting */
+ 	r300_clock_startup(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return rs400_startup(rdev);
+ }
+ 
+@@ -444,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
+ 
+ void rs400_fini(struct radeon_device *rdev)
+ {
+-	rs400_suspend(rdev);
+ 	r100_cp_fini(rdev);
+ 	r100_wb_fini(rdev);
+ 	r100_ib_fini(rdev);
+@@ -452,7 +471,7 @@ void rs400_fini(struct radeon_device *rdev)
+ 	rs400_gart_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+@@ -490,12 +509,13 @@ int rs400_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if cards are posted or not */
+-	if (!radeon_card_posted(rdev) && rdev->bios) {
+-		DRM_INFO("GPU not posted. posting now...\n");
+-		radeon_combios_asic_init(rdev->ddev);
+-	}
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
++
+ 	/* Initialize clocks */
+ 	radeon_get_clock_info(rdev->ddev);
++	/* Initialize power management */
++	radeon_pm_init(rdev);
+ 	/* Get vram informations */
+ 	rs400_vram_info(rdev);
+ 	/* Initialize memory controller (also test AGP) */
+@@ -510,7 +530,7 @@ int rs400_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
+ 	r = rs400_gart_init(rdev);
+@@ -522,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
+ 	if (r) {
+ 		/* Something went wrong with the accel init, so stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		rs400_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
+diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
+index 4444f48..c381856 100644
+--- a/drivers/gpu/drm/radeon/rs600.c
++++ b/drivers/gpu/drm/radeon/rs600.c
+@@ -45,6 +45,124 @@
+ void rs600_gpu_init(struct radeon_device *rdev);
+ int rs600_mc_wait_for_idle(struct radeon_device *rdev);
+ 
++int rs600_mc_init(struct radeon_device *rdev)
++{
++	/* read back the MC value from the hw */
++	int r;
++	u32 tmp;
++
++	/* Setup GPU memory space */
++	tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
++	rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
++	rdev->mc.gtt_location = 0xffffffffUL;
++	r = radeon_mc_setup(rdev);
++	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
++	if (r)
++		return r;
++	return 0;
++}
++
++/* hpd for digital panel detect/disconnect */
++bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
++{
++	u32 tmp;
++	bool connected = false;
++
++	switch (hpd) {
++	case RADEON_HPD_1:
++		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
++		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
++			connected = true;
++		break;
++	case RADEON_HPD_2:
++		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
++		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
++			connected = true;
++		break;
++	default:
++		break;
++	}
++	return connected;
++}
++
++void rs600_hpd_set_polarity(struct radeon_device *rdev,
++			    enum radeon_hpd_id hpd)
++{
++	u32 tmp;
++	bool connected = rs600_hpd_sense(rdev, hpd);
++
++	switch (hpd) {
++	case RADEON_HPD_1:
++		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
++		if (connected)
++			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
++		else
++			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
++		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
++		break;
++	case RADEON_HPD_2:
++		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
++		if (connected)
++			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
++		else
++			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
++		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
++		break;
++	default:
++		break;
++	}
++}
++
++void rs600_hpd_init(struct radeon_device *rdev)
++{
++	struct drm_device *dev = rdev->ddev;
++	struct drm_connector *connector;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++		switch (radeon_connector->hpd.hpd) {
++		case RADEON_HPD_1:
++			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
++			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
++			rdev->irq.hpd[0] = true;
++			break;
++		case RADEON_HPD_2:
++			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
++			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
++			rdev->irq.hpd[1] = true;
++			break;
++		default:
++			break;
++		}
++	}
++	if (rdev->irq.installed)
++		rs600_irq_set(rdev);
++}
++
++void rs600_hpd_fini(struct radeon_device *rdev)
++{
++	struct drm_device *dev = rdev->ddev;
++	struct drm_connector *connector;
++
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
++		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
++		switch (radeon_connector->hpd.hpd) {
++		case RADEON_HPD_1:
++			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
++			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
++			rdev->irq.hpd[0] = false;
++			break;
++		case RADEON_HPD_2:
++			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
++			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
++			rdev->irq.hpd[1] = false;
++			break;
++		default:
++			break;
++		}
++	}
++}
++
+ /*
+  * GART.
+  */
+@@ -100,40 +218,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
+ 	WREG32(R_00004C_BUS_CNTL, tmp);
+ 	/* FIXME: setup default page */
+ 	WREG32_MC(R_000100_MC_PT0_CNTL,
+-		 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+-		  S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
++		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
++		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
++
+ 	for (i = 0; i < 19; i++) {
+ 		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
+-			S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+-			S_00016C_SYSTEM_ACCESS_MODE_MASK(
+-				V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
+-			S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+-				V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
+-			S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
+-			S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+-			S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
++			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
++			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
++				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
++			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
++				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
++			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
++			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
++			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
+ 	}
+-
+-	/* System context map to GART space */
+-	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
+-	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
+-
+ 	/* enable first context */
+-	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+-	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+ 	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
+-			S_000102_ENABLE_PAGE_TABLE(1) |
+-			S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
++		  S_000102_ENABLE_PAGE_TABLE(1) |
++		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
++
+ 	/* disable all other contexts */
+-	for (i = 1; i < 8; i++) {
++	for (i = 1; i < 8; i++)
+ 		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
+-	}
+ 
+ 	/* setup the page table */
+ 	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
+-			rdev->gart.table_addr);
++		  rdev->gart.table_addr);
++	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
++	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
+ 	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
+ 
++	/* System context maps to VRAM space */
++	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
++	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
++
+ 	/* enable page tables */
+ 	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
+ 	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
+@@ -146,15 +264,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
+ 
+ void rs600_gart_disable(struct radeon_device *rdev)
+ {
+-	uint32_t tmp;
++	u32 tmp;
++	int r;
+ 
+ 	/* FIXME: disable out of gart access */
+ 	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
+ 	tmp = RREG32_MC(R_000009_MC_CNTL1);
+ 	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
+ 	if (rdev->gart.table.vram.robj) {
+-		radeon_object_kunmap(rdev->gart.table.vram.robj);
+-		radeon_object_unpin(rdev->gart.table.vram.robj);
++		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
++		if (r == 0) {
++			radeon_bo_kunmap(rdev->gart.table.vram.robj);
++			radeon_bo_unpin(rdev->gart.table.vram.robj);
++			radeon_bo_unreserve(rdev->gart.table.vram.robj);
++		}
+ 	}
+ }
+ 
+@@ -189,7 +312,16 @@ int rs600_irq_set(struct radeon_device *rdev)
+ {
+ 	uint32_t tmp = 0;
+ 	uint32_t mode_int = 0;
+-
++	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
++		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
++	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
++		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
++
++	if (!rdev->irq.installed) {
++		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
++		WREG32(R_000040_GEN_INT_CNTL, 0);
++		return -EINVAL;
++	}
+ 	if (rdev->irq.sw_int) {
+ 		tmp |= S_000040_SW_INT_EN(1);
+ 	}
+@@ -199,8 +331,16 @@ int rs600_irq_set(struct radeon_device *rdev)
+ 	if (rdev->irq.crtc_vblank_int[1]) {
+ 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
+ 	}
++	if (rdev->irq.hpd[0]) {
++		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
++	}
++	if (rdev->irq.hpd[1]) {
++		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
++	}
+ 	WREG32(R_000040_GEN_INT_CNTL, tmp);
+ 	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
++	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
++	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
+ 	return 0;
+ }
+ 
+@@ -208,6 +348,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
+ {
+ 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
+ 	uint32_t irq_mask = ~C_000044_SW_INT;
++	u32 tmp;
+ 
+ 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
+ 		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+@@ -219,6 +360,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
+ 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
+ 				S_006D34_D2MODE_VBLANK_ACK(1));
+ 		}
++		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
++			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
++			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
++			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
++		}
++		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
++			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
++			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
++			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
++		}
+ 	} else {
+ 		*r500_disp_int = 0;
+ 	}
+@@ -244,6 +395,7 @@ int rs600_irq_process(struct radeon_device *rdev)
+ {
+ 	uint32_t status, msi_rearm;
+ 	uint32_t r500_disp_int;
++	bool queue_hotplug = false;
+ 
+ 	status = rs600_irq_ack(rdev, &r500_disp_int);
+ 	if (!status && !r500_disp_int) {
+@@ -251,15 +403,25 @@ int rs600_irq_process(struct radeon_device *rdev)
+ 	}
+ 	while (status || r500_disp_int) {
+ 		/* SW interrupt */
+-		if (G_000040_SW_INT_EN(status))
++		if (G_000044_SW_INT(status))
+ 			radeon_fence_process(rdev);
+ 		/* Vertical blank interrupts */
+ 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
+ 			drm_handle_vblank(rdev->ddev, 0);
+ 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
+ 			drm_handle_vblank(rdev->ddev, 1);
++		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
++			queue_hotplug = true;
++			DRM_DEBUG("HPD1\n");
++		}
++		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
++			queue_hotplug = true;
++			DRM_DEBUG("HPD2\n");
++		}
+ 		status = rs600_irq_ack(rdev, &r500_disp_int);
+ 	}
++	if (queue_hotplug)
++		queue_work(rdev->wq, &rdev->hotplug_work);
+ 	if (rdev->msi_enabled) {
+ 		switch (rdev->family) {
+ 		case CHIP_RS600:
+@@ -397,8 +559,8 @@ static int rs600_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	rs600_irq_set(rdev);
++	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+@@ -432,6 +594,8 @@ int rs600_resume(struct radeon_device *rdev)
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 	/* Resume clock after posting */
+ 	rv515_clock_startup(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return rs600_startup(rdev);
+ }
+ 
+@@ -446,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
+ 
+ void rs600_fini(struct radeon_device *rdev)
+ {
+-	rs600_suspend(rdev);
+ 	r100_cp_fini(rdev);
+ 	r100_wb_fini(rdev);
+ 	r100_ib_fini(rdev);
+@@ -454,7 +617,7 @@ void rs600_fini(struct radeon_device *rdev)
+ 	rs600_gart_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+@@ -491,10 +654,9 @@ int rs600_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if cards are posted or not */
+-	if (!radeon_card_posted(rdev) && rdev->bios) {
+-		DRM_INFO("GPU not posted. posting now...\n");
+-		atom_asic_init(rdev->mode_info.atom_context);
+-	}
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
++
+ 	/* Initialize clocks */
+ 	radeon_get_clock_info(rdev->ddev);
+ 	/* Initialize power management */
+@@ -502,7 +664,7 @@ int rs600_init(struct radeon_device *rdev)
+ 	/* Get vram informations */
+ 	rs600_vram_info(rdev);
+ 	/* Initialize memory controller (also test AGP) */
+-	r = r420_mc_init(rdev);
++	r = rs600_mc_init(rdev);
+ 	if (r)
+ 		return r;
+ 	rs600_debugfs(rdev);
+@@ -514,7 +676,7 @@ int rs600_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
+ 	r = rs600_gart_init(rdev);
+@@ -526,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
+ 	if (r) {
+ 		/* Something went wrong with the accel init, so stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		rs600_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
+diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
+index 8130892..c1c8f58 100644
+--- a/drivers/gpu/drm/radeon/rs600d.h
++++ b/drivers/gpu/drm/radeon/rs600d.h
+@@ -30,27 +30,12 @@
+ 
+ /* Registers */
+ #define R_000040_GEN_INT_CNTL                        0x000040
+-#define   S_000040_DISPLAY_INT_STATUS(x)               (((x) & 0x1) << 0)
+-#define   G_000040_DISPLAY_INT_STATUS(x)               (((x) >> 0) & 0x1)
+-#define   C_000040_DISPLAY_INT_STATUS                  0xFFFFFFFE
+-#define   S_000040_DMA_VIPH0_INT_EN(x)                 (((x) & 0x1) << 12)
+-#define   G_000040_DMA_VIPH0_INT_EN(x)                 (((x) >> 12) & 0x1)
+-#define   C_000040_DMA_VIPH0_INT_EN                    0xFFFFEFFF
+-#define   S_000040_CRTC2_VSYNC(x)                      (((x) & 0x1) << 6)
+-#define   G_000040_CRTC2_VSYNC(x)                      (((x) >> 6) & 0x1)
+-#define   C_000040_CRTC2_VSYNC                         0xFFFFFFBF
+-#define   S_000040_SNAPSHOT2(x)                        (((x) & 0x1) << 7)
+-#define   G_000040_SNAPSHOT2(x)                        (((x) >> 7) & 0x1)
+-#define   C_000040_SNAPSHOT2                           0xFFFFFF7F
+-#define   S_000040_CRTC2_VBLANK(x)                     (((x) & 0x1) << 9)
+-#define   G_000040_CRTC2_VBLANK(x)                     (((x) >> 9) & 0x1)
+-#define   C_000040_CRTC2_VBLANK                        0xFFFFFDFF
+-#define   S_000040_FP2_DETECT(x)                       (((x) & 0x1) << 10)
+-#define   G_000040_FP2_DETECT(x)                       (((x) >> 10) & 0x1)
+-#define   C_000040_FP2_DETECT                          0xFFFFFBFF
+-#define   S_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) & 0x1) << 11)
+-#define   G_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) >> 11) & 0x1)
+-#define   C_000040_VSYNC_DIFF_OVER_LIMIT               0xFFFFF7FF
++#define   S_000040_SCRATCH_INT_MASK(x)                 (((x) & 0x1) << 18)
++#define   G_000040_SCRATCH_INT_MASK(x)                 (((x) >> 18) & 0x1)
++#define   C_000040_SCRATCH_INT_MASK                    0xFFFBFFFF
++#define   S_000040_GUI_IDLE_MASK(x)                    (((x) & 0x1) << 19)
++#define   G_000040_GUI_IDLE_MASK(x)                    (((x) >> 19) & 0x1)
++#define   C_000040_GUI_IDLE_MASK                       0xFFF7FFFF
+ #define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
+ #define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
+ #define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
+@@ -370,7 +355,90 @@
+ #define   S_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 5)
+ #define   G_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) >> 5) & 0x1)
+ #define   C_007EDC_LB_D2_VBLANK_INTERRUPT              0xFFFFFFDF
+-
++#define   S_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 16)
++#define   G_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) >> 16) & 0x1)
++#define   C_007EDC_DACA_AUTODETECT_INTERRUPT           0xFFFEFFFF
++#define   S_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 17)
++#define   G_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) >> 17) & 0x1)
++#define   C_007EDC_DACB_AUTODETECT_INTERRUPT           0xFFFDFFFF
++#define   S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) & 0x1) << 18)
++#define   G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) >> 18) & 0x1)
++#define   C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT       0xFFFBFFFF
++#define   S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) & 0x1) << 19)
++#define   G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) >> 19) & 0x1)
++#define   C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT       0xFFF7FFFF
++#define R_007828_DACA_AUTODETECT_CONTROL               0x007828
++#define   S_007828_DACA_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
++#define   G_007828_DACA_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
++#define   C_007828_DACA_AUTODETECT_MODE                0xFFFFFFFC
++#define   S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
++#define   G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
++#define   C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
++#define   S_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
++#define   G_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
++#define   C_007828_DACA_AUTODETECT_CHECK_MASK          0xFFFCFFFF
++#define R_007838_DACA_AUTODETECT_INT_CONTROL           0x007838
++#define   S_007838_DACA_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
++#define   C_007838_DACA_DACA_AUTODETECT_ACK            0xFFFFFFFE
++#define   S_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
++#define   G_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
++#define   C_007838_DACA_AUTODETECT_INT_ENABLE          0xFFFCFFFF
++#define R_007A28_DACB_AUTODETECT_CONTROL               0x007A28
++#define   S_007A28_DACB_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
++#define   G_007A28_DACB_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
++#define   C_007A28_DACB_AUTODETECT_MODE                0xFFFFFFFC
++#define   S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
++#define   G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
++#define   C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
++#define   S_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
++#define   G_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
++#define   C_007A28_DACB_AUTODETECT_CHECK_MASK          0xFFFCFFFF
++#define R_007A38_DACB_AUTODETECT_INT_CONTROL           0x007A38
++#define   S_007A38_DACB_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
++#define   C_007A38_DACB_DACA_AUTODETECT_ACK            0xFFFFFFFE
++#define   S_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
++#define   G_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
++#define   C_007A38_DACB_AUTODETECT_INT_ENABLE          0xFFFCFFFF
++#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL           0x007D00
++#define   S_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) & 0x1) << 0)
++#define   G_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) >> 0) & 0x1)
++#define   C_007D00_DC_HOT_PLUG_DETECT1_EN              0xFFFFFFFE
++#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS        0x007D04
++#define   S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) & 0x1) << 0)
++#define   G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) >> 0) & 0x1)
++#define   C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS      0xFFFFFFFE
++#define   S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) & 0x1) << 1)
++#define   G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) >> 1) & 0x1)
++#define   C_007D04_DC_HOT_PLUG_DETECT1_SENSE           0xFFFFFFFD
++#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL       0x007D08
++#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x)      (((x) & 0x1) << 0)
++#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK         0xFFFFFFFE
++#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
++#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
++#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY    0xFFFFFEFF
++#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) & 0x1) << 16)
++#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) >> 16) & 0x1)
++#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_EN          0xFFFEFFFF
++#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL           0x007D10
++#define   S_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) & 0x1) << 0)
++#define   G_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) >> 0) & 0x1)
++#define   C_007D10_DC_HOT_PLUG_DETECT2_EN              0xFFFFFFFE
++#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS        0x007D14
++#define   S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) & 0x1) << 0)
++#define   G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) >> 0) & 0x1)
++#define   C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS      0xFFFFFFFE
++#define   S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) & 0x1) << 1)
++#define   G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) >> 1) & 0x1)
++#define   C_007D14_DC_HOT_PLUG_DETECT2_SENSE           0xFFFFFFFD
++#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL       0x007D18
++#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x)      (((x) & 0x1) << 0)
++#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK         0xFFFFFFFE
++#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
++#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
++#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY    0xFFFFFEFF
++#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) & 0x1) << 16)
++#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) >> 16) & 0x1)
++#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_EN          0xFFFEFFFF
+ 
+ /* MC registers */
+ #define R_000000_MC_STATUS                           0x000000
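
The register block above follows the radeon convention of three generated macros per bit field: S_ shifts a value into the field, G_ extracts it from a register word, and C_ is an AND mask that clears it. A minimal, compilable illustration of a read-modify-write using the new bit-18 SCRATCH_INT_MASK accessors (macro values copied from the hunk above):

	#include <stdio.h>
	#include <stdint.h>

	#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
	#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
	#define C_000040_SCRATCH_INT_MASK    0xFFFBFFFF

	int main(void)
	{
		uint32_t reg = 0xDEADBEEF;
		/* read-modify-write: clear the field, then set it to 1 */
		reg = (reg & C_000040_SCRATCH_INT_MASK) | S_000040_SCRATCH_INT_MASK(1);
		printf("SCRATCH_INT_MASK = %u\n", G_000040_SCRATCH_INT_MASK(reg));
		return 0;
	}
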
+diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
+index b12ff76..06e2771 100644
+--- a/drivers/gpu/drm/radeon/rs690.c
++++ b/drivers/gpu/drm/radeon/rs690.c
+@@ -162,6 +162,22 @@ void rs690_vram_info(struct radeon_device *rdev)
+ 	rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+ }
+ 
++static int rs690_mc_init(struct radeon_device *rdev)
++{
++	int r;
++	u32 tmp;
++
++	/* Setup GPU memory space */
++	tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
++	rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
++	rdev->mc.gtt_location = 0xFFFFFFFFUL;
++	r = radeon_mc_setup(rdev);
++	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
++	if (r)
++		return r;
++	return 0;
++}
++
+ void rs690_line_buffer_adjust(struct radeon_device *rdev,
+ 			      struct drm_display_mode *mode1,
+ 			      struct drm_display_mode *mode2)
+@@ -245,8 +261,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+ 
+ 	b.full = rfixed_const(mode->crtc_hdisplay);
+ 	c.full = rfixed_const(256);
+-	a.full = rfixed_mul(wm->num_line_pair, b);
+-	request_fifo_depth.full = rfixed_div(a, c);
++	a.full = rfixed_div(b, c);
++	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
++	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
+ 	if (a.full < rfixed_const(4)) {
+ 		wm->lb_request_fifo_depth = 4;
+ 	} else {
+@@ -375,6 +392,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+ 	a.full = rfixed_const(16);
+ 	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
+ 	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
++	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ 
+ 	/* Determine estimated width */
+ 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+@@ -384,6 +402,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
+ 	} else {
+ 		a.full = rfixed_const(16);
+ 		wm->priority_mark.full = rfixed_div(estimated_width, a);
++		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+ 	}
+ }
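
The two watermark hunks above change the order of the fixed-point operations (divide the display width by 256 before multiplying by the line-pair count, instead of the reverse) and round results up with rfixed_ceil(), so truncation can no longer understate FIFO depth or priority marks. fixed20_12 carries 12 fractional bits; a self-contained sketch of the arithmetic (helper names and semantics assumed from radeon_fixed.h):

	#include <stdio.h>
	#include <stdint.h>

	typedef struct { uint32_t full; } fixed20_12;	/* 20.12 fixed point */

	static fixed20_12 fx_const(uint32_t v) { return (fixed20_12){ v << 12 }; }
	static fixed20_12 fx_mul(fixed20_12 a, fixed20_12 b)
	{ return (fixed20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) }; }
	static fixed20_12 fx_div(fixed20_12 a, fixed20_12 b)
	{ return (fixed20_12){ (uint32_t)(((uint64_t)a.full << 12) / b.full) }; }
	static fixed20_12 fx_ceil(fixed20_12 a)
	{ return (fixed20_12){ (a.full + 0xFFF) & ~0xFFFu }; }

	int main(void)
	{
		/* 1024-pixel mode, two line pairs: ceil(1024/256 * 2) = 8 */
		fixed20_12 a = fx_div(fx_const(1024), fx_const(256));
		fixed20_12 depth = fx_ceil(fx_mul(a, fx_const(2)));
		printf("request fifo depth = %u\n", depth.full >> 12);
		return 0;
	}
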
+@@ -606,8 +625,8 @@ static int rs690_startup(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	rs600_irq_set(rdev);
++	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+@@ -641,6 +660,8 @@ int rs690_resume(struct radeon_device *rdev)
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 	/* Resume clock after posting */
+ 	rv515_clock_startup(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return rs690_startup(rdev);
+ }
+ 
+@@ -655,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
+ 
+ void rs690_fini(struct radeon_device *rdev)
+ {
+-	rs690_suspend(rdev);
+ 	r100_cp_fini(rdev);
+ 	r100_wb_fini(rdev);
+ 	r100_ib_fini(rdev);
+@@ -663,7 +683,7 @@ void rs690_fini(struct radeon_device *rdev)
+ 	rs400_gart_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+@@ -701,10 +721,9 @@ int rs690_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if the card is posted or not */
+-	if (!radeon_card_posted(rdev) && rdev->bios) {
+-		DRM_INFO("GPU not posted. posting now...\n");
+-		atom_asic_init(rdev->mode_info.atom_context);
+-	}
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
++
+ 	/* Initialize clocks */
+ 	radeon_get_clock_info(rdev->ddev);
+ 	/* Initialize power management */
+@@ -712,7 +731,7 @@ int rs690_init(struct radeon_device *rdev)
+ 	/* Get VRAM information */
+ 	rs690_vram_info(rdev);
+ 	/* Initialize memory controller (also test AGP) */
+-	r = r420_mc_init(rdev);
++	r = rs690_mc_init(rdev);
+ 	if (r)
+ 		return r;
+ 	rv515_debugfs(rdev);
+@@ -724,7 +743,7 @@ int rs690_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
+ 	r = rs400_gart_init(rdev);
+@@ -736,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
+ 	if (r) {
+ 		/* Something went wrong with the accel init; stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		rs690_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
+diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
+index ba68c9f..0e1e6b8 100644
+--- a/drivers/gpu/drm/radeon/rv515.c
++++ b/drivers/gpu/drm/radeon/rv515.c
+@@ -478,8 +478,8 @@ static int rv515_startup(struct radeon_device *rdev)
+ 			return r;
+ 	}
+ 	/* Enable IRQ */
+-	rdev->irq.sw_int = true;
+ 	rs600_irq_set(rdev);
++	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
+ 	/* 1M ring buffer */
+ 	r = r100_cp_init(rdev, 1024 * 1024);
+ 	if (r) {
+@@ -514,6 +514,8 @@ int rv515_resume(struct radeon_device *rdev)
+ 	atom_asic_init(rdev->mode_info.atom_context);
+ 	/* Resume clock after posting */
+ 	rv515_clock_startup(rdev);
++	/* Initialize surface registers */
++	radeon_surface_init(rdev);
+ 	return rv515_startup(rdev);
+ }
+ 
+@@ -535,16 +537,15 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
+ 
+ void rv515_fini(struct radeon_device *rdev)
+ {
+-	rv515_suspend(rdev);
+ 	r100_cp_fini(rdev);
+ 	r100_wb_fini(rdev);
+ 	r100_ib_fini(rdev);
+ 	radeon_gem_fini(rdev);
+-    rv370_pcie_gart_fini(rdev);
++	rv370_pcie_gart_fini(rdev);
+ 	radeon_agp_fini(rdev);
+ 	radeon_irq_kms_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+@@ -580,10 +581,8 @@ int rv515_init(struct radeon_device *rdev)
+ 			RREG32(R_0007C0_CP_STAT));
+ 	}
+ 	/* check if the card is posted or not */
+-	if (!radeon_card_posted(rdev) && rdev->bios) {
+-		DRM_INFO("GPU not posted. posting now...\n");
+-		atom_asic_init(rdev->mode_info.atom_context);
+-	}
++	if (radeon_boot_test_post_card(rdev) == false)
++		return -EINVAL;
+ 	/* Initialize clocks */
+ 	radeon_get_clock_info(rdev->ddev);
+ 	/* Initialize power management */
+@@ -603,7 +602,7 @@ int rv515_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
+ 	r = rv370_pcie_gart_init(rdev);
+@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
+ 	if (r) {
+ 		/* Something went wrong with the accel init; stop accel */
+ 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+-		rv515_suspend(rdev);
+ 		r100_cp_fini(rdev);
+ 		r100_wb_fini(rdev);
+ 		r100_ib_fini(rdev);
++		radeon_irq_kms_fini(rdev);
+ 		rv370_pcie_gart_fini(rdev);
+ 		radeon_agp_fini(rdev);
+-		radeon_irq_kms_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+ 	return 0;
+@@ -892,8 +890,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+ 
+ 	b.full = rfixed_const(mode->crtc_hdisplay);
+ 	c.full = rfixed_const(256);
+-	a.full = rfixed_mul(wm->num_line_pair, b);
+-	request_fifo_depth.full = rfixed_div(a, c);
++	a.full = rfixed_div(b, c);
++	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
++	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
+ 	if (a.full < rfixed_const(4)) {
+ 		wm->lb_request_fifo_depth = 4;
+ 	} else {
+@@ -995,15 +994,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
+ 	a.full = rfixed_const(16);
+ 	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
+ 	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
++	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
+ 
+ 	/* Determine estimated width */
+ 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
+ 	estimated_width.full = rfixed_div(estimated_width, consumption_time);
+ 	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
+-		wm->priority_mark.full = rfixed_const(10);
++		wm->priority_mark.full = wm->priority_mark_max.full;
+ 	} else {
+ 		a.full = rfixed_const(16);
+ 		wm->priority_mark.full = rfixed_div(estimated_width, a);
++		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
+ 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
+ 	}
+ }
+diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
+index b0efd0d..0302167 100644
+--- a/drivers/gpu/drm/radeon/rv770.c
++++ b/drivers/gpu/drm/radeon/rv770.c
+@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
+ void rv770_pcie_gart_disable(struct radeon_device *rdev)
+ {
+ 	u32 tmp;
+-	int i;
++	int i, r;
+ 
+ 	/* Disable all tables */
+ 	for (i = 0; i < 7; i++)
+@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
+ 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ 	if (rdev->gart.table.vram.robj) {
+-		radeon_object_kunmap(rdev->gart.table.vram.robj);
+-		radeon_object_unpin(rdev->gart.table.vram.robj);
++		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
++		if (likely(r == 0)) {
++			radeon_bo_kunmap(rdev->gart.table.vram.robj);
++			radeon_bo_unpin(rdev->gart.table.vram.robj);
++			radeon_bo_unreserve(rdev->gart.table.vram.robj);
++		}
+ 	}
+ }
+ 
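
This hunk is one instance of the locking discipline the new radeon_bo layer (a thin wrapper over TTM buffer objects) imposes throughout this patch: a bo must be reserved before it is mapped, unmapped, pinned or unpinned, and unreserved afterwards. Schematically, using only functions that appear in this diff (a sketch, error handling elided):

	int r = radeon_bo_reserve(robj, false);	/* may sleep */
	if (likely(r == 0)) {
		radeon_bo_kunmap(robj);		/* drop the CPU mapping */
		radeon_bo_unpin(robj);		/* make it evictable again */
		radeon_bo_unreserve(robj);	/* release the reservation */
	}
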
+@@ -545,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
+ 
+ 	gb_tiling_config |= BANK_SWAPS(1);
+ 
+-	backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+-							rdev->config.rv770.max_backends,
+-							(0xff << rdev->config.rv770.max_backends) & 0xff);
++	if (rdev->family == CHIP_RV740)
++		backend_map = 0x28;
++	else
++		backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
++								rdev->config.rv770.max_backends,
++								(0xff << rdev->config.rv770.max_backends) & 0xff);
+ 	gb_tiling_config |= BACKEND_MAP(backend_map);
+ 
+ 	cc_gc_shader_pipe_config =
+@@ -775,7 +782,6 @@ int rv770_mc_init(struct radeon_device *rdev)
+ 	fixed20_12 a;
+ 	u32 tmp;
+ 	int chansize, numchan;
+-	int r;
+ 
+ 	/* Get VRAM information */
+ 	rdev->mc.vram_is_ddr = true;
+@@ -818,9 +824,6 @@ int rv770_mc_init(struct radeon_device *rdev)
+ 		rdev->mc.real_vram_size = rdev->mc.aper_size;
+ 
+ 	if (rdev->flags & RADEON_IS_AGP) {
+-		r = radeon_agp_init(rdev);
+-		if (r)
+-			return r;
+ 		/* gtt_size is setup by radeon_agp_init */
+ 		rdev->mc.gtt_location = rdev->mc.agp_base;
+ 		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
+@@ -829,11 +832,11 @@ int rv770_mc_init(struct radeon_device *rdev)
+ 		 * AGP so that GPU can catch out of VRAM/AGP access
+ 		 */
+ 		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
+-			/* Enought place before */
++			/* Enough place before */
+ 			rdev->mc.vram_location = rdev->mc.gtt_location -
+ 							rdev->mc.mc_vram_size;
+ 		} else if (tmp > rdev->mc.mc_vram_size) {
+-			/* Enought place after */
++			/* Enough place after */
+ 			rdev->mc.vram_location = rdev->mc.gtt_location +
+ 							rdev->mc.gtt_size;
+ 		} else {
+@@ -870,6 +873,14 @@ static int rv770_startup(struct radeon_device *rdev)
+ {
+ 	int r;
+ 
++	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
++		r = r600_init_microcode(rdev);
++		if (r) {
++			DRM_ERROR("Failed to load firmware!\n");
++			return r;
++		}
++	}
++
+ 	rv770_mc_program(rdev);
+ 	if (rdev->flags & RADEON_IS_AGP) {
+ 		rv770_agp_enable(rdev);
+@@ -879,13 +890,33 @@ static int rv770_startup(struct radeon_device *rdev)
+ 			return r;
+ 	}
+ 	rv770_gpu_init(rdev);
+-
+-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+-			      &rdev->r600_blit.shader_gpu_addr);
++	r = r600_blit_init(rdev);
+ 	if (r) {
+-		DRM_ERROR("failed to pin blit object %d\n", r);
++		r600_blit_fini(rdev);
++		rdev->asic->copy = NULL;
++		dev_warn(rdev->dev, "failed blitter (%d), falling back to memcpy\n", r);
++	}
++	/* pin copy shader into vram */
++	if (rdev->r600_blit.shader_obj) {
++		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
++		if (unlikely(r != 0))
++			return r;
++		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
++				&rdev->r600_blit.shader_gpu_addr);
++		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
++		if (r) {
++			DRM_ERROR("failed to pin blit object %d\n", r);
++			return r;
++		}
++	}
++	/* Enable IRQ */
++	r = r600_irq_init(rdev);
++	if (r) {
++		DRM_ERROR("radeon: IH init failed (%d).\n", r);
++		radeon_irq_kms_fini(rdev);
+ 		return r;
+ 	}
++	r600_irq_set(rdev);
+ 
+ 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ 	if (r)
+@@ -934,13 +965,22 @@ int rv770_resume(struct radeon_device *rdev)
+ 
+ int rv770_suspend(struct radeon_device *rdev)
+ {
++	int r;
++
+ 	/* FIXME: we should wait for ring to be empty */
+ 	r700_cp_stop(rdev);
+ 	rdev->cp.ready = false;
++	r600_irq_suspend(rdev);
+ 	r600_wb_disable(rdev);
+ 	rv770_pcie_gart_disable(rdev);
+ 	/* unpin shader bo */
+-        radeon_object_unpin(rdev->r600_blit.shader_obj);
++	if (rdev->r600_blit.shader_obj) {
++		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
++		if (likely(r == 0)) {
++			radeon_bo_unpin(rdev->r600_blit.shader_obj);
++			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
++		}
++	}
+ 	return 0;
+ }
+ 
+@@ -975,7 +1015,11 @@ int rv770_init(struct radeon_device *rdev)
+ 	if (r)
+ 		return r;
+ 	/* Post card if necessary */
+-	if (!r600_card_posted(rdev) && rdev->bios) {
++	if (!r600_card_posted(rdev)) {
++		if (!rdev->bios) {
++			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
++			return -EINVAL;
++		}
+ 		DRM_INFO("GPU not posted. posting now...\n");
+ 		atom_asic_init(rdev->mode_info.atom_context);
+ 	}
+@@ -994,53 +1038,55 @@ int rv770_init(struct radeon_device *rdev)
+ 	r = radeon_fence_driver_init(rdev);
+ 	if (r)
+ 		return r;
++	if (rdev->flags & RADEON_IS_AGP) {
++		r = radeon_agp_init(rdev);
++		if (r)
++			radeon_agp_disable(rdev);
++	}
+ 	r = rv770_mc_init(rdev);
+ 	if (r)
+ 		return r;
+ 	/* Memory manager */
+-	r = radeon_object_init(rdev);
++	r = radeon_bo_init(rdev);
+ 	if (r)
+ 		return r;
++
++	r = radeon_irq_kms_init(rdev);
++	if (r)
++		return r;
++
+ 	rdev->cp.ring_obj = NULL;
+ 	r600_ring_init(rdev, 1024 * 1024);
+ 
+-	if (!rdev->me_fw || !rdev->pfp_fw) {
+-		r = r600_cp_init_microcode(rdev);
+-		if (r) {
+-			DRM_ERROR("Failed to load firmware!\n");
+-			return r;
+-		}
+-	}
++	rdev->ih.ring_obj = NULL;
++	r600_ih_ring_init(rdev, 64 * 1024);
+ 
+ 	r = r600_pcie_gart_init(rdev);
+ 	if (r)
+ 		return r;
+ 
+ 	rdev->accel_working = true;
+-	r = r600_blit_init(rdev);
+-	if (r) {
+-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
+-		rdev->accel_working = false;
+-	}
+-
+ 	r = rv770_startup(rdev);
+ 	if (r) {
+-		rv770_suspend(rdev);
++		dev_err(rdev->dev, "disabling GPU acceleration\n");
++		r600_cp_fini(rdev);
+ 		r600_wb_fini(rdev);
+-		radeon_ring_fini(rdev);
++		r600_irq_fini(rdev);
++		radeon_irq_kms_fini(rdev);
+ 		rv770_pcie_gart_fini(rdev);
+ 		rdev->accel_working = false;
+ 	}
+ 	if (rdev->accel_working) {
+ 		r = radeon_ib_pool_init(rdev);
+ 		if (r) {
+-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+-			rdev->accel_working = false;
+-		}
+-		r = r600_ib_test(rdev);
+-		if (r) {
+-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
++			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ 			rdev->accel_working = false;
++		} else {
++			r = r600_ib_test(rdev);
++			if (r) {
++				dev_err(rdev->dev, "IB test failed (%d).\n", r);
++				rdev->accel_working = false;
++			}
+ 		}
+ 	}
+ 	return 0;
+@@ -1048,18 +1094,17 @@ int rv770_init(struct radeon_device *rdev)
+ 
+ void rv770_fini(struct radeon_device *rdev)
+ {
+-	rv770_suspend(rdev);
+-
+ 	r600_blit_fini(rdev);
+-	radeon_ring_fini(rdev);
++	r600_cp_fini(rdev);
+ 	r600_wb_fini(rdev);
++	r600_irq_fini(rdev);
++	radeon_irq_kms_fini(rdev);
+ 	rv770_pcie_gart_fini(rdev);
+ 	radeon_gem_fini(rdev);
+ 	radeon_fence_driver_fini(rdev);
+ 	radeon_clocks_fini(rdev);
+-	if (rdev->flags & RADEON_IS_AGP)
+-		radeon_agp_fini(rdev);
+-	radeon_object_fini(rdev);
++	radeon_agp_fini(rdev);
++	radeon_bo_fini(rdev);
+ 	radeon_atombios_fini(rdev);
+ 	kfree(rdev->bios);
+ 	rdev->bios = NULL;
+diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
+index eee52aa..021de44 100644
+--- a/drivers/gpu/drm/savage/savage_drv.c
++++ b/drivers/gpu/drm/savage/savage_drv.c
+@@ -50,7 +50,7 @@ static struct drm_driver driver = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = drm_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
+index e725cc0..4fd1f06 100644
+--- a/drivers/gpu/drm/sis/sis_drv.c
++++ b/drivers/gpu/drm/sis/sis_drv.c
+@@ -80,7 +80,7 @@ static struct drm_driver driver = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = drm_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
+index 012ff2e..ec5a43e 100644
+--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
++++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
+@@ -48,7 +48,7 @@ static struct drm_driver driver = {
+ 		 .owner = THIS_MODULE,
+ 		 .open = drm_open,
+ 		 .release = drm_release,
+-		 .ioctl = drm_ioctl,
++		 .unlocked_ioctl = drm_ioctl,
+ 		 .mmap = drm_mmap,
+ 		 .poll = drm_poll,
+ 		 .fasync = drm_fasync,
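
savage, sis and tdfx all get the same one-line conversion: drm_ioctl() now performs its own locking, so it is hooked up as .unlocked_ioctl and these drivers no longer take the Big Kernel Lock for ioctl dispatch. The resulting file_operations shape (a sketch; the real drivers embed this inside struct drm_driver):

	static const struct file_operations fops = {
		.owner          = THIS_MODULE,
		.open           = drm_open,
		.release        = drm_release,
		.unlocked_ioctl = drm_ioctl,	/* was: .ioctl = drm_ioctl */
		.mmap           = drm_mmap,
		.poll           = drm_poll,
		.fasync         = drm_fasync,
	};
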
+diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
+index b0a9de7..1e138f5 100644
+--- a/drivers/gpu/drm/ttm/Makefile
++++ b/drivers/gpu/drm/ttm/Makefile
+@@ -3,6 +3,7 @@
+ 
+ ccflags-y := -Iinclude/drm
+ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+-	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
++	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
++	ttm_object.o ttm_lock.o ttm_execbuf_util.o
+ 
+ obj-$(CONFIG_DRM_TTM) += ttm.o
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 87c0625..c7320ce 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -27,6 +27,14 @@
+ /*
+  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+  */
++/* Notes:
++ *
++ * We store the bo pointer in the drm_mm_node struct so we know which
++ * bo owns a specific node. Nothing protects this pointer, so it must
++ * only be accessed while holding the global LRU lock, and it must be
++ * reset to NULL whenever a node is freed.
++ */
+ 
+ #include "ttm/ttm_module.h"
+ #include "ttm/ttm_bo_driver.h"
+@@ -51,6 +59,59 @@ static struct attribute ttm_bo_count = {
+ 	.mode = S_IRUGO
+ };
+ 
++static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
++{
++	int i;
++
++	for (i = 0; i <= TTM_PL_PRIV5; i++)
++		if (flags & (1 << i)) {
++			*mem_type = i;
++			return 0;
++		}
++	return -EINVAL;
++}
++
++static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
++{
++	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++
++	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
++	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
++	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
++	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
++	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
++	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
++	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
++	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
++		man->available_caching);
++	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
++		man->default_caching);
++	if (mem_type != TTM_PL_SYSTEM) {
++		spin_lock(&bdev->glob->lru_lock);
++		drm_mm_debug_table(&man->manager, TTM_PFX);
++		spin_unlock(&bdev->glob->lru_lock);
++	}
++}
++
++static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
++					struct ttm_placement *placement)
++{
++	int i, ret, mem_type;
++
++	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
++		bo, bo->mem.num_pages, bo->mem.size >> 10,
++		bo->mem.size >> 20);
++	for (i = 0; i < placement->num_placement; i++) {
++		ret = ttm_mem_type_from_flags(placement->placement[i],
++						&mem_type);
++		if (ret)
++			return;
++		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
++			i, placement->placement[i], mem_type);
++		ttm_mem_type_debug(bo->bdev, mem_type);
++	}
++}
++
+ static ssize_t ttm_bo_global_show(struct kobject *kobj,
+ 				  struct attribute *attr,
+ 				  char *buffer)
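
ttm_mem_type_from_flags() above recovers a memory-type index from a placement word in which the type is encoded as a single set bit (TTM_PL_FLAG_VRAM == 1 << TTM_PL_VRAM, and so on). A compilable userspace rendering of the same loop (enum values assumed from ttm/ttm_placement.h of this vintage):

	#include <stdio.h>
	#include <stdint.h>

	enum { TTM_PL_SYSTEM = 0, TTM_PL_TT = 1, TTM_PL_VRAM = 2, TTM_PL_PRIV5 = 8 };

	static int mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
	{
		int i;

		for (i = 0; i <= TTM_PL_PRIV5; i++)
			if (flags & (1u << i)) {
				*mem_type = i;
				return 0;
			}
		return -1;	/* -EINVAL in the kernel */
	}

	int main(void)
	{
		uint32_t t;

		if (mem_type_from_flags(1u << TTM_PL_VRAM, &t) == 0)
			printf("mem_type = %u\n", t);	/* prints 2 */
		return 0;
	}
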
+@@ -117,12 +178,13 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+ 		ret = wait_event_interruptible(bo->event_queue,
+ 					       atomic_read(&bo->reserved) == 0);
+ 		if (unlikely(ret != 0))
+-			return -ERESTART;
++			return ret;
+ 	} else {
+ 		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ 	}
+ 	return 0;
+ }
++EXPORT_SYMBOL(ttm_bo_wait_unreserved);
+ 
+ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+ {
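
The hunk above is the first of many that retire TTM's private -ERESTART code: wait_event_interruptible() already returns -ERESTARTSYS when a signal arrives, and propagating that value unchanged lets the syscall-exit path restart the call transparently after signal delivery. The pattern used through the rest of the file (sketch):

	ret = wait_event_interruptible(bo->event_queue,
				       atomic_read(&bo->reserved) == 0);
	if (unlikely(ret != 0))
		return ret;	/* 0 or -ERESTARTSYS, no private translation */
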
+@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
+ /*
+  * Call bo->mutex locked.
+  */
+-
+ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+ {
+ 	struct ttm_bo_device *bdev = bo->bdev;
+@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+ 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ 					page_flags | TTM_PAGE_FLAG_USER,
+ 					glob->dummy_read_page);
+-		if (unlikely(bo->ttm == NULL))
++		if (unlikely(bo->ttm == NULL)) {
+ 			ret = -ENOMEM;
+-		break;
++			break;
++		}
+ 
+ 		ret = ttm_tt_set_user(bo->ttm, current,
+ 				      bo->buffer_start, bo->num_pages);
+@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ 		}
+ 
+ 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+-
+-			struct ttm_mem_reg *old_mem = &bo->mem;
+-			uint32_t save_flags = old_mem->placement;
+-
+-			*old_mem = *mem;
++			bo->mem = *mem;
+ 			mem->mm_node = NULL;
+-			ttm_flag_masked(&save_flags, mem->placement,
+-					TTM_PL_MASK_MEMTYPE);
+ 			goto moved;
+ 		}
+ 
+@@ -370,7 +426,8 @@ moved:
+ 		    bdev->man[bo->mem.mem_type].gpu_offset;
+ 		bo->cur_placement = bo->mem.placement;
+ 		spin_unlock(&bo->lock);
+-	}
++	} else
++		bo->offset = 0;
+ 
+ 	return 0;
+ 
+@@ -408,6 +465,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+ 		spin_unlock(&bo->lock);
+ 
+ 		spin_lock(&glob->lru_lock);
++		put_count = ttm_bo_del_from_lru(bo);
++
+ 		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
+ 		BUG_ON(ret);
+ 		if (bo->ttm)
+@@ -415,19 +474,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+ 
+ 		if (!list_empty(&bo->ddestroy)) {
+ 			list_del_init(&bo->ddestroy);
+-			kref_put(&bo->list_kref, ttm_bo_ref_bug);
++			++put_count;
+ 		}
+ 		if (bo->mem.mm_node) {
++			bo->mem.mm_node->private = NULL;
+ 			drm_mm_put_block(bo->mem.mm_node);
+ 			bo->mem.mm_node = NULL;
+ 		}
+-		put_count = ttm_bo_del_from_lru(bo);
+ 		spin_unlock(&glob->lru_lock);
+ 
+ 		atomic_set(&bo->reserved, 0);
+ 
+ 		while (put_count--)
+-			kref_put(&bo->list_kref, ttm_bo_release_list);
++			kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ 
+ 		return 0;
+ 	}
+@@ -465,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+ {
+ 	struct ttm_bo_global *glob = bdev->glob;
+-	struct ttm_buffer_object *entry, *nentry;
+-	struct list_head *list, *next;
+-	int ret;
++	struct ttm_buffer_object *entry = NULL;
++	int ret = 0;
+ 
+ 	spin_lock(&glob->lru_lock);
+-	list_for_each_safe(list, next, &bdev->ddestroy) {
+-		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
+-		nentry = NULL;
++	if (list_empty(&bdev->ddestroy))
++		goto out_unlock;
+ 
+-		/*
+-		 * Protect the next list entry from destruction while we
+-		 * unlock the lru_lock.
+-		 */
++	entry = list_first_entry(&bdev->ddestroy,
++		struct ttm_buffer_object, ddestroy);
++	kref_get(&entry->list_kref);
+ 
+-		if (next != &bdev->ddestroy) {
+-			nentry = list_entry(next, struct ttm_buffer_object,
+-					    ddestroy);
++	for (;;) {
++		struct ttm_buffer_object *nentry = NULL;
++
++		if (entry->ddestroy.next != &bdev->ddestroy) {
++			nentry = list_first_entry(&entry->ddestroy,
++				struct ttm_buffer_object, ddestroy);
+ 			kref_get(&nentry->list_kref);
+ 		}
+-		kref_get(&entry->list_kref);
+ 
+ 		spin_unlock(&glob->lru_lock);
+ 		ret = ttm_bo_cleanup_refs(entry, remove_all);
+ 		kref_put(&entry->list_kref, ttm_bo_release_list);
++		entry = nentry;
++
++		if (ret || !entry)
++			goto out;
+ 
+ 		spin_lock(&glob->lru_lock);
+-		if (nentry) {
+-			bool next_onlist = !list_empty(next);
+-			spin_unlock(&glob->lru_lock);
+-			kref_put(&nentry->list_kref, ttm_bo_release_list);
+-			spin_lock(&glob->lru_lock);
+-			/*
+-			 * Someone might have raced us and removed the
+-			 * next entry from the list. We don't bother restarting
+-			 * list traversal.
+-			 */
+-
+-			if (!next_onlist)
+-				break;
+-		}
+-		if (ret)
++		if (list_empty(&entry->ddestroy))
+ 			break;
+ 	}
+-	ret = !list_empty(&bdev->ddestroy);
+-	spin_unlock(&glob->lru_lock);
+ 
++out_unlock:
++	spin_unlock(&glob->lru_lock);
++out:
++	if (entry)
++		kref_put(&entry->list_kref, ttm_bo_release_list);
+ 	return ret;
+ }
+ 
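
The rewritten ttm_bo_delayed_delete() walks the ddestroy list by pinning the next entry with a kref before dropping the LRU lock, so the current entry can be cleaned up, and possibly freed, without invalidating the iterator. A condensed sketch of the loop above (lock entry/exit paths elided):

	kref_get(&entry->list_kref);			/* pin current */
	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
					struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);	/* pin successor */
		}
		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;				/* still valid: pinned */
		if (ret || !entry)
			break;
		spin_lock(&glob->lru_lock);
	}
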
+@@ -554,24 +605,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+ }
+ EXPORT_SYMBOL(ttm_bo_unref);
+ 
+-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
+-			bool interruptible, bool no_wait)
++static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
++			bool no_wait)
+ {
+-	int ret = 0;
+ 	struct ttm_bo_device *bdev = bo->bdev;
+ 	struct ttm_bo_global *glob = bo->glob;
+ 	struct ttm_mem_reg evict_mem;
+-	uint32_t proposed_placement;
+-
+-	if (bo->mem.mem_type != mem_type)
+-		goto out;
++	struct ttm_placement placement;
++	int ret = 0;
+ 
+ 	spin_lock(&bo->lock);
+ 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ 	spin_unlock(&bo->lock);
+ 
+ 	if (unlikely(ret != 0)) {
+-		if (ret != -ERESTART) {
++		if (ret != -ERESTARTSYS) {
+ 			printk(KERN_ERR TTM_PFX
+ 			       "Failed to expire sync object before "
+ 			       "buffer eviction.\n");
+@@ -584,116 +632,165 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
+ 	evict_mem = bo->mem;
+ 	evict_mem.mm_node = NULL;
+ 
+-	proposed_placement = bdev->driver->evict_flags(bo);
+-
+-	ret = ttm_bo_mem_space(bo, proposed_placement,
+-			       &evict_mem, interruptible, no_wait);
+-	if (unlikely(ret != 0 && ret != -ERESTART))
+-		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
+-				       &evict_mem, interruptible, no_wait);
+-
++	placement.fpfn = 0;
++	placement.lpfn = 0;
++	placement.num_placement = 0;
++	placement.num_busy_placement = 0;
++	bdev->driver->evict_flags(bo, &placement);
++	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
++				no_wait);
+ 	if (ret) {
+-		if (ret != -ERESTART)
++		if (ret != -ERESTARTSYS) {
+ 			printk(KERN_ERR TTM_PFX
+ 			       "Failed to find memory space for "
+ 			       "buffer 0x%p eviction.\n", bo);
++			ttm_bo_mem_space_debug(bo, &placement);
++		}
+ 		goto out;
+ 	}
+ 
+ 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+ 				     no_wait);
+ 	if (ret) {
+-		if (ret != -ERESTART)
++		if (ret != -ERESTARTSYS)
+ 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
++		spin_lock(&glob->lru_lock);
++		if (evict_mem.mm_node) {
++			evict_mem.mm_node->private = NULL;
++			drm_mm_put_block(evict_mem.mm_node);
++			evict_mem.mm_node = NULL;
++		}
++		spin_unlock(&glob->lru_lock);
+ 		goto out;
+ 	}
+-
+-	spin_lock(&glob->lru_lock);
+-	if (evict_mem.mm_node) {
+-		drm_mm_put_block(evict_mem.mm_node);
+-		evict_mem.mm_node = NULL;
+-	}
+-	spin_unlock(&glob->lru_lock);
+ 	bo->evicted = true;
+ out:
+ 	return ret;
+ }
+ 
+-/**
+- * Repeatedly evict memory from the LRU for @mem_type until we create enough
+- * space, or we've evicted everything and there isn't enough space.
+- */
+-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
+-				  struct ttm_mem_reg *mem,
+-				  uint32_t mem_type,
+-				  bool interruptible, bool no_wait)
++static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
++				uint32_t mem_type,
++				bool interruptible, bool no_wait)
+ {
+ 	struct ttm_bo_global *glob = bdev->glob;
+-	struct drm_mm_node *node;
+-	struct ttm_buffer_object *entry;
+ 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+-	struct list_head *lru;
+-	unsigned long num_pages = mem->num_pages;
+-	int put_count = 0;
+-	int ret;
+-
+-retry_pre_get:
+-	ret = drm_mm_pre_get(&man->manager);
+-	if (unlikely(ret != 0))
+-		return ret;
++	struct ttm_buffer_object *bo;
++	int ret, put_count = 0;
+ 
++retry:
+ 	spin_lock(&glob->lru_lock);
+-	do {
+-		node = drm_mm_search_free(&man->manager, num_pages,
+-					  mem->page_alignment, 1);
+-		if (node)
+-			break;
++	if (list_empty(&man->lru)) {
++		spin_unlock(&glob->lru_lock);
++		return -EBUSY;
++	}
+ 
+-		lru = &man->lru;
+-		if (list_empty(lru))
+-			break;
++	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
++	kref_get(&bo->list_kref);
+ 
+-		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
+-		kref_get(&entry->list_kref);
++	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ 
+-		ret =
+-		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
+-					  false, 0);
++	if (unlikely(ret == -EBUSY)) {
++		spin_unlock(&glob->lru_lock);
++		if (likely(!no_wait))
++			ret = ttm_bo_wait_unreserved(bo, interruptible);
+ 
+-		if (likely(ret == 0))
+-			put_count = ttm_bo_del_from_lru(entry);
++		kref_put(&bo->list_kref, ttm_bo_release_list);
+ 
+-		spin_unlock(&glob->lru_lock);
++		/**
++		 * We *need* to retry after releasing the lru lock.
++		 */
+ 
+ 		if (unlikely(ret != 0))
+ 			return ret;
++		goto retry;
++	}
+ 
+-		while (put_count--)
+-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
++	put_count = ttm_bo_del_from_lru(bo);
++	spin_unlock(&glob->lru_lock);
++
++	BUG_ON(ret != 0);
++
++	while (put_count--)
++		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ 
+-		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
++	ret = ttm_bo_evict(bo, interruptible, no_wait);
++	ttm_bo_unreserve(bo);
+ 
+-		ttm_bo_unreserve(entry);
++	kref_put(&bo->list_kref, ttm_bo_release_list);
++	return ret;
++}
+ 
+-		kref_put(&entry->list_kref, ttm_bo_release_list);
+-		if (ret)
++static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
++				struct ttm_mem_type_manager *man,
++				struct ttm_placement *placement,
++				struct ttm_mem_reg *mem,
++				struct drm_mm_node **node)
++{
++	struct ttm_bo_global *glob = bo->glob;
++	unsigned long lpfn;
++	int ret;
++
++	lpfn = placement->lpfn;
++	if (!lpfn)
++		lpfn = man->size;
++	*node = NULL;
++	do {
++		ret = drm_mm_pre_get(&man->manager);
++		if (unlikely(ret))
+ 			return ret;
+ 
+ 		spin_lock(&glob->lru_lock);
+-	} while (1);
+-
+-	if (!node) {
++		*node = drm_mm_search_free_in_range(&man->manager,
++					mem->num_pages, mem->page_alignment,
++					placement->fpfn, lpfn, 1);
++		if (unlikely(*node == NULL)) {
++			spin_unlock(&glob->lru_lock);
++			return 0;
++		}
++		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
++							mem->page_alignment,
++							placement->fpfn,
++							lpfn);
+ 		spin_unlock(&glob->lru_lock);
+-		return -ENOMEM;
+-	}
++	} while (*node == NULL);
++	return 0;
++}
+ 
+-	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
+-	if (unlikely(!node)) {
+-		spin_unlock(&glob->lru_lock);
+-		goto retry_pre_get;
+-	}
++/**
++ * Repeatedly evict memory from the LRU for @mem_type until we create enough
++ * space, or we've evicted everything and there isn't enough space.
++ */
++static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
++					uint32_t mem_type,
++					struct ttm_placement *placement,
++					struct ttm_mem_reg *mem,
++					bool interruptible, bool no_wait)
++{
++	struct ttm_bo_device *bdev = bo->bdev;
++	struct ttm_bo_global *glob = bdev->glob;
++	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
++	struct drm_mm_node *node;
++	int ret;
+ 
+-	spin_unlock(&glob->lru_lock);
++	do {
++		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
++		if (unlikely(ret != 0))
++			return ret;
++		if (node)
++			break;
++		spin_lock(&glob->lru_lock);
++		if (list_empty(&man->lru)) {
++			spin_unlock(&glob->lru_lock);
++			break;
++		}
++		spin_unlock(&glob->lru_lock);
++		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
++						no_wait);
++		if (unlikely(ret != 0))
++			return ret;
++	} while (1);
++	if (node == NULL)
++		return -ENOMEM;
+ 	mem->mm_node = node;
+ 	mem->mem_type = mem_type;
+ 	return 0;
+@@ -724,7 +821,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+ 	return result;
+ }
+ 
+-
+ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+ 				 bool disallow_fixed,
+ 				 uint32_t mem_type,
+@@ -757,66 +853,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+  * space.
+  */
+ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+-		     uint32_t proposed_placement,
+-		     struct ttm_mem_reg *mem,
+-		     bool interruptible, bool no_wait)
++			struct ttm_placement *placement,
++			struct ttm_mem_reg *mem,
++			bool interruptible, bool no_wait)
+ {
+ 	struct ttm_bo_device *bdev = bo->bdev;
+-	struct ttm_bo_global *glob = bo->glob;
+ 	struct ttm_mem_type_manager *man;
+-
+-	uint32_t num_prios = bdev->driver->num_mem_type_prio;
+-	const uint32_t *prios = bdev->driver->mem_type_prio;
+-	uint32_t i;
+ 	uint32_t mem_type = TTM_PL_SYSTEM;
+ 	uint32_t cur_flags = 0;
+ 	bool type_found = false;
+ 	bool type_ok = false;
+-	bool has_eagain = false;
++	bool has_erestartsys = false;
+ 	struct drm_mm_node *node = NULL;
+-	int ret;
++	int i, ret;
+ 
+ 	mem->mm_node = NULL;
+-	for (i = 0; i < num_prios; ++i) {
+-		mem_type = prios[i];
++	for (i = 0; i < placement->num_placement; ++i) {
++		ret = ttm_mem_type_from_flags(placement->placement[i],
++						&mem_type);
++		if (ret)
++			return ret;
+ 		man = &bdev->man[mem_type];
+ 
+ 		type_ok = ttm_bo_mt_compatible(man,
+-					       bo->type == ttm_bo_type_user,
+-					       mem_type, proposed_placement,
+-					       &cur_flags);
++						bo->type == ttm_bo_type_user,
++						mem_type,
++						placement->placement[i],
++						&cur_flags);
+ 
+ 		if (!type_ok)
+ 			continue;
+ 
+ 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+ 						  cur_flags);
++		/*
++		 * Copy the access and other non-mapping-related flag bits
++		 * from the memory placement flags into the current flags
++		 */
++		ttm_flag_masked(&cur_flags, placement->placement[i],
++				~TTM_PL_MASK_MEMTYPE);
+ 
+ 		if (mem_type == TTM_PL_SYSTEM)
+ 			break;
+ 
+ 		if (man->has_type && man->use_type) {
+ 			type_found = true;
+-			do {
+-				ret = drm_mm_pre_get(&man->manager);
+-				if (unlikely(ret))
+-					return ret;
+-
+-				spin_lock(&glob->lru_lock);
+-				node = drm_mm_search_free(&man->manager,
+-							  mem->num_pages,
+-							  mem->page_alignment,
+-							  1);
+-				if (unlikely(!node)) {
+-					spin_unlock(&glob->lru_lock);
+-					break;
+-				}
+-				node = drm_mm_get_block_atomic(node,
+-							       mem->num_pages,
+-							       mem->
+-							       page_alignment);
+-				spin_unlock(&glob->lru_lock);
+-			} while (!node);
++			ret = ttm_bo_man_get_node(bo, man, placement, mem,
++							&node);
++			if (unlikely(ret))
++				return ret;
+ 		}
+ 		if (node)
+ 			break;
+@@ -826,67 +911,74 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ 		mem->mm_node = node;
+ 		mem->mem_type = mem_type;
+ 		mem->placement = cur_flags;
++		if (node)
++			node->private = bo;
+ 		return 0;
+ 	}
+ 
+ 	if (!type_found)
+ 		return -EINVAL;
+ 
+-	num_prios = bdev->driver->num_mem_busy_prio;
+-	prios = bdev->driver->mem_busy_prio;
+-
+-	for (i = 0; i < num_prios; ++i) {
+-		mem_type = prios[i];
++	for (i = 0; i < placement->num_busy_placement; ++i) {
++		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
++						&mem_type);
++		if (ret)
++			return ret;
+ 		man = &bdev->man[mem_type];
+-
+ 		if (!man->has_type)
+ 			continue;
+-
+ 		if (!ttm_bo_mt_compatible(man,
+-					  bo->type == ttm_bo_type_user,
+-					  mem_type,
+-					  proposed_placement, &cur_flags))
++						bo->type == ttm_bo_type_user,
++						mem_type,
++						placement->busy_placement[i],
++						&cur_flags))
+ 			continue;
+ 
+ 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+ 						  cur_flags);
++		/*
++		 * Copy the access and other non-mapping-related flag bits
++		 * from the memory placement flags into the current flags
++		 */
++		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
++				~TTM_PL_MASK_MEMTYPE);
+ 
+-		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
+-					     interruptible, no_wait);
+ 
+-		if (ret == 0 && mem->mm_node) {
++		if (mem_type == TTM_PL_SYSTEM) {
++			mem->mem_type = mem_type;
+ 			mem->placement = cur_flags;
++			mem->mm_node = NULL;
+ 			return 0;
+ 		}
+ 
+-		if (ret == -ERESTART)
+-			has_eagain = true;
++		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
++						interruptible, no_wait);
++		if (ret == 0 && mem->mm_node) {
++			mem->placement = cur_flags;
++			mem->mm_node->private = bo;
++			return 0;
++		}
++		if (ret == -ERESTARTSYS)
++			has_erestartsys = true;
+ 	}
+-
+-	ret = (has_eagain) ? -ERESTART : -ENOMEM;
++	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
+ 	return ret;
+ }
+ EXPORT_SYMBOL(ttm_bo_mem_space);
+ 
+ int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
+ {
+-	int ret = 0;
+-
+ 	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
+ 		return -EBUSY;
+ 
+-	ret = wait_event_interruptible(bo->event_queue,
+-				       atomic_read(&bo->cpu_writers) == 0);
+-
+-	if (ret == -ERESTARTSYS)
+-		ret = -ERESTART;
+-
+-	return ret;
++	return wait_event_interruptible(bo->event_queue,
++					atomic_read(&bo->cpu_writers) == 0);
+ }
++EXPORT_SYMBOL(ttm_bo_wait_cpu);
+ 
+ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+-		       uint32_t proposed_placement,
+-		       bool interruptible, bool no_wait)
++			struct ttm_placement *placement,
++			bool interruptible, bool no_wait)
+ {
+ 	struct ttm_bo_global *glob = bo->glob;
+ 	int ret = 0;
+@@ -899,147 +991,138 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ 	 * Have the driver move function wait for idle when necessary,
+ 	 * instead of doing it here.
+ 	 */
+-
+ 	spin_lock(&bo->lock);
+ 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ 	spin_unlock(&bo->lock);
+-
+ 	if (ret)
+ 		return ret;
+-
+ 	mem.num_pages = bo->num_pages;
+ 	mem.size = mem.num_pages << PAGE_SHIFT;
+ 	mem.page_alignment = bo->mem.page_alignment;
+-
+ 	/*
+ 	 * Determine where to move the buffer.
+ 	 */
+-
+-	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
+-			       interruptible, no_wait);
++	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+ 	if (ret)
+ 		goto out_unlock;
+-
+ 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+-
+ out_unlock:
+ 	if (ret && mem.mm_node) {
+ 		spin_lock(&glob->lru_lock);
++		mem.mm_node->private = NULL;
+ 		drm_mm_put_block(mem.mm_node);
+ 		spin_unlock(&glob->lru_lock);
+ 	}
+ 	return ret;
+ }
+ 
+-static int ttm_bo_mem_compat(uint32_t proposed_placement,
++static int ttm_bo_mem_compat(struct ttm_placement *placement,
+ 			     struct ttm_mem_reg *mem)
+ {
+-	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
+-		return 0;
+-	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
+-		return 0;
+-
+-	return 1;
++	int i;
++	struct drm_mm_node *node = mem->mm_node;
++
++	if (node && placement->lpfn != 0 &&
++	    (node->start < placement->fpfn ||
++	     node->start + node->size > placement->lpfn))
++		return -1;
++
++	for (i = 0; i < placement->num_placement; i++) {
++		if ((placement->placement[i] & mem->placement &
++			TTM_PL_MASK_CACHING) &&
++			(placement->placement[i] & mem->placement &
++			TTM_PL_MASK_MEM))
++			return i;
++	}
++	return -1;
+ }
+ 
+-int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+-			       uint32_t proposed_placement,
+-			       bool interruptible, bool no_wait)
++int ttm_bo_validate(struct ttm_buffer_object *bo,
++			struct ttm_placement *placement,
++			bool interruptible, bool no_wait)
+ {
+ 	int ret;
+ 
+ 	BUG_ON(!atomic_read(&bo->reserved));
+-	bo->proposed_placement = proposed_placement;
+-
+-	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
+-		  (unsigned long)proposed_placement,
+-		  (unsigned long)bo->mem.placement);
+-
++	/* Check that range is valid */
++	if (placement->lpfn || placement->fpfn)
++		if (placement->fpfn > placement->lpfn ||
++			(placement->lpfn - placement->fpfn) < bo->num_pages)
++			return -EINVAL;
+ 	/*
+ 	 * Check whether we need to move buffer.
+ 	 */
+-
+-	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
+-		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
+-					 interruptible, no_wait);
+-		if (ret) {
+-			if (ret != -ERESTART)
+-				printk(KERN_ERR TTM_PFX
+-				       "Failed moving buffer. "
+-				       "Proposed placement 0x%08x\n",
+-				       bo->proposed_placement);
+-			if (ret == -ENOMEM)
+-				printk(KERN_ERR TTM_PFX
+-				       "Out of aperture space or "
+-				       "DRM memory quota.\n");
++	ret = ttm_bo_mem_compat(placement, &bo->mem);
++	if (ret < 0) {
++		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
++		if (ret)
+ 			return ret;
+-		}
++	} else {
++		/*
++		 * Copy the access and other non-mapping-related flag bits
++		 * from the compatible memory placement flags into the active flags
++		 */
++		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
++				~TTM_PL_MASK_MEMTYPE);
+ 	}
+-
+ 	/*
+ 	 * We might need to add a TTM.
+ 	 */
+-
+ 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ 		ret = ttm_bo_add_ttm(bo, true);
+ 		if (ret)
+ 			return ret;
+ 	}
+-	/*
+-	 * Validation has succeeded, move the access and other
+-	 * non-mapping-related flag bits from the proposed flags to
+-	 * the active flags
+-	 */
+-
+-	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
+-			~TTM_PL_MASK_MEMTYPE);
+-
+ 	return 0;
+ }
+-EXPORT_SYMBOL(ttm_buffer_object_validate);
++EXPORT_SYMBOL(ttm_bo_validate);
+ 
+-int
+-ttm_bo_check_placement(struct ttm_buffer_object *bo,
+-		       uint32_t set_flags, uint32_t clr_flags)
++int ttm_bo_check_placement(struct ttm_buffer_object *bo,
++				struct ttm_placement *placement)
+ {
+-	uint32_t new_mask = set_flags | clr_flags;
+-
+-	if ((bo->type == ttm_bo_type_user) &&
+-	    (clr_flags & TTM_PL_FLAG_CACHED)) {
+-		printk(KERN_ERR TTM_PFX
+-		       "User buffers require cache-coherent memory.\n");
+-		return -EINVAL;
+-	}
+-
+-	if (!capable(CAP_SYS_ADMIN)) {
+-		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
+-			printk(KERN_ERR TTM_PFX "Need to be root to modify"
+-			       " NO_EVICT status.\n");
++	int i;
++
++	if (placement->fpfn || placement->lpfn) {
++		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
++			printk(KERN_ERR TTM_PFX "Page number range too small: "
++				"need %lu pages, range is [%u, %u]\n",
++				bo->mem.num_pages, placement->fpfn,
++				placement->lpfn);
+ 			return -EINVAL;
+ 		}
+-
+-		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
+-		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+-			printk(KERN_ERR TTM_PFX
+-			       "Incompatible memory specification"
+-			       " for NO_EVICT buffer.\n");
+-			return -EINVAL;
++	}
++	for (i = 0; i < placement->num_placement; i++) {
++		if (!capable(CAP_SYS_ADMIN)) {
++			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
++				printk(KERN_ERR TTM_PFX "Need to be root to "
++					"modify NO_EVICT status.\n");
++				return -EINVAL;
++			}
++		}
++	}
++	for (i = 0; i < placement->num_busy_placement; i++) {
++		if (!capable(CAP_SYS_ADMIN)) {
++			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
++				printk(KERN_ERR TTM_PFX "Need to be root to "
++					"modify NO_EVICT status.\n");
++				return -EINVAL;
++			}
+ 		}
+ 	}
+ 	return 0;
+ }
+ 
+-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+-			   struct ttm_buffer_object *bo,
+-			   unsigned long size,
+-			   enum ttm_bo_type type,
+-			   uint32_t flags,
+-			   uint32_t page_alignment,
+-			   unsigned long buffer_start,
+-			   bool interruptible,
+-			   struct file *persistant_swap_storage,
+-			   size_t acc_size,
+-			   void (*destroy) (struct ttm_buffer_object *))
++int ttm_bo_init(struct ttm_bo_device *bdev,
++		struct ttm_buffer_object *bo,
++		unsigned long size,
++		enum ttm_bo_type type,
++		struct ttm_placement *placement,
++		uint32_t page_alignment,
++		unsigned long buffer_start,
++		bool interruptible,
++		struct file *persistant_swap_storage,
++		size_t acc_size,
++		void (*destroy) (struct ttm_buffer_object *))
+ {
+ 	int ret = 0;
+ 	unsigned long num_pages;
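
ttm_bo_mem_compat() changes from a boolean to returning the index of the first placement entry compatible with the buffer's current location (or -1), and ttm_bo_validate() uses that index to fold the matching entry's access bits into the active flags instead of moving the buffer. The comparison in isolation (mask values below are assumptions for illustration; the real ones live in ttm/ttm_placement.h):

	#include <stdint.h>

	#define TTM_PL_MASK_MEM		0x0000FFFFu	/* assumed: type bits */
	#define TTM_PL_MASK_CACHING	0x00070000u	/* assumed: caching bits */

	/* Return the index of the first compatible placement, or -1. */
	static int mem_compat(const uint32_t *placement, int n, uint32_t cur)
	{
		int i;

		for (i = 0; i < n; i++)
			if ((placement[i] & cur & TTM_PL_MASK_CACHING) &&
			    (placement[i] & cur & TTM_PL_MASK_MEM))
				return i;
		return -1;
	}
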
+@@ -1065,6 +1148,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+ 	bo->glob = bdev->glob;
+ 	bo->type = type;
+ 	bo->num_pages = num_pages;
++	bo->mem.size = num_pages << PAGE_SHIFT;
+ 	bo->mem.mem_type = TTM_PL_SYSTEM;
+ 	bo->mem.num_pages = bo->num_pages;
+ 	bo->mem.mm_node = NULL;
+@@ -1077,29 +1161,21 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+ 	bo->acc_size = acc_size;
+ 	atomic_inc(&bo->glob->bo_count);
+ 
+-	ret = ttm_bo_check_placement(bo, flags, 0ULL);
++	ret = ttm_bo_check_placement(bo, placement);
+ 	if (unlikely(ret != 0))
+ 		goto out_err;
+ 
+ 	/*
+-	 * If no caching attributes are set, accept any form of caching.
+-	 */
+-
+-	if ((flags & TTM_PL_MASK_CACHING) == 0)
+-		flags |= TTM_PL_MASK_CACHING;
+-
+-	/*
+ 	 * For ttm_bo_type_device buffers, allocate
+ 	 * address space from the device.
+ 	 */
+-
+ 	if (bo->type == ttm_bo_type_device) {
+ 		ret = ttm_bo_setup_vm(bo);
+ 		if (ret)
+ 			goto out_err;
+ 	}
+ 
+-	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
++	ret = ttm_bo_validate(bo, placement, interruptible, false);
+ 	if (ret)
+ 		goto out_err;
+ 
+@@ -1112,7 +1188,7 @@ out_err:
+ 
+ 	return ret;
+ }
+-EXPORT_SYMBOL(ttm_buffer_object_init);
++EXPORT_SYMBOL(ttm_bo_init);
+ 
+ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
+ 				 unsigned long num_pages)
+@@ -1123,19 +1199,19 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
+ 	return glob->ttm_bo_size + 2 * page_array_size;
+ }
+ 
+-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+-			     unsigned long size,
+-			     enum ttm_bo_type type,
+-			     uint32_t flags,
+-			     uint32_t page_alignment,
+-			     unsigned long buffer_start,
+-			     bool interruptible,
+-			     struct file *persistant_swap_storage,
+-			     struct ttm_buffer_object **p_bo)
++int ttm_bo_create(struct ttm_bo_device *bdev,
++			unsigned long size,
++			enum ttm_bo_type type,
++			struct ttm_placement *placement,
++			uint32_t page_alignment,
++			unsigned long buffer_start,
++			bool interruptible,
++			struct file *persistant_swap_storage,
++			struct ttm_buffer_object **p_bo)
+ {
+ 	struct ttm_buffer_object *bo;
+-	int ret;
+ 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
++	int ret;
+ 
+ 	size_t acc_size =
+ 	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+@@ -1150,76 +1226,41 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+ 		return -ENOMEM;
+ 	}
+ 
+-	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
+-				     page_alignment, buffer_start,
+-				     interruptible,
+-				     persistant_swap_storage, acc_size, NULL);
++	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
++				buffer_start, interruptible,
++				persistant_swap_storage, acc_size, NULL);
+ 	if (likely(ret == 0))
+ 		*p_bo = bo;
+ 
+ 	return ret;
+ }
+ 
+-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
+-			     uint32_t mem_type, bool allow_errors)
+-{
+-	int ret;
+-
+-	spin_lock(&bo->lock);
+-	ret = ttm_bo_wait(bo, false, false, false);
+-	spin_unlock(&bo->lock);
+-
+-	if (ret && allow_errors)
+-		goto out;
+-
+-	if (bo->mem.mem_type == mem_type)
+-		ret = ttm_bo_evict(bo, mem_type, false, false);
+-
+-	if (ret) {
+-		if (allow_errors) {
+-			goto out;
+-		} else {
+-			ret = 0;
+-			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
+-		}
+-	}
+-
+-out:
+-	return ret;
+-}
+-
+ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
+-				   struct list_head *head,
+-				   unsigned mem_type, bool allow_errors)
++					unsigned mem_type, bool allow_errors)
+ {
++	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ 	struct ttm_bo_global *glob = bdev->glob;
+-	struct ttm_buffer_object *entry;
+ 	int ret;
+-	int put_count;
+ 
+ 	/*
+ 	 * Can't use standard list traversal since we're unlocking.
+ 	 */
+ 
+ 	spin_lock(&glob->lru_lock);
+-
+-	while (!list_empty(head)) {
+-		entry = list_first_entry(head, struct ttm_buffer_object, lru);
+-		kref_get(&entry->list_kref);
+-		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
+-		put_count = ttm_bo_del_from_lru(entry);
++	while (!list_empty(&man->lru)) {
+ 		spin_unlock(&glob->lru_lock);
+-		while (put_count--)
+-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
+-		BUG_ON(ret);
+-		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
+-		ttm_bo_unreserve(entry);
+-		kref_put(&entry->list_kref, ttm_bo_release_list);
++		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
++		if (ret) {
++			if (allow_errors) {
++				return ret;
++			} else {
++				printk(KERN_ERR TTM_PFX
++					"Cleanup eviction failed\n");
++			}
++		}
+ 		spin_lock(&glob->lru_lock);
+ 	}
+-
+ 	spin_unlock(&glob->lru_lock);
+-
+ 	return 0;
+ }
+ 
+@@ -1246,7 +1287,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+ 
+ 	ret = 0;
+ 	if (mem_type > 0) {
+-		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
++		ttm_bo_force_list_clean(bdev, mem_type, false);
+ 
+ 		spin_lock(&glob->lru_lock);
+ 		if (drm_mm_clean(&man->manager))
+@@ -1279,12 +1320,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+ 		return 0;
+ 	}
+ 
+-	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
++	return ttm_bo_force_list_clean(bdev, mem_type, true);
+ }
+ EXPORT_SYMBOL(ttm_bo_evict_mm);
+ 
+ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+-		   unsigned long p_offset, unsigned long p_size)
++			unsigned long p_size)
+ {
+ 	int ret = -EINVAL;
+ 	struct ttm_mem_type_manager *man;
+@@ -1314,7 +1355,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+ 			       type);
+ 			return ret;
+ 		}
+-		ret = drm_mm_init(&man->manager, p_offset, p_size);
++		ret = drm_mm_init(&man->manager, 0, p_size);
+ 		if (ret)
+ 			return ret;
+ 	}
+@@ -1463,7 +1504,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
+ 	 * Initialize the system memory buffer type.
+ 	 * Other types need to be driver / IOCTL initialized.
+ 	 */
+-	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
++	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
+ 	if (unlikely(ret != 0))
+ 		goto out_no_sys;
+ 
+@@ -1693,7 +1734,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
+ 			ret = wait_event_interruptible
+ 			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
+ 			if (unlikely(ret != 0))
+-				return -ERESTART;
++				return ret;
+ 		} else {
+ 			wait_event(bo->event_queue,
+ 				   atomic_read(&bo->reserved) == 0);
+@@ -1722,12 +1763,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+ 	ttm_bo_unreserve(bo);
+ 	return ret;
+ }
++EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
+ 
+ void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+ {
+ 	if (atomic_dec_and_test(&bo->cpu_writers))
+ 		wake_up_all(&bo->event_queue);
+ }
++EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
+ 
+ /**
+  * A buffer object shrink method that tries to swap out the first
+@@ -1808,6 +1851,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+ 	 * anyone tries to access a ttm page.
+ 	 */
+ 
++	if (bo->bdev->driver->swap_notify)
++		bo->bdev->driver->swap_notify(bo);
++
+ 	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
+ out:
+ 
+@@ -1828,3 +1874,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
+ 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+ 		;
+ }
++EXPORT_SYMBOL(ttm_bo_swapout_all);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
+index c70927e..5ca37a5 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ {
+ 	struct ttm_tt *ttm = bo->ttm;
+ 	struct ttm_mem_reg *old_mem = &bo->mem;
+-	uint32_t save_flags = old_mem->placement;
+ 	int ret;
+ 
+ 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
+@@ -62,7 +61,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ 		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
+ 				TTM_PL_MASK_MEM);
+ 		old_mem->mem_type = TTM_PL_SYSTEM;
+-		save_flags = old_mem->placement;
+ 	}
+ 
+ 	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
+@@ -77,7 +75,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
+ 
+ 	*old_mem = *new_mem;
+ 	new_mem->mm_node = NULL;
+-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(ttm_bo_move_ttm);
+@@ -219,7 +217,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
+ 	void *old_iomap;
+ 	void *new_iomap;
+ 	int ret;
+-	uint32_t save_flags = old_mem->placement;
+ 	unsigned long i;
+ 	unsigned long page;
+ 	unsigned long add = 0;
+@@ -270,7 +267,6 @@ out2:
+ 
+ 	*old_mem = *new_mem;
+ 	new_mem->mm_node = NULL;
+-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+ 
+ 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
+ 		ttm_tt_unbind(ttm);
+@@ -369,6 +365,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+ #endif
+ 	return tmp;
+ }
++EXPORT_SYMBOL(ttm_io_prot);
+ 
+ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
+ 			  unsigned long bus_base,
+@@ -427,7 +424,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
+ 
+ 		/*
+ 		 * We need to use vmap to get the desired page protection
+-		 * or to make the buffer object look contigous.
++		 * or to make the buffer object look contiguous.
+ 		 */
+ 		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+ 			PAGE_KERNEL :
+@@ -536,7 +533,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
+ 	struct ttm_mem_reg *old_mem = &bo->mem;
+ 	int ret;
+-	uint32_t save_flags = old_mem->placement;
+ 	struct ttm_buffer_object *ghost_obj;
+ 	void *tmp_obj = NULL;
+ 
+@@ -597,7 +593,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+ 
+ 	*old_mem = *new_mem;
+ 	new_mem->mm_node = NULL;
+-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
++
+ 	return 0;
+ }
+ EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 1c040d0..668dbe8 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 		ret = ttm_bo_wait(bo, false, true, false);
+ 		spin_unlock(&bo->lock);
+ 		if (unlikely(ret != 0)) {
+-			retval = (ret != -ERESTART) ?
++			retval = (ret != -ERESTARTSYS) ?
+ 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
+ 			goto out_unlock;
+ 		}
+@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ 		return -EFAULT;
+ 
+ 	driver = bo->bdev->driver;
+-	if (unlikely(driver->verify_access)) {
++	if (unlikely(!driver->verify_access)) {
+ 		ret = -EPERM;
+ 		goto out_unref;
+ 	}
+@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ 	switch (ret) {
+ 	case 0:
+ 		break;
+-	case -ERESTART:
+-		ret = -EINTR;
+-		goto out_unref;
+ 	case -EBUSY:
+ 		ret = -EAGAIN;
+ 		goto out_unref;
+@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+ 	switch (ret) {
+ 	case 0:
+ 		break;
+-	case -ERESTART:
+-		return -EINTR;
+ 	case -EBUSY:
+ 		return -EAGAIN;
+ 	default:
+diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+new file mode 100644
+index 0000000..c285c29
+--- /dev/null
++++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+@@ -0,0 +1,117 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement.h"
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++
++void ttm_eu_backoff_reservation(struct list_head *list)
++{
++	struct ttm_validate_buffer *entry;
++
++	list_for_each_entry(entry, list, head) {
++		struct ttm_buffer_object *bo = entry->bo;
++		if (!entry->reserved)
++			continue;
++
++		entry->reserved = false;
++		ttm_bo_unreserve(bo);
++	}
++}
++EXPORT_SYMBOL(ttm_eu_backoff_reservation);
++
++/*
++ * Reserve buffers for validation.
++ *
++ * If a buffer in the list is marked for CPU access, we back off and
++ * wait for that buffer to become free for GPU access.
++ *
++ * If a buffer is reserved for another validation, the validator with
++ * the highest validation sequence backs off and waits for that buffer
++ * to become unreserved. This prevents deadlocks when validating multiple
++ * buffers in different orders.
++ */
++
++int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
++{
++	struct ttm_validate_buffer *entry;
++	int ret;
++
++retry:
++	list_for_each_entry(entry, list, head) {
++		struct ttm_buffer_object *bo = entry->bo;
++
++		entry->reserved = false;
++		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
++		if (ret != 0) {
++			ttm_eu_backoff_reservation(list);
++			if (ret == -EAGAIN) {
++				ret = ttm_bo_wait_unreserved(bo, true);
++				if (unlikely(ret != 0))
++					return ret;
++				goto retry;
++			} else
++				return ret;
++		}
++
++		entry->reserved = true;
++		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
++			ttm_eu_backoff_reservation(list);
++			ret = ttm_bo_wait_cpu(bo, false);
++			if (ret)
++				return ret;
++			goto retry;
++		}
++	}
++	return 0;
++}
++EXPORT_SYMBOL(ttm_eu_reserve_buffers);
++
++void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
++{
++	struct ttm_validate_buffer *entry;
++
++	list_for_each_entry(entry, list, head) {
++		struct ttm_buffer_object *bo = entry->bo;
++		struct ttm_bo_driver *driver = bo->bdev->driver;
++		void *old_sync_obj;
++
++		spin_lock(&bo->lock);
++		old_sync_obj = bo->sync_obj;
++		bo->sync_obj = driver->sync_obj_ref(sync_obj);
++		bo->sync_obj_arg = entry->new_sync_obj_arg;
++		spin_unlock(&bo->lock);
++		ttm_bo_unreserve(bo);
++		entry->reserved = false;
++		if (old_sync_obj)
++			driver->sync_obj_unref(&old_sync_obj);
++	}
++}
++EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
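For illustration, a minimal driver-side sketch of the reserve/validate/fence
flow the helpers above implement (not part of the patch; my_submit() and the
fence pointer are hypothetical, only the ttm_eu_* calls are real):

    static int my_submit(struct list_head *val_list, uint32_t val_seq,
                         void *fence)
    {
            int ret;

            /* Reserve every buffer on the list; on contention the helper
             * backs off and retries, so two validators working through the
             * same buffers in different orders cannot deadlock. */
            ret = ttm_eu_reserve_buffers(val_list, val_seq);
            if (unlikely(ret != 0))
                    return ret;

            /* ... validate placement and emit GPU commands here ... */

            /* Attach the fence to every buffer and drop the reservations. */
            ttm_eu_fence_buffer_objects(val_list, fence);
            return 0;
    }
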
+diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
+new file mode 100644
+index 0000000..3d172ef
+--- /dev/null
++++ b/drivers/gpu/drm/ttm/ttm_lock.c
+@@ -0,0 +1,313 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
++ */
++
++#include "ttm/ttm_lock.h"
++#include "ttm/ttm_module.h"
++#include <asm/atomic.h>
++#include <linux/errno.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++
++#define TTM_WRITE_LOCK_PENDING    (1 << 0)
++#define TTM_VT_LOCK_PENDING       (1 << 1)
++#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
++#define TTM_VT_LOCK               (1 << 3)
++#define TTM_SUSPEND_LOCK          (1 << 4)
++
++void ttm_lock_init(struct ttm_lock *lock)
++{
++	spin_lock_init(&lock->lock);
++	init_waitqueue_head(&lock->queue);
++	lock->rw = 0;
++	lock->flags = 0;
++	lock->kill_takers = false;
++	lock->signal = SIGKILL;
++}
++EXPORT_SYMBOL(ttm_lock_init);
++
++void ttm_read_unlock(struct ttm_lock *lock)
++{
++	spin_lock(&lock->lock);
++	if (--lock->rw == 0)
++		wake_up_all(&lock->queue);
++	spin_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(ttm_read_unlock);
++
++static bool __ttm_read_lock(struct ttm_lock *lock)
++{
++	bool locked = false;
++
++	spin_lock(&lock->lock);
++	if (unlikely(lock->kill_takers)) {
++		send_sig(lock->signal, current, 0);
++		spin_unlock(&lock->lock);
++		return false;
++	}
++	if (lock->rw >= 0 && lock->flags == 0) {
++		++lock->rw;
++		locked = true;
++	}
++	spin_unlock(&lock->lock);
++	return locked;
++}
++
++int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
++{
++	int ret = 0;
++
++	if (interruptible)
++		ret = wait_event_interruptible(lock->queue,
++					       __ttm_read_lock(lock));
++	else
++		wait_event(lock->queue, __ttm_read_lock(lock));
++	return ret;
++}
++EXPORT_SYMBOL(ttm_read_lock);
++
++static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
++{
++	bool block = true;
++
++	*locked = false;
++
++	spin_lock(&lock->lock);
++	if (unlikely(lock->kill_takers)) {
++		send_sig(lock->signal, current, 0);
++		spin_unlock(&lock->lock);
++		return false;
++	}
++	if (lock->rw >= 0 && lock->flags == 0) {
++		++lock->rw;
++		block = false;
++		*locked = true;
++	} else if (lock->flags == 0) {
++		block = false;
++	}
++	spin_unlock(&lock->lock);
++
++	return !block;
++}
++
++int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
++{
++	int ret = 0;
++	bool locked;
++
++	if (interruptible)
++		ret = wait_event_interruptible
++			(lock->queue, __ttm_read_trylock(lock, &locked));
++	else
++		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
++
++	if (unlikely(ret != 0)) {
++		BUG_ON(locked);
++		return ret;
++	}
++
++	return (locked) ? 0 : -EBUSY;
++}
++
++void ttm_write_unlock(struct ttm_lock *lock)
++{
++	spin_lock(&lock->lock);
++	lock->rw = 0;
++	wake_up_all(&lock->queue);
++	spin_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(ttm_write_unlock);
++
++static bool __ttm_write_lock(struct ttm_lock *lock)
++{
++	bool locked = false;
++
++	spin_lock(&lock->lock);
++	if (unlikely(lock->kill_takers)) {
++		send_sig(lock->signal, current, 0);
++		spin_unlock(&lock->lock);
++		return false;
++	}
++	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
++		lock->rw = -1;
++		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
++		locked = true;
++	} else {
++		lock->flags |= TTM_WRITE_LOCK_PENDING;
++	}
++	spin_unlock(&lock->lock);
++	return locked;
++}
++
++int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
++{
++	int ret = 0;
++
++	if (interruptible) {
++		ret = wait_event_interruptible(lock->queue,
++					       __ttm_write_lock(lock));
++		if (unlikely(ret != 0)) {
++			spin_lock(&lock->lock);
++			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
++			wake_up_all(&lock->queue);
++			spin_unlock(&lock->lock);
++		}
++	} else
++		wait_event(lock->queue, __ttm_write_lock(lock));
++
++	return ret;
++}
++EXPORT_SYMBOL(ttm_write_lock);
++
++void ttm_write_lock_downgrade(struct ttm_lock *lock)
++{
++	spin_lock(&lock->lock);
++	lock->rw = 1;
++	wake_up_all(&lock->queue);
++	spin_unlock(&lock->lock);
++}
++
++static int __ttm_vt_unlock(struct ttm_lock *lock)
++{
++	int ret = 0;
++
++	spin_lock(&lock->lock);
++	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
++		ret = -EINVAL;
++	lock->flags &= ~TTM_VT_LOCK;
++	wake_up_all(&lock->queue);
++	spin_unlock(&lock->lock);
++	printk(KERN_INFO TTM_PFX "vt unlock.\n");
++
++	return ret;
++}
++
++static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
++{
++	struct ttm_base_object *base = *p_base;
++	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
++	int ret;
++
++	*p_base = NULL;
++	ret = __ttm_vt_unlock(lock);
++	BUG_ON(ret != 0);
++}
++
++static bool __ttm_vt_lock(struct ttm_lock *lock)
++{
++	bool locked = false;
++
++	spin_lock(&lock->lock);
++	if (lock->rw == 0) {
++		lock->flags &= ~TTM_VT_LOCK_PENDING;
++		lock->flags |= TTM_VT_LOCK;
++		locked = true;
++	} else {
++		lock->flags |= TTM_VT_LOCK_PENDING;
++	}
++	spin_unlock(&lock->lock);
++	return locked;
++}
++
++int ttm_vt_lock(struct ttm_lock *lock,
++		bool interruptible,
++		struct ttm_object_file *tfile)
++{
++	int ret = 0;
++
++	if (interruptible) {
++		ret = wait_event_interruptible(lock->queue,
++					       __ttm_vt_lock(lock));
++		if (unlikely(ret != 0)) {
++			spin_lock(&lock->lock);
++			lock->flags &= ~TTM_VT_LOCK_PENDING;
++			wake_up_all(&lock->queue);
++			spin_unlock(&lock->lock);
++			return ret;
++		}
++	} else
++		wait_event(lock->queue, __ttm_vt_lock(lock));
++
++	/*
++	 * Add a base-object, the destructor of which will
++	 * make sure the lock is released if the client dies
++	 * while holding it.
++	 */
++
++	ret = ttm_base_object_init(tfile, &lock->base, false,
++				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
++	if (ret)
++		(void)__ttm_vt_unlock(lock);
++	else {
++		lock->vt_holder = tfile;
++		printk(KERN_INFO TTM_PFX "vt lock.\n");
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL(ttm_vt_lock);
++
++int ttm_vt_unlock(struct ttm_lock *lock)
++{
++	return ttm_ref_object_base_unref(lock->vt_holder,
++					 lock->base.hash.key, TTM_REF_USAGE);
++}
++EXPORT_SYMBOL(ttm_vt_unlock);
++
++void ttm_suspend_unlock(struct ttm_lock *lock)
++{
++	spin_lock(&lock->lock);
++	lock->flags &= ~TTM_SUSPEND_LOCK;
++	wake_up_all(&lock->queue);
++	spin_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(ttm_suspend_unlock);
++
++static bool __ttm_suspend_lock(struct ttm_lock *lock)
++{
++	bool locked = false;
++
++	spin_lock(&lock->lock);
++	if (lock->rw == 0) {
++		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
++		lock->flags |= TTM_SUSPEND_LOCK;
++		locked = true;
++	} else {
++		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
++	}
++	spin_unlock(&lock->lock);
++	return locked;
++}
++
++void ttm_suspend_lock(struct ttm_lock *lock)
++{
++	wait_event(lock->queue, __ttm_suspend_lock(lock));
++}
++EXPORT_SYMBOL(ttm_suspend_lock);
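For illustration, the intended calling pattern for the new lock (a sketch, not
part of the patch): command submission paths take the lock for reading, while
exclusive operations such as a VT switch take it for writing:

    static int my_command_ioctl(struct ttm_lock *lock)
    {
            /* Interruptible read lock: returns -ERESTARTSYS if the wait
             * is interrupted by a signal. */
            int ret = ttm_read_lock(lock, true);

            if (unlikely(ret != 0))
                    return ret;

            /* ... build and submit the command stream ... */

            ttm_read_unlock(lock);
            return 0;
    }
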
+diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
+index 072c281..f5245c0 100644
+--- a/drivers/gpu/drm/ttm/ttm_memory.c
++++ b/drivers/gpu/drm/ttm/ttm_memory.c
+@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
+ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
+ 				     const struct sysinfo *si)
+ {
+-	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
++	struct ttm_mem_zone *zone;
+ 	uint64_t mem;
+ 	int ret;
+ 
+-	if (unlikely(!zone))
+-		return -ENOMEM;
+-
+ 	if (si->totalhigh == 0)
+ 		return 0;
+ 
++	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
++	if (unlikely(!zone))
++		return -ENOMEM;
++
+ 	mem = si->totalram;
+ 	mem *= si->mem_unit;
+ 
+@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
+ 	 * No special dma32 zone needed.
+ 	 */
+ 
+-	if (mem <= ((uint64_t) 1ULL << 32))
++	if (mem <= ((uint64_t) 1ULL << 32)) {
++		kfree(zone);
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Limit max dma32 memory to 4GB for now
+@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
+ {
+ 	return ttm_mem_global_free_zone(glob, NULL, amount);
+ }
++EXPORT_SYMBOL(ttm_mem_global_free);
+ 
+ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
+ 				  struct ttm_mem_zone *single_zone,
+@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
+ 	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
+ 					 interruptible);
+ }
++EXPORT_SYMBOL(ttm_mem_global_alloc);
+ 
+ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
+ 			      struct page *page,
+@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size)
+ 	}
+ 	return 0;
+ }
++EXPORT_SYMBOL(ttm_round_pot);
+diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
+new file mode 100644
+index 0000000..75e9d6f
+--- /dev/null
++++ b/drivers/gpu/drm/ttm/ttm_object.c
+@@ -0,0 +1,452 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
++ */
++/** @file ttm_ref_object.c
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++/**
++ * struct ttm_object_file
++ *
++ * @tdev: Pointer to the ttm_object_device.
++ *
++ * @lock: Lock that protects the ref_list list and the
++ * ref_hash hash tables.
++ *
++ * @ref_list: List of ttm_ref_objects to be destroyed at
++ * file release.
++ *
++ * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
++ * for fast lookup of ref objects given a base object.
++ */
++
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_module.h"
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <asm/atomic.h>
++
++struct ttm_object_file {
++	struct ttm_object_device *tdev;
++	rwlock_t lock;
++	struct list_head ref_list;
++	struct drm_open_hash ref_hash[TTM_REF_NUM];
++	struct kref refcount;
++};
++
++/**
++ * struct ttm_object_device
++ *
++ * @object_lock: lock that protects the object_hash hash table.
++ *
++ * @object_hash: hash table for fast lookup of object global names.
++ *
++ * @object_count: Per device object count.
++ *
++ * This is the per-device data structure needed for ttm object management.
++ */
++
++struct ttm_object_device {
++	rwlock_t object_lock;
++	struct drm_open_hash object_hash;
++	atomic_t object_count;
++	struct ttm_mem_global *mem_glob;
++};
++
++/**
++ * struct ttm_ref_object
++ *
++ * @hash: Hash entry for the per-file object reference hash.
++ *
++ * @head: List entry for the per-file list of ref-objects.
++ *
++ * @kref: Ref count.
++ *
++ * @obj: Base object this ref object is referencing.
++ *
++ * @ref_type: Type of ref object.
++ *
++ * This is similar to an idr object, but it also has a hash table entry
++ * that allows lookup with a pointer to the referenced object as a key. In
++ * that way, one can easily detect whether a base object is referenced by
++ * a particular ttm_object_file. It also carries a ref count to avoid creating
++ * multiple ref objects if a ttm_object_file references the same base
++ * object more than once.
++ */
++
++struct ttm_ref_object {
++	struct drm_hash_item hash;
++	struct list_head head;
++	struct kref kref;
++	enum ttm_ref_type ref_type;
++	struct ttm_base_object *obj;
++	struct ttm_object_file *tfile;
++};
++
++static inline struct ttm_object_file *
++ttm_object_file_ref(struct ttm_object_file *tfile)
++{
++	kref_get(&tfile->refcount);
++	return tfile;
++}
++
++static void ttm_object_file_destroy(struct kref *kref)
++{
++	struct ttm_object_file *tfile =
++		container_of(kref, struct ttm_object_file, refcount);
++
++	kfree(tfile);
++}
++
++
++static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
++{
++	struct ttm_object_file *tfile = *p_tfile;
++
++	*p_tfile = NULL;
++	kref_put(&tfile->refcount, ttm_object_file_destroy);
++}
++
++
++int ttm_base_object_init(struct ttm_object_file *tfile,
++			 struct ttm_base_object *base,
++			 bool shareable,
++			 enum ttm_object_type object_type,
++			 void (*refcount_release) (struct ttm_base_object **),
++			 void (*ref_obj_release) (struct ttm_base_object *,
++						  enum ttm_ref_type ref_type))
++{
++	struct ttm_object_device *tdev = tfile->tdev;
++	int ret;
++
++	base->shareable = shareable;
++	base->tfile = ttm_object_file_ref(tfile);
++	base->refcount_release = refcount_release;
++	base->ref_obj_release = ref_obj_release;
++	base->object_type = object_type;
++	write_lock(&tdev->object_lock);
++	kref_init(&base->refcount);
++	ret = drm_ht_just_insert_please(&tdev->object_hash,
++					&base->hash,
++					(unsigned long)base, 31, 0, 0);
++	write_unlock(&tdev->object_lock);
++	if (unlikely(ret != 0))
++		goto out_err0;
++
++	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
++	if (unlikely(ret != 0))
++		goto out_err1;
++
++	ttm_base_object_unref(&base);
++
++	return 0;
++out_err1:
++	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++out_err0:
++	return ret;
++}
++EXPORT_SYMBOL(ttm_base_object_init);
++
++static void ttm_release_base(struct kref *kref)
++{
++	struct ttm_base_object *base =
++	    container_of(kref, struct ttm_base_object, refcount);
++	struct ttm_object_device *tdev = base->tfile->tdev;
++
++	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
++	write_unlock(&tdev->object_lock);
++	if (base->refcount_release) {
++		ttm_object_file_unref(&base->tfile);
++		base->refcount_release(&base);
++	}
++	write_lock(&tdev->object_lock);
++}
++
++void ttm_base_object_unref(struct ttm_base_object **p_base)
++{
++	struct ttm_base_object *base = *p_base;
++	struct ttm_object_device *tdev = base->tfile->tdev;
++
++	*p_base = NULL;
++
++	/*
++	 * Need to take the lock here to avoid racing with
++	 * users trying to look up the object.
++	 */
++
++	write_lock(&tdev->object_lock);
++	(void)kref_put(&base->refcount, &ttm_release_base);
++	write_unlock(&tdev->object_lock);
++}
++EXPORT_SYMBOL(ttm_base_object_unref);
++
++struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
++					       uint32_t key)
++{
++	struct ttm_object_device *tdev = tfile->tdev;
++	struct ttm_base_object *base;
++	struct drm_hash_item *hash;
++	int ret;
++
++	read_lock(&tdev->object_lock);
++	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
++
++	if (likely(ret == 0)) {
++		base = drm_hash_entry(hash, struct ttm_base_object, hash);
++		kref_get(&base->refcount);
++	}
++	read_unlock(&tdev->object_lock);
++
++	if (unlikely(ret != 0))
++		return NULL;
++
++	if (tfile != base->tfile && !base->shareable) {
++		printk(KERN_ERR TTM_PFX
++		       "Attempted access of non-shareable object.\n");
++		ttm_base_object_unref(&base);
++		return NULL;
++	}
++
++	return base;
++}
++EXPORT_SYMBOL(ttm_base_object_lookup);
++
++int ttm_ref_object_add(struct ttm_object_file *tfile,
++		       struct ttm_base_object *base,
++		       enum ttm_ref_type ref_type, bool *existed)
++{
++	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++	struct ttm_ref_object *ref;
++	struct drm_hash_item *hash;
++	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++	int ret = -EINVAL;
++
++	if (existed != NULL)
++		*existed = true;
++
++	while (ret == -EINVAL) {
++		read_lock(&tfile->lock);
++		ret = drm_ht_find_item(ht, base->hash.key, &hash);
++
++		if (ret == 0) {
++			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++			kref_get(&ref->kref);
++			read_unlock(&tfile->lock);
++			break;
++		}
++
++		read_unlock(&tfile->lock);
++		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
++					   false, false);
++		if (unlikely(ret != 0))
++			return ret;
++		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
++		if (unlikely(ref == NULL)) {
++			ttm_mem_global_free(mem_glob, sizeof(*ref));
++			return -ENOMEM;
++		}
++
++		ref->hash.key = base->hash.key;
++		ref->obj = base;
++		ref->tfile = tfile;
++		ref->ref_type = ref_type;
++		kref_init(&ref->kref);
++
++		write_lock(&tfile->lock);
++		ret = drm_ht_insert_item(ht, &ref->hash);
++
++		if (likely(ret == 0)) {
++			list_add_tail(&ref->head, &tfile->ref_list);
++			kref_get(&base->refcount);
++			write_unlock(&tfile->lock);
++			if (existed != NULL)
++				*existed = false;
++			break;
++		}
++
++		write_unlock(&tfile->lock);
++		BUG_ON(ret != -EINVAL);
++
++		ttm_mem_global_free(mem_glob, sizeof(*ref));
++		kfree(ref);
++	}
++
++	return ret;
++}
++EXPORT_SYMBOL(ttm_ref_object_add);
++
++static void ttm_ref_object_release(struct kref *kref)
++{
++	struct ttm_ref_object *ref =
++	    container_of(kref, struct ttm_ref_object, kref);
++	struct ttm_base_object *base = ref->obj;
++	struct ttm_object_file *tfile = ref->tfile;
++	struct drm_open_hash *ht;
++	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
++
++	ht = &tfile->ref_hash[ref->ref_type];
++	(void)drm_ht_remove_item(ht, &ref->hash);
++	list_del(&ref->head);
++	write_unlock(&tfile->lock);
++
++	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
++		base->ref_obj_release(base, ref->ref_type);
++
++	ttm_base_object_unref(&ref->obj);
++	ttm_mem_global_free(mem_glob, sizeof(*ref));
++	kfree(ref);
++	write_lock(&tfile->lock);
++}
++
++int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++			      unsigned long key, enum ttm_ref_type ref_type)
++{
++	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
++	struct ttm_ref_object *ref;
++	struct drm_hash_item *hash;
++	int ret;
++
++	write_lock(&tfile->lock);
++	ret = drm_ht_find_item(ht, key, &hash);
++	if (unlikely(ret != 0)) {
++		write_unlock(&tfile->lock);
++		return -EINVAL;
++	}
++	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
++	kref_put(&ref->kref, ttm_ref_object_release);
++	write_unlock(&tfile->lock);
++	return 0;
++}
++EXPORT_SYMBOL(ttm_ref_object_base_unref);
++
++void ttm_object_file_release(struct ttm_object_file **p_tfile)
++{
++	struct ttm_ref_object *ref;
++	struct list_head *list;
++	unsigned int i;
++	struct ttm_object_file *tfile = *p_tfile;
++
++	*p_tfile = NULL;
++	write_lock(&tfile->lock);
++
++	/*
++	 * Since we release the lock within the loop, we have to
++	 * restart it from the beginning each time.
++	 */
++
++	while (!list_empty(&tfile->ref_list)) {
++		list = tfile->ref_list.next;
++		ref = list_entry(list, struct ttm_ref_object, head);
++		ttm_ref_object_release(&ref->kref);
++	}
++
++	for (i = 0; i < TTM_REF_NUM; ++i)
++		drm_ht_remove(&tfile->ref_hash[i]);
++
++	write_unlock(&tfile->lock);
++	ttm_object_file_unref(&tfile);
++}
++EXPORT_SYMBOL(ttm_object_file_release);
++
++struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
++					     unsigned int hash_order)
++{
++	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
++	unsigned int i;
++	unsigned int j = 0;
++	int ret;
++
++	if (unlikely(tfile == NULL))
++		return NULL;
++
++	rwlock_init(&tfile->lock);
++	tfile->tdev = tdev;
++	kref_init(&tfile->refcount);
++	INIT_LIST_HEAD(&tfile->ref_list);
++
++	for (i = 0; i < TTM_REF_NUM; ++i) {
++		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
++		if (ret) {
++			j = i;
++			goto out_err;
++		}
++	}
++
++	return tfile;
++out_err:
++	for (i = 0; i < j; ++i)
++		drm_ht_remove(&tfile->ref_hash[i]);
++
++	kfree(tfile);
++
++	return NULL;
++}
++EXPORT_SYMBOL(ttm_object_file_init);
++
++struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
++						 *mem_glob,
++						 unsigned int hash_order)
++{
++	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
++	int ret;
++
++	if (unlikely(tdev == NULL))
++		return NULL;
++
++	tdev->mem_glob = mem_glob;
++	rwlock_init(&tdev->object_lock);
++	atomic_set(&tdev->object_count, 0);
++	ret = drm_ht_create(&tdev->object_hash, hash_order);
++
++	if (likely(ret == 0))
++		return tdev;
++
++	kfree(tdev);
++	return NULL;
++}
++EXPORT_SYMBOL(ttm_object_device_init);
++
++void ttm_object_device_release(struct ttm_object_device **p_tdev)
++{
++	struct ttm_object_device *tdev = *p_tdev;
++
++	*p_tdev = NULL;
++
++	write_lock(&tdev->object_lock);
++	drm_ht_remove(&tdev->object_hash);
++	write_unlock(&tdev->object_lock);
++
++	kfree(tdev);
++}
++EXPORT_SYMBOL(ttm_object_device_release);
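For illustration, the embedding pattern this file expects from drivers (a
sketch, not part of the patch; struct my_object, my_object_type and the my_*
helpers are hypothetical):

    struct my_object {
            struct ttm_base_object base;
            /* driver-private payload ... */
    };

    static void my_release(struct ttm_base_object **p_base)
    {
            struct my_object *obj =
                    container_of(*p_base, struct my_object, base);

            *p_base = NULL;
            kfree(obj);
    }

    static int my_create(struct ttm_object_file *tfile, struct my_object *obj)
    {
            /* Publishes the object under obj->base.hash.key and gives the
             * creating file a TTM_REF_USAGE reference that keeps it alive. */
            return ttm_base_object_init(tfile, &obj->base, true /* shareable */,
                                        my_object_type, &my_release, NULL);
    }

    static struct my_object *my_lookup(struct ttm_object_file *tfile,
                                       uint32_t handle)
    {
            /* Takes a reference; callers drop it with ttm_base_object_unref(). */
            struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

            return base ? container_of(base, struct my_object, base) : NULL;
    }
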
+diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
+index 7bcb89f..3d47a2c 100644
+--- a/drivers/gpu/drm/ttm/ttm_tt.c
++++ b/drivers/gpu/drm/ttm/ttm_tt.c
+@@ -192,26 +192,38 @@ int ttm_tt_populate(struct ttm_tt *ttm)
+ 	ttm->state = tt_unbound;
+ 	return 0;
+ }
++EXPORT_SYMBOL(ttm_tt_populate);
+ 
+ #ifdef CONFIG_X86
+ static inline int ttm_tt_set_page_caching(struct page *p,
+-					  enum ttm_caching_state c_state)
++					  enum ttm_caching_state c_old,
++					  enum ttm_caching_state c_new)
+ {
++	int ret = 0;
++
+ 	if (PageHighMem(p))
+ 		return 0;
+ 
+-	switch (c_state) {
+-	case tt_cached:
+-		return set_pages_wb(p, 1);
+-	case tt_wc:
+-	    return set_memory_wc((unsigned long) page_address(p), 1);
+-	default:
+-		return set_pages_uc(p, 1);
++	if (c_old != tt_cached) {
++		/* p isn't in the default caching state, set it to
++		 * writeback first to free its current memtype. */
++
++		ret = set_pages_wb(p, 1);
++		if (ret)
++			return ret;
+ 	}
++
++	if (c_new == tt_wc)
++		ret = set_memory_wc((unsigned long) page_address(p), 1);
++	else if (c_new == tt_uncached)
++		ret = set_pages_uc(p, 1);
++
++	return ret;
+ }
+ #else /* CONFIG_X86 */
+ static inline int ttm_tt_set_page_caching(struct page *p,
+-					  enum ttm_caching_state c_state)
++					  enum ttm_caching_state c_old,
++					  enum ttm_caching_state c_new)
+ {
+ 	return 0;
+ }
+@@ -244,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
+ 	for (i = 0; i < ttm->num_pages; ++i) {
+ 		cur_page = ttm->pages[i];
+ 		if (likely(cur_page != NULL)) {
+-			ret = ttm_tt_set_page_caching(cur_page, c_state);
++			ret = ttm_tt_set_page_caching(cur_page,
++						      ttm->caching_state,
++						      c_state);
+ 			if (unlikely(ret != 0))
+ 				goto out_err;
+ 		}
+@@ -258,7 +272,7 @@ out_err:
+ 	for (j = 0; j < i; ++j) {
+ 		cur_page = ttm->pages[j];
+ 		if (likely(cur_page != NULL)) {
+-			(void)ttm_tt_set_page_caching(cur_page,
++			(void)ttm_tt_set_page_caching(cur_page, c_state,
+ 						      ttm->caching_state);
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
+index bc2f518..7a1b210 100644
+--- a/drivers/gpu/drm/via/via_drv.c
++++ b/drivers/gpu/drm/via/via_drv.c
+@@ -58,7 +58,7 @@ static struct drm_driver driver = {
+ 		.owner = THIS_MODULE,
+ 		.open = drm_open,
+ 		.release = drm_release,
+-		.ioctl = drm_ioctl,
++		.unlocked_ioctl = drm_ioctl,
+ 		.mmap = drm_mmap,
+ 		.poll = drm_poll,
+ 		.fasync = drm_fasync,
+diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
+new file mode 100644
+index 0000000..f20b8bc
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/Kconfig
+@@ -0,0 +1,13 @@
++config DRM_VMWGFX
++	tristate "DRM driver for VMware Virtual GPU"
++	depends on DRM && PCI
++	select FB_DEFERRED_IO
++	select FB_CFB_FILLRECT
++	select FB_CFB_COPYAREA
++	select FB_CFB_IMAGEBLIT
++	select DRM_TTM
++	help
++	  KMS-enabled DRM driver for SVGA2 virtual hardware.
++
++	  If unsure, say n. The compiled module will be
++	  called vmwgfx.ko
+diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
+new file mode 100644
+index 0000000..1a3cb68
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/Makefile
+@@ -0,0 +1,9 @@
++
++ccflags-y := -Iinclude/drm
++
++vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
++	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
++	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
++	    vmwgfx_overlay.o
++
++obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+new file mode 100644
+index 0000000..77cb453
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+@@ -0,0 +1,1793 @@
++/**********************************************************
++ * Copyright 1998-2009 VMware, Inc.  All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy,
++ * modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ **********************************************************/
++
++/*
++ * svga3d_reg.h --
++ *
++ *       SVGA 3D hardware definitions
++ */
++
++#ifndef _SVGA3D_REG_H_
++#define _SVGA3D_REG_H_
++
++#include "svga_reg.h"
++
++
++/*
++ * 3D Hardware Version
++ *
++ *   The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
++ *   register.   It is set by the host and read by the guest.  This lets
++ *   us make new guest drivers which are backwards-compatible with old
++ *   SVGA hardware revisions.  It does not let us support old guest
++ *   drivers.  Good enough for now.
++ *
++ */
++
++#define SVGA3D_MAKE_HWVERSION(major, minor)      (((major) << 16) | ((minor) & 0xFF))
++#define SVGA3D_MAJOR_HWVERSION(version)          ((version) >> 16)
++#define SVGA3D_MINOR_HWVERSION(version)          ((version) & 0xFF)
++
++typedef enum {
++   SVGA3D_HWVERSION_WS5_RC1   = SVGA3D_MAKE_HWVERSION(0, 1),
++   SVGA3D_HWVERSION_WS5_RC2   = SVGA3D_MAKE_HWVERSION(0, 2),
++   SVGA3D_HWVERSION_WS51_RC1  = SVGA3D_MAKE_HWVERSION(0, 3),
++   SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
++   SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
++   SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
++   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS65_B1,
++} SVGA3dHardwareVersion;
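Worked example of the encoding above: SVGA3D_MAKE_HWVERSION(2, 0) evaluates to
(2 << 16) | (0 & 0xFF) = 0x20000, the SVGA3D_HWVERSION_WS65_B1 value, from
which SVGA3D_MAJOR_HWVERSION recovers 2 and SVGA3D_MINOR_HWVERSION recovers 0.
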
++
++/*
++ * Generic Types
++ */
++
++typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
++#define SVGA3D_NUM_CLIPPLANES                   6
++#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  8
++
++
++/*
++ * Surface formats.
++ *
++ * If you modify this list, be sure to keep GLUtil.c in sync. It
++ * includes the internal format definition of each surface in
++ * GLUtil_ConvertSurfaceFormat, and it contains a table of
++ * human-readable names in GLUtil_GetFormatName.
++ */
++
++typedef enum SVGA3dSurfaceFormat {
++   SVGA3D_FORMAT_INVALID = 0,
++
++   SVGA3D_X8R8G8B8       = 1,
++   SVGA3D_A8R8G8B8       = 2,
++
++   SVGA3D_R5G6B5         = 3,
++   SVGA3D_X1R5G5B5       = 4,
++   SVGA3D_A1R5G5B5       = 5,
++   SVGA3D_A4R4G4B4       = 6,
++
++   SVGA3D_Z_D32          = 7,
++   SVGA3D_Z_D16          = 8,
++   SVGA3D_Z_D24S8        = 9,
++   SVGA3D_Z_D15S1        = 10,
++
++   SVGA3D_LUMINANCE8            = 11,
++   SVGA3D_LUMINANCE4_ALPHA4     = 12,
++   SVGA3D_LUMINANCE16           = 13,
++   SVGA3D_LUMINANCE8_ALPHA8     = 14,
++
++   SVGA3D_DXT1           = 15,
++   SVGA3D_DXT2           = 16,
++   SVGA3D_DXT3           = 17,
++   SVGA3D_DXT4           = 18,
++   SVGA3D_DXT5           = 19,
++
++   SVGA3D_BUMPU8V8       = 20,
++   SVGA3D_BUMPL6V5U5     = 21,
++   SVGA3D_BUMPX8L8V8U8   = 22,
++   SVGA3D_BUMPL8V8U8     = 23,
++
++   SVGA3D_ARGB_S10E5     = 24,   /* 16-bit floating-point ARGB */
++   SVGA3D_ARGB_S23E8     = 25,   /* 32-bit floating-point ARGB */
++
++   SVGA3D_A2R10G10B10    = 26,
++
++   /* signed formats */
++   SVGA3D_V8U8           = 27,
++   SVGA3D_Q8W8V8U8       = 28,
++   SVGA3D_CxV8U8         = 29,
++
++   /* mixed formats */
++   SVGA3D_X8L8V8U8       = 30,
++   SVGA3D_A2W10V10U10    = 31,
++
++   SVGA3D_ALPHA8         = 32,
++
++   /* Single- and dual-component floating point formats */
++   SVGA3D_R_S10E5        = 33,
++   SVGA3D_R_S23E8        = 34,
++   SVGA3D_RG_S10E5       = 35,
++   SVGA3D_RG_S23E8       = 36,
++
++   /*
++    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
++    * the most efficient format to use when creating new surfaces
++    * expressly for index or vertex data.
++    */
++   SVGA3D_BUFFER         = 37,
++
++   SVGA3D_Z_D24X8        = 38,
++
++   SVGA3D_V16U16         = 39,
++
++   SVGA3D_G16R16         = 40,
++   SVGA3D_A16B16G16R16   = 41,
++
++   /* Packed Video formats */
++   SVGA3D_UYVY           = 42,
++   SVGA3D_YUY2           = 43,
++
++   SVGA3D_FORMAT_MAX
++} SVGA3dSurfaceFormat;
++
++typedef uint32 SVGA3dColor; /* a, r, g, b */
++
++/*
++ * These match the D3DFORMAT_OP definitions used by Direct3D. We need
++ * them so that we can query the host for what the supported surface
++ * operations are (when we're using the D3D backend, in particular),
++ * and so we can send those operations to the guest.
++ */
++typedef enum {
++   SVGA3DFORMAT_OP_TEXTURE                               = 0x00000001,
++   SVGA3DFORMAT_OP_VOLUMETEXTURE                         = 0x00000002,
++   SVGA3DFORMAT_OP_CUBETEXTURE                           = 0x00000004,
++   SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET                = 0x00000008,
++   SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET              = 0x00000010,
++   SVGA3DFORMAT_OP_ZSTENCIL                              = 0x00000040,
++   SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH   = 0x00000080,
++
++/*
++ * This format can be used as a render target if the current display mode
++ * has the same depth when the alpha channel is ignored. E.g. if the device
++ * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
++ * format op list entry for A8R8G8B8 should have this cap.
++ */
++   SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET  = 0x00000100,
++
++/*
++ * This format contains DirectDraw support (including Flip).  This flag
++ * should not be set on alpha formats.
++ */
++   SVGA3DFORMAT_OP_DISPLAYMODE                           = 0x00000400,
++
++/*
++ * The rasterizer can provide some level of Direct3D support for this format,
++ * which implies that the driver can create a Context in this mode (for some
++ * render target format).  When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
++ * flag must also be set.
++ */
++   SVGA3DFORMAT_OP_3DACCELERATION                        = 0x00000800,
++
++/*
++ * This is set for a private format when the driver has put the bpp in
++ * the structure.
++ */
++   SVGA3DFORMAT_OP_PIXELSIZE                             = 0x00001000,
++
++/*
++ * Indicates that this format can be converted to any RGB format for which
++ * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
++ */
++   SVGA3DFORMAT_OP_CONVERT_TO_ARGB                       = 0x00002000,
++
++/*
++ * Indicates that this format can be used to create offscreen plain surfaces.
++ */
++   SVGA3DFORMAT_OP_OFFSCREENPLAIN                        = 0x00004000,
++
++/*
++ * Indicates that this format can be read as an SRGB texture (meaning that the
++ * sampler will linearize the looked up data)
++ */
++   SVGA3DFORMAT_OP_SRGBREAD                              = 0x00008000,
++
++/*
++ * Indicates that this format can be used in the bumpmap instructions
++ */
++   SVGA3DFORMAT_OP_BUMPMAP                               = 0x00010000,
++
++/*
++ * Indicates that this format can be sampled by the displacement map sampler
++ */
++   SVGA3DFORMAT_OP_DMAP                                  = 0x00020000,
++
++/*
++ * Indicates that this format cannot be used with texture filtering
++ */
++   SVGA3DFORMAT_OP_NOFILTER                              = 0x00040000,
++
++/*
++ * Indicates that format conversions are supported to this RGB format if
++ * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
++ */
++   SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB                    = 0x00080000,
++
++/*
++ * Indicates that this format can be written as an SRGB target (meaning that the
++ * pixel pipe will DE-linearize data on output to the format)
++ */
++   SVGA3DFORMAT_OP_SRGBWRITE                             = 0x00100000,
++
++/*
++ * Indicates that this format cannot be used with alpha blending
++ */
++   SVGA3DFORMAT_OP_NOALPHABLEND                          = 0x00200000,
++
++/*
++ * Indicates that the device can auto-generate sublevels for resources
++ * of this format
++ */
++   SVGA3DFORMAT_OP_AUTOGENMIPMAP                         = 0x00400000,
++
++/*
++ * Indicates that this format can be used by vertex texture sampler
++ */
++   SVGA3DFORMAT_OP_VERTEXTEXTURE                         = 0x00800000,
++
++/*
++ * Indicates that this format supports neither texture coordinate wrap
++ * modes, nor mipmapping
++ */
++   SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP                  = 0x01000000
++} SVGA3dFormatOp;
++
++/*
++ * This structure is a conversion of SVGA3DFORMAT_OP_*.
++ * Entries must be located at the same position.
++ */
++typedef union {
++   uint32 value;
++   struct {
++      uint32 texture : 1;
++      uint32 volumeTexture : 1;
++      uint32 cubeTexture : 1;
++      uint32 offscreenRenderTarget : 1;
++      uint32 sameFormatRenderTarget : 1;
++      uint32 unknown1 : 1;
++      uint32 zStencil : 1;
++      uint32 zStencilArbitraryDepth : 1;
++      uint32 sameFormatUpToAlpha : 1;
++      uint32 unknown2 : 1;
++      uint32 displayMode : 1;
++      uint32 acceleration3d : 1;
++      uint32 pixelSize : 1;
++      uint32 convertToARGB : 1;
++      uint32 offscreenPlain : 1;
++      uint32 sRGBRead : 1;
++      uint32 bumpMap : 1;
++      uint32 dmap : 1;
++      uint32 noFilter : 1;
++      uint32 memberOfGroupARGB : 1;
++      uint32 sRGBWrite : 1;
++      uint32 noAlphaBlend : 1;
++      uint32 autoGenMipMap : 1;
++      uint32 vertexTexture : 1;
++      uint32 noTexCoordWrapNorMip : 1;
++   };
++} SVGA3dSurfaceFormatCaps;
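Because the bit fields mirror the SVGA3DFORMAT_OP_* values position for
position, a caps word can be tested through either view; a sketch, with the
queried value assumed to come from the host:

    SVGA3dSurfaceFormatCaps caps;

    caps.value = format_op_word;   /* hypothetical host query result */
    if (caps.texture)              /* same bit as SVGA3DFORMAT_OP_TEXTURE */
            /* the format may be used as a texture */;
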
++
++/*
++ * SVGA_3D_CMD_SETRENDERSTATE Types.  All value types
++ * must fit in a uint32.
++ */
++
++typedef enum {
++   SVGA3D_RS_INVALID                   = 0,
++   SVGA3D_RS_ZENABLE                   = 1,     /* SVGA3dBool */
++   SVGA3D_RS_ZWRITEENABLE              = 2,     /* SVGA3dBool */
++   SVGA3D_RS_ALPHATESTENABLE           = 3,     /* SVGA3dBool */
++   SVGA3D_RS_DITHERENABLE              = 4,     /* SVGA3dBool */
++   SVGA3D_RS_BLENDENABLE               = 5,     /* SVGA3dBool */
++   SVGA3D_RS_FOGENABLE                 = 6,     /* SVGA3dBool */
++   SVGA3D_RS_SPECULARENABLE            = 7,     /* SVGA3dBool */
++   SVGA3D_RS_STENCILENABLE             = 8,     /* SVGA3dBool */
++   SVGA3D_RS_LIGHTINGENABLE            = 9,     /* SVGA3dBool */
++   SVGA3D_RS_NORMALIZENORMALS          = 10,    /* SVGA3dBool */
++   SVGA3D_RS_POINTSPRITEENABLE         = 11,    /* SVGA3dBool */
++   SVGA3D_RS_POINTSCALEENABLE          = 12,    /* SVGA3dBool */
++   SVGA3D_RS_STENCILREF                = 13,    /* uint32 */
++   SVGA3D_RS_STENCILMASK               = 14,    /* uint32 */
++   SVGA3D_RS_STENCILWRITEMASK          = 15,    /* uint32 */
++   SVGA3D_RS_FOGSTART                  = 16,    /* float */
++   SVGA3D_RS_FOGEND                    = 17,    /* float */
++   SVGA3D_RS_FOGDENSITY                = 18,    /* float */
++   SVGA3D_RS_POINTSIZE                 = 19,    /* float */
++   SVGA3D_RS_POINTSIZEMIN              = 20,    /* float */
++   SVGA3D_RS_POINTSIZEMAX              = 21,    /* float */
++   SVGA3D_RS_POINTSCALE_A              = 22,    /* float */
++   SVGA3D_RS_POINTSCALE_B              = 23,    /* float */
++   SVGA3D_RS_POINTSCALE_C              = 24,    /* float */
++   SVGA3D_RS_FOGCOLOR                  = 25,    /* SVGA3dColor */
++   SVGA3D_RS_AMBIENT                   = 26,    /* SVGA3dColor */
++   SVGA3D_RS_CLIPPLANEENABLE           = 27,    /* SVGA3dClipPlanes */
++   SVGA3D_RS_FOGMODE                   = 28,    /* SVGA3dFogMode */
++   SVGA3D_RS_FILLMODE                  = 29,    /* SVGA3dFillMode */
++   SVGA3D_RS_SHADEMODE                 = 30,    /* SVGA3dShadeMode */
++   SVGA3D_RS_LINEPATTERN               = 31,    /* SVGA3dLinePattern */
++   SVGA3D_RS_SRCBLEND                  = 32,    /* SVGA3dBlendOp */
++   SVGA3D_RS_DSTBLEND                  = 33,    /* SVGA3dBlendOp */
++   SVGA3D_RS_BLENDEQUATION             = 34,    /* SVGA3dBlendEquation */
++   SVGA3D_RS_CULLMODE                  = 35,    /* SVGA3dFace */
++   SVGA3D_RS_ZFUNC                     = 36,    /* SVGA3dCmpFunc */
++   SVGA3D_RS_ALPHAFUNC                 = 37,    /* SVGA3dCmpFunc */
++   SVGA3D_RS_STENCILFUNC               = 38,    /* SVGA3dCmpFunc */
++   SVGA3D_RS_STENCILFAIL               = 39,    /* SVGA3dStencilOp */
++   SVGA3D_RS_STENCILZFAIL              = 40,    /* SVGA3dStencilOp */
++   SVGA3D_RS_STENCILPASS               = 41,    /* SVGA3dStencilOp */
++   SVGA3D_RS_ALPHAREF                  = 42,    /* float (0.0 .. 1.0) */
++   SVGA3D_RS_FRONTWINDING              = 43,    /* SVGA3dFrontWinding */
++   SVGA3D_RS_COORDINATETYPE            = 44,    /* SVGA3dCoordinateType */
++   SVGA3D_RS_ZBIAS                     = 45,    /* float */
++   SVGA3D_RS_RANGEFOGENABLE            = 46,    /* SVGA3dBool */
++   SVGA3D_RS_COLORWRITEENABLE          = 47,    /* SVGA3dColorMask */
++   SVGA3D_RS_VERTEXMATERIALENABLE      = 48,    /* SVGA3dBool */
++   SVGA3D_RS_DIFFUSEMATERIALSOURCE     = 49,    /* SVGA3dVertexMaterial */
++   SVGA3D_RS_SPECULARMATERIALSOURCE    = 50,    /* SVGA3dVertexMaterial */
++   SVGA3D_RS_AMBIENTMATERIALSOURCE     = 51,    /* SVGA3dVertexMaterial */
++   SVGA3D_RS_EMISSIVEMATERIALSOURCE    = 52,    /* SVGA3dVertexMaterial */
++   SVGA3D_RS_TEXTUREFACTOR             = 53,    /* SVGA3dColor */
++   SVGA3D_RS_LOCALVIEWER               = 54,    /* SVGA3dBool */
++   SVGA3D_RS_SCISSORTESTENABLE         = 55,    /* SVGA3dBool */
++   SVGA3D_RS_BLENDCOLOR                = 56,    /* SVGA3dColor */
++   SVGA3D_RS_STENCILENABLE2SIDED       = 57,    /* SVGA3dBool */
++   SVGA3D_RS_CCWSTENCILFUNC            = 58,    /* SVGA3dCmpFunc */
++   SVGA3D_RS_CCWSTENCILFAIL            = 59,    /* SVGA3dStencilOp */
++   SVGA3D_RS_CCWSTENCILZFAIL           = 60,    /* SVGA3dStencilOp */
++   SVGA3D_RS_CCWSTENCILPASS            = 61,    /* SVGA3dStencilOp */
++   SVGA3D_RS_VERTEXBLEND               = 62,    /* SVGA3dVertexBlendFlags */
++   SVGA3D_RS_SLOPESCALEDEPTHBIAS       = 63,    /* float */
++   SVGA3D_RS_DEPTHBIAS                 = 64,    /* float */
++
++
++   /*
++    * Output Gamma Level
++    *
++    * Output gamma affects the gamma curve of colors that are output from the
++    * rendering pipeline.  A value of 1.0 specifies a linear color space. If the
++    * value is <= 0.0, gamma correction is ignored and linear color space is
++    * used.
++    */
++
++   SVGA3D_RS_OUTPUTGAMMA               = 65,    /* float */
++   SVGA3D_RS_ZVISIBLE                  = 66,    /* SVGA3dBool */
++   SVGA3D_RS_LASTPIXEL                 = 67,    /* SVGA3dBool */
++   SVGA3D_RS_CLIPPING                  = 68,    /* SVGA3dBool */
++   SVGA3D_RS_WRAP0                     = 69,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP1                     = 70,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP2                     = 71,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP3                     = 72,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP4                     = 73,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP5                     = 74,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP6                     = 75,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP7                     = 76,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP8                     = 77,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP9                     = 78,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP10                    = 79,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP11                    = 80,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP12                    = 81,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP13                    = 82,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP14                    = 83,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_WRAP15                    = 84,    /* SVGA3dWrapFlags */
++   SVGA3D_RS_MULTISAMPLEANTIALIAS      = 85,    /* SVGA3dBool */
++   SVGA3D_RS_MULTISAMPLEMASK           = 86,    /* uint32 */
++   SVGA3D_RS_INDEXEDVERTEXBLENDENABLE  = 87,    /* SVGA3dBool */
++   SVGA3D_RS_TWEENFACTOR               = 88,    /* float */
++   SVGA3D_RS_ANTIALIASEDLINEENABLE     = 89,    /* SVGA3dBool */
++   SVGA3D_RS_COLORWRITEENABLE1         = 90,    /* SVGA3dColorMask */
++   SVGA3D_RS_COLORWRITEENABLE2         = 91,    /* SVGA3dColorMask */
++   SVGA3D_RS_COLORWRITEENABLE3         = 92,    /* SVGA3dColorMask */
++   SVGA3D_RS_SEPARATEALPHABLENDENABLE  = 93,    /* SVGA3dBool */
++   SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
++   SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
++   SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
++   SVGA3D_RS_MAX
++} SVGA3dRenderStateName;
++
++typedef enum {
++   SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
++   SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
++   SVGA3D_VERTEXMATERIAL_SPECULAR = 2,    /* Use the value in the specular component */
++} SVGA3dVertexMaterial;
++
++typedef enum {
++   SVGA3D_FILLMODE_INVALID = 0,
++   SVGA3D_FILLMODE_POINT   = 1,
++   SVGA3D_FILLMODE_LINE    = 2,
++   SVGA3D_FILLMODE_FILL    = 3,
++   SVGA3D_FILLMODE_MAX
++} SVGA3dFillModeType;
++
++
++typedef
++union {
++   struct {
++      uint16   mode;       /* SVGA3dFillModeType */
++      uint16   face;       /* SVGA3dFace */
++   };
++   uint32 uintValue;
++} SVGA3dFillMode;
++
++typedef enum {
++   SVGA3D_SHADEMODE_INVALID = 0,
++   SVGA3D_SHADEMODE_FLAT    = 1,
++   SVGA3D_SHADEMODE_SMOOTH  = 2,
++   SVGA3D_SHADEMODE_PHONG   = 3,     /* Not supported */
++   SVGA3D_SHADEMODE_MAX
++} SVGA3dShadeMode;
++
++typedef
++union {
++   struct {
++      uint16 repeat;
++      uint16 pattern;
++   };
++   uint32 uintValue;
++} SVGA3dLinePattern;
++
++typedef enum {
++   SVGA3D_BLENDOP_INVALID            = 0,
++   SVGA3D_BLENDOP_ZERO               = 1,
++   SVGA3D_BLENDOP_ONE                = 2,
++   SVGA3D_BLENDOP_SRCCOLOR           = 3,
++   SVGA3D_BLENDOP_INVSRCCOLOR        = 4,
++   SVGA3D_BLENDOP_SRCALPHA           = 5,
++   SVGA3D_BLENDOP_INVSRCALPHA        = 6,
++   SVGA3D_BLENDOP_DESTALPHA          = 7,
++   SVGA3D_BLENDOP_INVDESTALPHA       = 8,
++   SVGA3D_BLENDOP_DESTCOLOR          = 9,
++   SVGA3D_BLENDOP_INVDESTCOLOR       = 10,
++   SVGA3D_BLENDOP_SRCALPHASAT        = 11,
++   SVGA3D_BLENDOP_BLENDFACTOR        = 12,
++   SVGA3D_BLENDOP_INVBLENDFACTOR     = 13,
++   SVGA3D_BLENDOP_MAX
++} SVGA3dBlendOp;
++
++typedef enum {
++   SVGA3D_BLENDEQ_INVALID            = 0,
++   SVGA3D_BLENDEQ_ADD                = 1,
++   SVGA3D_BLENDEQ_SUBTRACT           = 2,
++   SVGA3D_BLENDEQ_REVSUBTRACT        = 3,
++   SVGA3D_BLENDEQ_MINIMUM            = 4,
++   SVGA3D_BLENDEQ_MAXIMUM            = 5,
++   SVGA3D_BLENDEQ_MAX
++} SVGA3dBlendEquation;
++
++typedef enum {
++   SVGA3D_FRONTWINDING_INVALID = 0,
++   SVGA3D_FRONTWINDING_CW      = 1,
++   SVGA3D_FRONTWINDING_CCW     = 2,
++   SVGA3D_FRONTWINDING_MAX
++} SVGA3dFrontWinding;
++
++typedef enum {
++   SVGA3D_FACE_INVALID  = 0,
++   SVGA3D_FACE_NONE     = 1,
++   SVGA3D_FACE_FRONT    = 2,
++   SVGA3D_FACE_BACK     = 3,
++   SVGA3D_FACE_FRONT_BACK = 4,
++   SVGA3D_FACE_MAX
++} SVGA3dFace;
++
++/*
++ * The order and the values should not be changed
++ */
++
++typedef enum {
++   SVGA3D_CMP_INVALID              = 0,
++   SVGA3D_CMP_NEVER                = 1,
++   SVGA3D_CMP_LESS                 = 2,
++   SVGA3D_CMP_EQUAL                = 3,
++   SVGA3D_CMP_LESSEQUAL            = 4,
++   SVGA3D_CMP_GREATER              = 5,
++   SVGA3D_CMP_NOTEQUAL             = 6,
++   SVGA3D_CMP_GREATEREQUAL         = 7,
++   SVGA3D_CMP_ALWAYS               = 8,
++   SVGA3D_CMP_MAX
++} SVGA3dCmpFunc;
++
++/*
++ * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
++ * the fog factor to be specified in the alpha component of the specular
++ * (a.k.a. secondary) vertex color.
++ */
++typedef enum {
++   SVGA3D_FOGFUNC_INVALID          = 0,
++   SVGA3D_FOGFUNC_EXP              = 1,
++   SVGA3D_FOGFUNC_EXP2             = 2,
++   SVGA3D_FOGFUNC_LINEAR           = 3,
++   SVGA3D_FOGFUNC_PER_VERTEX       = 4
++} SVGA3dFogFunction;
++
++/*
++ * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
++ * or per-pixel basis.
++ */
++typedef enum {
++   SVGA3D_FOGTYPE_INVALID          = 0,
++   SVGA3D_FOGTYPE_VERTEX           = 1,
++   SVGA3D_FOGTYPE_PIXEL            = 2,
++   SVGA3D_FOGTYPE_MAX              = 3
++} SVGA3dFogType;
++
++/*
++ * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
++ * computed using the eye Z value of each pixel (or vertex), whereas range-
++ * based fog is computed using the actual distance (range) to the eye.
++ */
++typedef enum {
++   SVGA3D_FOGBASE_INVALID          = 0,
++   SVGA3D_FOGBASE_DEPTHBASED       = 1,
++   SVGA3D_FOGBASE_RANGEBASED       = 2,
++   SVGA3D_FOGBASE_MAX              = 3
++} SVGA3dFogBase;
++
++typedef enum {
++   SVGA3D_STENCILOP_INVALID        = 0,
++   SVGA3D_STENCILOP_KEEP           = 1,
++   SVGA3D_STENCILOP_ZERO           = 2,
++   SVGA3D_STENCILOP_REPLACE        = 3,
++   SVGA3D_STENCILOP_INCRSAT        = 4,
++   SVGA3D_STENCILOP_DECRSAT        = 5,
++   SVGA3D_STENCILOP_INVERT         = 6,
++   SVGA3D_STENCILOP_INCR           = 7,
++   SVGA3D_STENCILOP_DECR           = 8,
++   SVGA3D_STENCILOP_MAX
++} SVGA3dStencilOp;
++
++typedef enum {
++   SVGA3D_CLIPPLANE_0              = (1 << 0),
++   SVGA3D_CLIPPLANE_1              = (1 << 1),
++   SVGA3D_CLIPPLANE_2              = (1 << 2),
++   SVGA3D_CLIPPLANE_3              = (1 << 3),
++   SVGA3D_CLIPPLANE_4              = (1 << 4),
++   SVGA3D_CLIPPLANE_5              = (1 << 5),
++} SVGA3dClipPlanes;
++
++typedef enum {
++   SVGA3D_CLEAR_COLOR              = 0x1,
++   SVGA3D_CLEAR_DEPTH              = 0x2,
++   SVGA3D_CLEAR_STENCIL            = 0x4
++} SVGA3dClearFlag;
++
++typedef enum {
++   SVGA3D_RT_DEPTH                 = 0,
++   SVGA3D_RT_STENCIL               = 1,
++   SVGA3D_RT_COLOR0                = 2,
++   SVGA3D_RT_COLOR1                = 3,
++   SVGA3D_RT_COLOR2                = 4,
++   SVGA3D_RT_COLOR3                = 5,
++   SVGA3D_RT_COLOR4                = 6,
++   SVGA3D_RT_COLOR5                = 7,
++   SVGA3D_RT_COLOR6                = 8,
++   SVGA3D_RT_COLOR7                = 9,
++   SVGA3D_RT_MAX,
++   SVGA3D_RT_INVALID               = ((uint32)-1),
++} SVGA3dRenderTargetType;
++
++#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
++
++typedef
++union {
++   struct {
++      uint32  red   : 1;
++      uint32  green : 1;
++      uint32  blue  : 1;
++      uint32  alpha : 1;
++   };
++   uint32 uintValue;
++} SVGA3dColorMask;
++
++typedef enum {
++   SVGA3D_VBLEND_DISABLE            = 0,
++   SVGA3D_VBLEND_1WEIGHT            = 1,
++   SVGA3D_VBLEND_2WEIGHT            = 2,
++   SVGA3D_VBLEND_3WEIGHT            = 3,
++} SVGA3dVertexBlendFlags;
++
++typedef enum {
++   SVGA3D_WRAPCOORD_0   = 1 << 0,
++   SVGA3D_WRAPCOORD_1   = 1 << 1,
++   SVGA3D_WRAPCOORD_2   = 1 << 2,
++   SVGA3D_WRAPCOORD_3   = 1 << 3,
++   SVGA3D_WRAPCOORD_ALL = 0xF,
++} SVGA3dWrapFlags;
++
++/*
++ * SVGA_3D_CMD_TEXTURESTATE Types.  All value types
++ * must fit in a uint32.
++ */
++
++typedef enum {
++   SVGA3D_TS_INVALID                    = 0,
++   SVGA3D_TS_BIND_TEXTURE               = 1,    /* SVGA3dSurfaceId */
++   SVGA3D_TS_COLOROP                    = 2,    /* SVGA3dTextureCombiner */
++   SVGA3D_TS_COLORARG1                  = 3,    /* SVGA3dTextureArgData */
++   SVGA3D_TS_COLORARG2                  = 4,    /* SVGA3dTextureArgData */
++   SVGA3D_TS_ALPHAOP                    = 5,    /* SVGA3dTextureCombiner */
++   SVGA3D_TS_ALPHAARG1                  = 6,    /* SVGA3dTextureArgData */
++   SVGA3D_TS_ALPHAARG2                  = 7,    /* SVGA3dTextureArgData */
++   SVGA3D_TS_ADDRESSU                   = 8,    /* SVGA3dTextureAddress */
++   SVGA3D_TS_ADDRESSV                   = 9,    /* SVGA3dTextureAddress */
++   SVGA3D_TS_MIPFILTER                  = 10,   /* SVGA3dTextureFilter */
++   SVGA3D_TS_MAGFILTER                  = 11,   /* SVGA3dTextureFilter */
++   SVGA3D_TS_MINFILTER                  = 12,   /* SVGA3dTextureFilter */
++   SVGA3D_TS_BORDERCOLOR                = 13,   /* SVGA3dColor */
++   SVGA3D_TS_TEXCOORDINDEX              = 14,   /* uint32 */
++   SVGA3D_TS_TEXTURETRANSFORMFLAGS      = 15,   /* SVGA3dTexTransformFlags */
++   SVGA3D_TS_TEXCOORDGEN                = 16,   /* SVGA3dTextureCoordGen */
++   SVGA3D_TS_BUMPENVMAT00               = 17,   /* float */
++   SVGA3D_TS_BUMPENVMAT01               = 18,   /* float */
++   SVGA3D_TS_BUMPENVMAT10               = 19,   /* float */
++   SVGA3D_TS_BUMPENVMAT11               = 20,   /* float */
++   SVGA3D_TS_TEXTURE_MIPMAP_LEVEL       = 21,   /* uint32 */
++   SVGA3D_TS_TEXTURE_LOD_BIAS           = 22,   /* float */
++   SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL  = 23,   /* uint32 */
++   SVGA3D_TS_ADDRESSW                   = 24,   /* SVGA3dTextureAddress */
++
++
++   /*
++    * Sampler Gamma Level
++    *
++    * Sampler gamma affects the color of samples taken from the sampler.  A
++    * value of 1.0 will produce linear samples.  If the value is <= 0.0 the
++    * gamma value is ignored and a linear space is used.
++    */
++
++   SVGA3D_TS_GAMMA                      = 25,   /* float */
++   SVGA3D_TS_BUMPENVLSCALE              = 26,   /* float */
++   SVGA3D_TS_BUMPENVLOFFSET             = 27,   /* float */
++   SVGA3D_TS_COLORARG0                  = 28,   /* SVGA3dTextureArgData */
++   SVGA3D_TS_ALPHAARG0                  = 29,   /* SVGA3dTextureArgData */
++   SVGA3D_TS_MAX
++} SVGA3dTextureStateName;
++
++typedef enum {
++   SVGA3D_TC_INVALID                   = 0,
++   SVGA3D_TC_DISABLE                   = 1,
++   SVGA3D_TC_SELECTARG1                = 2,
++   SVGA3D_TC_SELECTARG2                = 3,
++   SVGA3D_TC_MODULATE                  = 4,
++   SVGA3D_TC_ADD                       = 5,
++   SVGA3D_TC_ADDSIGNED                 = 6,
++   SVGA3D_TC_SUBTRACT                  = 7,
++   SVGA3D_TC_BLENDTEXTUREALPHA         = 8,
++   SVGA3D_TC_BLENDDIFFUSEALPHA         = 9,
++   SVGA3D_TC_BLENDCURRENTALPHA         = 10,
++   SVGA3D_TC_BLENDFACTORALPHA          = 11,
++   SVGA3D_TC_MODULATE2X                = 12,
++   SVGA3D_TC_MODULATE4X                = 13,
++   SVGA3D_TC_DSDT                      = 14,
++   SVGA3D_TC_DOTPRODUCT3               = 15,
++   SVGA3D_TC_BLENDTEXTUREALPHAPM       = 16,
++   SVGA3D_TC_ADDSIGNED2X               = 17,
++   SVGA3D_TC_ADDSMOOTH                 = 18,
++   SVGA3D_TC_PREMODULATE               = 19,
++   SVGA3D_TC_MODULATEALPHA_ADDCOLOR    = 20,
++   SVGA3D_TC_MODULATECOLOR_ADDALPHA    = 21,
++   SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
++   SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
++   SVGA3D_TC_BUMPENVMAPLUMINANCE       = 24,
++   SVGA3D_TC_MULTIPLYADD               = 25,
++   SVGA3D_TC_LERP                      = 26,
++   SVGA3D_TC_MAX
++} SVGA3dTextureCombiner;
++
++#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
++
++typedef enum {
++   SVGA3D_TEX_ADDRESS_INVALID    = 0,
++   SVGA3D_TEX_ADDRESS_WRAP       = 1,
++   SVGA3D_TEX_ADDRESS_MIRROR     = 2,
++   SVGA3D_TEX_ADDRESS_CLAMP      = 3,
++   SVGA3D_TEX_ADDRESS_BORDER     = 4,
++   SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
++   SVGA3D_TEX_ADDRESS_EDGE       = 6,
++   SVGA3D_TEX_ADDRESS_MAX
++} SVGA3dTextureAddress;
++
++/*
++ * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
++ * disabled, and the rasterizer should use the magnification filter instead.
++ */
++typedef enum {
++   SVGA3D_TEX_FILTER_NONE           = 0,
++   SVGA3D_TEX_FILTER_NEAREST        = 1,
++   SVGA3D_TEX_FILTER_LINEAR         = 2,
++   SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
++   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, // Deprecated, not implemented
++   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, // Deprecated, not implemented
++   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, // Not currently implemented
++   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, // Not currently implemented
++   SVGA3D_TEX_FILTER_MAX
++} SVGA3dTextureFilter;
++
++typedef enum {
++   SVGA3D_TEX_TRANSFORM_OFF    = 0,
++   SVGA3D_TEX_TRANSFORM_S      = (1 << 0),
++   SVGA3D_TEX_TRANSFORM_T      = (1 << 1),
++   SVGA3D_TEX_TRANSFORM_R      = (1 << 2),
++   SVGA3D_TEX_TRANSFORM_Q      = (1 << 3),
++   SVGA3D_TEX_PROJECTED        = (1 << 15),
++} SVGA3dTexTransformFlags;
++
++typedef enum {
++   SVGA3D_TEXCOORD_GEN_OFF              = 0,
++   SVGA3D_TEXCOORD_GEN_EYE_POSITION     = 1,
++   SVGA3D_TEXCOORD_GEN_EYE_NORMAL       = 2,
++   SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
++   SVGA3D_TEXCOORD_GEN_SPHERE           = 4,
++   SVGA3D_TEXCOORD_GEN_MAX
++} SVGA3dTextureCoordGen;
++
++/*
++ * Texture argument constants for texture combiner
++ */
++typedef enum {
++   SVGA3D_TA_INVALID    = 0,
++   SVGA3D_TA_CONSTANT   = 1,
++   SVGA3D_TA_PREVIOUS   = 2,
++   SVGA3D_TA_DIFFUSE    = 3,
++   SVGA3D_TA_TEXTURE    = 4,
++   SVGA3D_TA_SPECULAR   = 5,
++   SVGA3D_TA_MAX
++} SVGA3dTextureArgData;
++
++#define SVGA3D_TM_MASK_LEN 4
++
++/* Modifiers for texture argument constants defined above. */
++typedef enum {
++   SVGA3D_TM_NONE       = 0,
++   SVGA3D_TM_ALPHA      = (1 << SVGA3D_TM_MASK_LEN),
++   SVGA3D_TM_ONE_MINUS  = (2 << SVGA3D_TM_MASK_LEN),
++} SVGA3dTextureArgModifier;
++
++#define SVGA3D_INVALID_ID         ((uint32)-1)
++#define SVGA3D_MAX_CLIP_PLANES    6
++
++/*
++ * This is the limit to the number of fixed-function texture
++ * transforms and texture coordinates we can support. It does *not*
++ * correspond to the number of texture image units (samplers) we
++ * support!
++ */
++#define SVGA3D_MAX_TEXTURE_COORDS 8
++
++/*
++ * Vertex declarations
++ *
++ * Notes:
++ *
++ * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
++ * draw with any POSITIONT vertex arrays, the programmable vertex
++ * pipeline will be implicitly disabled. Drawing will take place as if
++ * no vertex shader was bound.
++ */
++
++typedef enum {
++   SVGA3D_DECLUSAGE_POSITION     = 0,
++   SVGA3D_DECLUSAGE_BLENDWEIGHT,       //  1
++   SVGA3D_DECLUSAGE_BLENDINDICES,      //  2
++   SVGA3D_DECLUSAGE_NORMAL,            //  3
++   SVGA3D_DECLUSAGE_PSIZE,             //  4
++   SVGA3D_DECLUSAGE_TEXCOORD,          //  5
++   SVGA3D_DECLUSAGE_TANGENT,           //  6
++   SVGA3D_DECLUSAGE_BINORMAL,          //  7
++   SVGA3D_DECLUSAGE_TESSFACTOR,        //  8
++   SVGA3D_DECLUSAGE_POSITIONT,         //  9
++   SVGA3D_DECLUSAGE_COLOR,             // 10
++   SVGA3D_DECLUSAGE_FOG,               // 11
++   SVGA3D_DECLUSAGE_DEPTH,             // 12
++   SVGA3D_DECLUSAGE_SAMPLE,            // 13
++   SVGA3D_DECLUSAGE_MAX
++} SVGA3dDeclUsage;
++
++typedef enum {
++   SVGA3D_DECLMETHOD_DEFAULT     = 0,
++   SVGA3D_DECLMETHOD_PARTIALU,
++   SVGA3D_DECLMETHOD_PARTIALV,
++   SVGA3D_DECLMETHOD_CROSSUV,          // Normal
++   SVGA3D_DECLMETHOD_UV,
++   SVGA3D_DECLMETHOD_LOOKUP,           // Lookup a displacement map
++   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
++} SVGA3dDeclMethod;
++
++typedef enum {
++   SVGA3D_DECLTYPE_FLOAT1        =  0,
++   SVGA3D_DECLTYPE_FLOAT2        =  1,
++   SVGA3D_DECLTYPE_FLOAT3        =  2,
++   SVGA3D_DECLTYPE_FLOAT4        =  3,
++   SVGA3D_DECLTYPE_D3DCOLOR      =  4,
++   SVGA3D_DECLTYPE_UBYTE4        =  5,
++   SVGA3D_DECLTYPE_SHORT2        =  6,
++   SVGA3D_DECLTYPE_SHORT4        =  7,
++   SVGA3D_DECLTYPE_UBYTE4N       =  8,
++   SVGA3D_DECLTYPE_SHORT2N       =  9,
++   SVGA3D_DECLTYPE_SHORT4N       = 10,
++   SVGA3D_DECLTYPE_USHORT2N      = 11,
++   SVGA3D_DECLTYPE_USHORT4N      = 12,
++   SVGA3D_DECLTYPE_UDEC3         = 13,
++   SVGA3D_DECLTYPE_DEC3N         = 14,
++   SVGA3D_DECLTYPE_FLOAT16_2     = 15,
++   SVGA3D_DECLTYPE_FLOAT16_4     = 16,
++   SVGA3D_DECLTYPE_MAX,
++} SVGA3dDeclType;
++
++/*
++ * This structure is used for the divisor for geometry instancing;
++ * it's a direct translation of the Direct3D equivalent.
++ */
++typedef union {
++   struct {
++      /*
++       * For index data, this number represents the number of instances to draw.
++       * For instance data, this number represents the number of
++       * instances/vertex in this stream
++       */
++      uint32 count : 30;
++
++      /*
++       * This is 1 if this is supposed to be the data that is repeated for
++       * every instance.
++       */
++      uint32 indexedData : 1;
++
++      /*
++       * This is 1 if this is supposed to be the per-instance data.
++       */
++      uint32 instanceData : 1;
++   };
++
++   uint32 value;
++} SVGA3dVertexDivisor;
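++
++/*
++ * Illustrative sketch (non-normative), following the field comments
++ * above: building a divisor that marks a stream as the indexed data
++ * (the data repeated for every instance), drawing 'numInstances'
++ * instances.
++ */
++static inline SVGA3dVertexDivisor
++SVGA3dMakeIndexedDivisor(uint32 numInstances)
++{
++   SVGA3dVertexDivisor div;
++
++   div.value       = 0;
++   div.count       = numInstances;  /* Number of instances to draw */
++   div.indexedData = 1;             /* Repeated for every instance */
++   return div;
++}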
++
++typedef enum {
++   SVGA3D_PRIMITIVE_INVALID                     = 0,
++   SVGA3D_PRIMITIVE_TRIANGLELIST                = 1,
++   SVGA3D_PRIMITIVE_POINTLIST                   = 2,
++   SVGA3D_PRIMITIVE_LINELIST                    = 3,
++   SVGA3D_PRIMITIVE_LINESTRIP                   = 4,
++   SVGA3D_PRIMITIVE_TRIANGLESTRIP               = 5,
++   SVGA3D_PRIMITIVE_TRIANGLEFAN                 = 6,
++   SVGA3D_PRIMITIVE_MAX
++} SVGA3dPrimitiveType;
++
++typedef enum {
++   SVGA3D_COORDINATE_INVALID                   = 0,
++   SVGA3D_COORDINATE_LEFTHANDED                = 1,
++   SVGA3D_COORDINATE_RIGHTHANDED               = 2,
++   SVGA3D_COORDINATE_MAX
++} SVGA3dCoordinateType;
++
++typedef enum {
++   SVGA3D_TRANSFORM_INVALID                     = 0,
++   SVGA3D_TRANSFORM_WORLD                       = 1,
++   SVGA3D_TRANSFORM_VIEW                        = 2,
++   SVGA3D_TRANSFORM_PROJECTION                  = 3,
++   SVGA3D_TRANSFORM_TEXTURE0                    = 4,
++   SVGA3D_TRANSFORM_TEXTURE1                    = 5,
++   SVGA3D_TRANSFORM_TEXTURE2                    = 6,
++   SVGA3D_TRANSFORM_TEXTURE3                    = 7,
++   SVGA3D_TRANSFORM_TEXTURE4                    = 8,
++   SVGA3D_TRANSFORM_TEXTURE5                    = 9,
++   SVGA3D_TRANSFORM_TEXTURE6                    = 10,
++   SVGA3D_TRANSFORM_TEXTURE7                    = 11,
++   SVGA3D_TRANSFORM_WORLD1                      = 12,
++   SVGA3D_TRANSFORM_WORLD2                      = 13,
++   SVGA3D_TRANSFORM_WORLD3                      = 14,
++   SVGA3D_TRANSFORM_MAX
++} SVGA3dTransformType;
++
++typedef enum {
++   SVGA3D_LIGHTTYPE_INVALID                     = 0,
++   SVGA3D_LIGHTTYPE_POINT                       = 1,
++   SVGA3D_LIGHTTYPE_SPOT1                       = 2, /* 1-cone, in degrees */
++   SVGA3D_LIGHTTYPE_SPOT2                       = 3, /* 2-cone, in radians */
++   SVGA3D_LIGHTTYPE_DIRECTIONAL                 = 4,
++   SVGA3D_LIGHTTYPE_MAX
++} SVGA3dLightType;
++
++typedef enum {
++   SVGA3D_CUBEFACE_POSX                         = 0,
++   SVGA3D_CUBEFACE_NEGX                         = 1,
++   SVGA3D_CUBEFACE_POSY                         = 2,
++   SVGA3D_CUBEFACE_NEGY                         = 3,
++   SVGA3D_CUBEFACE_POSZ                         = 4,
++   SVGA3D_CUBEFACE_NEGZ                         = 5,
++} SVGA3dCubeFace;
++
++typedef enum {
++   SVGA3D_SHADERTYPE_COMPILED_DX8               = 0,
++   SVGA3D_SHADERTYPE_VS                         = 1,
++   SVGA3D_SHADERTYPE_PS                         = 2,
++   SVGA3D_SHADERTYPE_MAX
++} SVGA3dShaderType;
++
++typedef enum {
++   SVGA3D_CONST_TYPE_FLOAT                      = 0,
++   SVGA3D_CONST_TYPE_INT                        = 1,
++   SVGA3D_CONST_TYPE_BOOL                       = 2,
++} SVGA3dShaderConstType;
++
++#define SVGA3D_MAX_SURFACE_FACES                6
++
++typedef enum {
++   SVGA3D_STRETCH_BLT_POINT                     = 0,
++   SVGA3D_STRETCH_BLT_LINEAR                    = 1,
++   SVGA3D_STRETCH_BLT_MAX
++} SVGA3dStretchBltMode;
++
++typedef enum {
++   SVGA3D_QUERYTYPE_OCCLUSION                   = 0,
++   SVGA3D_QUERYTYPE_MAX
++} SVGA3dQueryType;
++
++typedef enum {
++   SVGA3D_QUERYSTATE_PENDING     = 0,      /* Waiting on the host (set by guest) */
++   SVGA3D_QUERYSTATE_SUCCEEDED   = 1,      /* Completed successfully (set by host) */
++   SVGA3D_QUERYSTATE_FAILED      = 2,      /* Completed unsuccessfully (set by host) */
++   SVGA3D_QUERYSTATE_NEW         = 3,      /* Never submitted (For guest use only) */
++} SVGA3dQueryState;
++
++typedef enum {
++   SVGA3D_WRITE_HOST_VRAM        = 1,
++   SVGA3D_READ_HOST_VRAM         = 2,
++} SVGA3dTransferType;
++
++/*
++ * The maximum number of vertex arrays we're guaranteed to support in
++ * SVGA_3D_CMD_DRAWPRIMITIVES.
++ */
++#define SVGA3D_MAX_VERTEX_ARRAYS   32
++
++/*
++ * Identifiers for commands in the command FIFO.
++ *
++ * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
++ * the SVGA3D protocol and remain reserved; they should not be used in the
++ * future.
++ *
++ * IDs between 1040 and 1999 (inclusive) are available for use by the
++ * current SVGA3D protocol.
++ *
++ * FIFO clients other than SVGA3D should stay below 1000, or at 2000
++ * and up.
++ */
++
++#define SVGA_3D_CMD_LEGACY_BASE            1000
++#define SVGA_3D_CMD_BASE                   1040
++
++#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0
++#define SVGA_3D_CMD_SURFACE_DESTROY        SVGA_3D_CMD_BASE + 1
++#define SVGA_3D_CMD_SURFACE_COPY           SVGA_3D_CMD_BASE + 2
++#define SVGA_3D_CMD_SURFACE_STRETCHBLT     SVGA_3D_CMD_BASE + 3
++#define SVGA_3D_CMD_SURFACE_DMA            SVGA_3D_CMD_BASE + 4
++#define SVGA_3D_CMD_CONTEXT_DEFINE         SVGA_3D_CMD_BASE + 5
++#define SVGA_3D_CMD_CONTEXT_DESTROY        SVGA_3D_CMD_BASE + 6
++#define SVGA_3D_CMD_SETTRANSFORM           SVGA_3D_CMD_BASE + 7
++#define SVGA_3D_CMD_SETZRANGE              SVGA_3D_CMD_BASE + 8
++#define SVGA_3D_CMD_SETRENDERSTATE         SVGA_3D_CMD_BASE + 9
++#define SVGA_3D_CMD_SETRENDERTARGET        SVGA_3D_CMD_BASE + 10
++#define SVGA_3D_CMD_SETTEXTURESTATE        SVGA_3D_CMD_BASE + 11
++#define SVGA_3D_CMD_SETMATERIAL            SVGA_3D_CMD_BASE + 12
++#define SVGA_3D_CMD_SETLIGHTDATA           SVGA_3D_CMD_BASE + 13
++#define SVGA_3D_CMD_SETLIGHTENABLED        SVGA_3D_CMD_BASE + 14
++#define SVGA_3D_CMD_SETVIEWPORT            SVGA_3D_CMD_BASE + 15
++#define SVGA_3D_CMD_SETCLIPPLANE           SVGA_3D_CMD_BASE + 16
++#define SVGA_3D_CMD_CLEAR                  SVGA_3D_CMD_BASE + 17
++#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    // Deprecated
++#define SVGA_3D_CMD_SHADER_DEFINE          SVGA_3D_CMD_BASE + 19
++#define SVGA_3D_CMD_SHADER_DESTROY         SVGA_3D_CMD_BASE + 20
++#define SVGA_3D_CMD_SET_SHADER             SVGA_3D_CMD_BASE + 21
++#define SVGA_3D_CMD_SET_SHADER_CONST       SVGA_3D_CMD_BASE + 22
++#define SVGA_3D_CMD_DRAW_PRIMITIVES        SVGA_3D_CMD_BASE + 23
++#define SVGA_3D_CMD_SETSCISSORRECT         SVGA_3D_CMD_BASE + 24
++#define SVGA_3D_CMD_BEGIN_QUERY            SVGA_3D_CMD_BASE + 25
++#define SVGA_3D_CMD_END_QUERY              SVGA_3D_CMD_BASE + 26
++#define SVGA_3D_CMD_WAIT_FOR_QUERY         SVGA_3D_CMD_BASE + 27
++#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    // Deprecated
++#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
++#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 30
++
++#define SVGA_3D_CMD_FUTURE_MAX             2000
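++
++/*
++ * Illustrative sketch (non-normative): testing whether a FIFO command
++ * ID falls in the range reserved for the current SVGA3D protocol, per
++ * the allocation rules above.
++ */
++static inline SVGA3dBool
++SVGA3dIsCmdId(uint32 id)
++{
++   return id >= SVGA_3D_CMD_BASE && id < SVGA_3D_CMD_FUTURE_MAX;
++}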
++
++/*
++ * Common substructures used in multiple FIFO commands:
++ */
++
++typedef struct {
++   union {
++      struct {
++         uint16  function;       // SVGA3dFogFunction
++         uint8   type;           // SVGA3dFogType
++         uint8   base;           // SVGA3dFogBase
++      };
++      uint32     uintValue;
++   };
++} SVGA3dFogMode;
++
++/*
++ * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
++ * is a surface ID as well as face/mipmap indices.
++ */
++
++typedef
++struct SVGA3dSurfaceImageId {
++   uint32               sid;
++   uint32               face;
++   uint32               mipmap;
++} SVGA3dSurfaceImageId;
++
++typedef
++struct SVGA3dGuestImage {
++   SVGAGuestPtr         ptr;
++
++   /*
++    * A note on interpretation of pitch: This value of pitch is the
++    * number of bytes between vertically adjacent image
++    * blocks. Normally this is the number of bytes between the first
++    * pixel of two adjacent scanlines. With compressed textures,
++    * however, this may represent the number of bytes between
++    * compression blocks rather than between rows of pixels.
++    *
++    * XXX: Compressed textures currently must be tightly packed in guest memory.
++    *
++    * If the image is 1-dimensional, pitch is ignored.
++    *
++    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
++    * assuming each row of blocks is tightly packed.
++    */
++   uint32 pitch;
++} SVGA3dGuestImage;
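++
++/*
++ * Illustrative sketch (non-normative): the pitch a tightly packed,
++ * uncompressed 2D image would have, matching what the device assumes
++ * when 'pitch' is zero. 'bytesPerPixel' is a stand-in parameter; real
++ * code would derive it from the surface format.
++ */
++static inline uint32
++SVGA3dGuestImagePackedPitch(uint32 width, uint32 bytesPerPixel)
++{
++   return width * bytesPerPixel;   /* Bytes between adjacent rows */
++}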
++
++
++/*
++ * FIFO command format definitions:
++ */
++
++/*
++ * The data size header following cmdNum for every 3d command
++ */
++typedef
++struct {
++   uint32               id;
++   uint32               size;
++} SVGA3dCmdHeader;
++
++/*
++ * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
++ * optional mipmaps and cube faces.
++ */
++
++typedef
++struct {
++   uint32               width;
++   uint32               height;
++   uint32               depth;
++} SVGA3dSize;
++
++typedef enum {
++   SVGA3D_SURFACE_CUBEMAP              = (1 << 0),
++   SVGA3D_SURFACE_HINT_STATIC          = (1 << 1),
++   SVGA3D_SURFACE_HINT_DYNAMIC         = (1 << 2),
++   SVGA3D_SURFACE_HINT_INDEXBUFFER     = (1 << 3),
++   SVGA3D_SURFACE_HINT_VERTEXBUFFER    = (1 << 4),
++   SVGA3D_SURFACE_HINT_TEXTURE         = (1 << 5),
++   SVGA3D_SURFACE_HINT_RENDERTARGET    = (1 << 6),
++   SVGA3D_SURFACE_HINT_DEPTHSTENCIL    = (1 << 7),
++   SVGA3D_SURFACE_HINT_WRITEONLY       = (1 << 8),
++} SVGA3dSurfaceFlags;
++
++typedef
++struct {
++   uint32               numMipLevels;
++} SVGA3dSurfaceFace;
++
++typedef
++struct {
++   uint32                      sid;
++   SVGA3dSurfaceFlags          surfaceFlags;
++   SVGA3dSurfaceFormat         format;
++   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
++   /*
++    * Followed by an SVGA3dSize structure for each mip level in each face.
++    *
++    * A note on surface sizes: Sizes are always specified in pixels,
++    * even if the true surface size is not a multiple of the minimum
++    * block size of the surface's format. For example, a 3x3x1 DXT1
++    * compressed texture would actually be stored as a 4x4x1 image in
++    * memory.
++    */
++} SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
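++
++/*
++ * Illustrative sketch (non-normative): the body size of a
++ * SVGA_3D_CMD_SURFACE_DEFINE command is the fixed structure plus one
++ * SVGA3dSize per mip level in each face, as noted above.
++ */
++static inline uint32
++SVGA3dDefineSurfaceCmdSize(const SVGA3dCmdDefineSurface *cmd)
++{
++   uint32 i, totalMipLevels = 0;
++
++   for (i = 0; i < SVGA3D_MAX_SURFACE_FACES; i++) {
++      totalMipLevels += cmd->face[i].numMipLevels;
++   }
++   return sizeof(*cmd) + totalMipLevels * sizeof(SVGA3dSize);
++}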
++
++typedef
++struct {
++   uint32               sid;
++} SVGA3dCmdDestroySurface;      /* SVGA_3D_CMD_SURFACE_DESTROY */
++
++typedef
++struct {
++   uint32               cid;
++} SVGA3dCmdDefineContext;       /* SVGA_3D_CMD_CONTEXT_DEFINE */
++
++typedef
++struct {
++   uint32               cid;
++} SVGA3dCmdDestroyContext;      /* SVGA_3D_CMD_CONTEXT_DESTROY */
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dClearFlag      clearFlag;
++   uint32               color;
++   float                depth;
++   uint32               stencil;
++   /* Followed by variable number of SVGA3dRect structures */
++} SVGA3dCmdClear;               /* SVGA_3D_CMD_CLEAR */
++
++typedef
++struct SVGA3dCopyRect {
++   uint32               x;
++   uint32               y;
++   uint32               w;
++   uint32               h;
++   uint32               srcx;
++   uint32               srcy;
++} SVGA3dCopyRect;
++
++typedef
++struct SVGA3dCopyBox {
++   uint32               x;
++   uint32               y;
++   uint32               z;
++   uint32               w;
++   uint32               h;
++   uint32               d;
++   uint32               srcx;
++   uint32               srcy;
++   uint32               srcz;
++} SVGA3dCopyBox;
++
++typedef
++struct {
++   uint32               x;
++   uint32               y;
++   uint32               w;
++   uint32               h;
++} SVGA3dRect;
++
++typedef
++struct {
++   uint32               x;
++   uint32               y;
++   uint32               z;
++   uint32               w;
++   uint32               h;
++   uint32               d;
++} SVGA3dBox;
++
++typedef
++struct {
++   uint32               x;
++   uint32               y;
++   uint32               z;
++} SVGA3dPoint;
++
++typedef
++struct {
++   SVGA3dLightType      type;
++   SVGA3dBool           inWorldSpace;
++   float                diffuse[4];
++   float                specular[4];
++   float                ambient[4];
++   float                position[4];
++   float                direction[4];
++   float                range;
++   float                falloff;
++   float                attenuation0;
++   float                attenuation1;
++   float                attenuation2;
++   float                theta;
++   float                phi;
++} SVGA3dLightData;
++
++typedef
++struct {
++   uint32               sid;
++   /* Followed by variable number of SVGA3dCopyRect structures */
++} SVGA3dCmdPresent;             /* SVGA_3D_CMD_PRESENT */
++
++typedef
++struct {
++   SVGA3dRenderStateName   state;
++   union {
++      uint32               uintValue;
++      float                floatValue;
++   };
++} SVGA3dRenderState;
++
++typedef
++struct {
++   uint32               cid;
++   /* Followed by variable number of SVGA3dRenderState structures */
++} SVGA3dCmdSetRenderState;      /* SVGA_3D_CMD_SETRENDERSTATE */
++
++typedef
++struct {
++   uint32                 cid;
++   SVGA3dRenderTargetType type;
++   SVGA3dSurfaceImageId   target;
++} SVGA3dCmdSetRenderTarget;     /* SVGA_3D_CMD_SETRENDERTARGET */
++
++typedef
++struct {
++   SVGA3dSurfaceImageId  src;
++   SVGA3dSurfaceImageId  dest;
++   /* Followed by variable number of SVGA3dCopyBox structures */
++} SVGA3dCmdSurfaceCopy;               /* SVGA_3D_CMD_SURFACE_COPY */
++
++typedef
++struct {
++   SVGA3dSurfaceImageId  src;
++   SVGA3dSurfaceImageId  dest;
++   SVGA3dBox             boxSrc;
++   SVGA3dBox             boxDest;
++   SVGA3dStretchBltMode  mode;
++} SVGA3dCmdSurfaceStretchBlt;         /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
++
++typedef
++struct {
++   /*
++    * If the discard flag is present in a surface DMA operation, the host may
++    * discard the contents of the current mipmap level and face of the target
++    * surface before applying the surface DMA contents.
++    */
++   uint32 discard : 1;
++
++   /*
++    * If the unsynchronized flag is present, the host may perform this upload
++    * without syncing to pending reads on this surface.
++    */
++   uint32 unsynchronized : 1;
++
++   /*
++    * Guests *MUST* set the reserved bits to 0 before submitting the command
++    * suffix as future flags may occupy these bits.
++    */
++   uint32 reserved : 30;
++} SVGA3dSurfaceDMAFlags;
++
++typedef
++struct {
++   SVGA3dGuestImage      guest;
++   SVGA3dSurfaceImageId  host;
++   SVGA3dTransferType    transfer;
++   /*
++    * Followed by variable number of SVGA3dCopyBox structures. For consistency
++    * in all clipping logic and coordinate translation, we define the
++    * "source" in each copyBox as the guest image and the
++    * "destination" as the host image, regardless of transfer
++    * direction.
++    *
++    * For efficiency, the SVGA3D device is free to copy more data than
++    * specified. For example, it may round copy boxes outwards such
++    * that they lie on particular alignment boundaries.
++    */
++} SVGA3dCmdSurfaceDMA;                /* SVGA_3D_CMD_SURFACE_DMA */
++
++/*
++ * SVGA3dCmdSurfaceDMASuffix --
++ *
++ *    This is a command suffix that will appear after a SurfaceDMA command in
++ *    the FIFO.  It contains some extra information that hosts may use to
++ *    optimize performance or protect the guest.  This suffix exists to preserve
++ *    backwards compatibility while also allowing for new functionality to be
++ *    implemented.
++ */
++
++typedef
++struct {
++   uint32 suffixSize;
++
++   /*
++    * The maximum offset is used to determine the maximum offset from the
++    * guestPtr base address that will be accessed or written to during this
++    * surfaceDMA.  If the suffix is supported, the host will respect this
++    * boundary while performing surface DMAs.
++    *
++    * Defaults to MAX_UINT32
++    */
++   uint32 maximumOffset;
++
++   /*
++    * A set of flags that describes optimizations that the host may perform
++    * while performing this surface DMA operation.  For correctness, the guest
++    * must never rely on behaviour that differs when these flags are set.
++    *
++    * Defaults to 0
++    */
++   SVGA3dSurfaceDMAFlags flags;
++} SVGA3dCmdSurfaceDMASuffix;
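++
++/*
++ * Illustrative sketch (non-normative): initializing the suffix to the
++ * documented defaults; 0xFFFFFFFF stands in for MAX_UINT32.
++ */
++static inline void
++SVGA3dSurfaceDMASuffixInitDefaults(SVGA3dCmdSurfaceDMASuffix *suffix)
++{
++   suffix->suffixSize    = sizeof(*suffix);
++   suffix->maximumOffset = 0xFFFFFFFF;  /* No extra boundary (default) */
++   suffix->flags.discard        = 0;
++   suffix->flags.unsynchronized = 0;
++   suffix->flags.reserved       = 0;    /* Must be zero, see above */
++}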
++
++/*
++ * SVGA_3D_CMD_DRAW_PRIMITIVES --
++ *
++ *   This command is the SVGA3D device's generic drawing entry point.
++ *   It can draw multiple ranges of primitives, optionally using an
++ *   index buffer, using an arbitrary collection of vertex buffers.
++ *
++ *   Each SVGA3dVertexDecl defines a distinct vertex array to bind
++ *   during this draw call. The declarations specify which surface
++ *   the vertex data lives in, what that vertex data is used for,
++ *   and how to interpret it.
++ *
++ *   Each SVGA3dPrimitiveRange defines a collection of primitives
++ *   to render using the same vertex arrays. An index buffer is
++ *   optional.
++ */
++
++typedef
++struct {
++   /*
++    * A range hint is an optional specification for the range of indices
++    * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
++    * that the entire array will be used.
++    *
++    * These are only hints. The SVGA3D device may use them for
++    * performance optimization if possible, but it's also allowed to
++    * ignore these values.
++    */
++   uint32               first;
++   uint32               last;
++} SVGA3dArrayRangeHint;
++
++typedef
++struct {
++   /*
++    * Define the origin and shape of a vertex or index array. Both
++    * 'offset' and 'stride' are in bytes. The provided surface will be
++    * reinterpreted as a flat array of bytes in the same format used
++    * by surface DMA operations. To avoid unnecessary conversions, the
++    * surface should be created with the SVGA3D_BUFFER format.
++    *
++    * Index 0 in the array starts 'offset' bytes into the surface.
++    * Index 1 begins at byte 'offset + stride', etc. Array indices may
++    * not be negative.
++    */
++   uint32               surfaceId;
++   uint32               offset;
++   uint32               stride;
++} SVGA3dArray;
++
++typedef
++struct {
++   /*
++    * Describe a vertex array's data type, and define how it is to be
++    * used by the fixed function pipeline or the vertex shader. It
++    * isn't useful to have two VertexDecls with the same
++    * VertexArrayIdentity in one draw call.
++    */
++   SVGA3dDeclType       type;
++   SVGA3dDeclMethod     method;
++   SVGA3dDeclUsage      usage;
++   uint32               usageIndex;
++} SVGA3dVertexArrayIdentity;
++
++typedef
++struct {
++   SVGA3dVertexArrayIdentity  identity;
++   SVGA3dArray                array;
++   SVGA3dArrayRangeHint       rangeHint;
++} SVGA3dVertexDecl;
++
++typedef
++struct {
++   /*
++    * Define a group of primitives to render, from sequential indices.
++    *
++    * The values of 'primType' and 'primitiveCount' imply the
++    * total number of vertices that will be rendered.
++    */
++   SVGA3dPrimitiveType  primType;
++   uint32               primitiveCount;
++
++   /*
++    * Optional index buffer. If indexArray.surfaceId is
++    * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
++    * without an index buffer is identical to rendering with an index
++    * buffer containing the sequence [0, 1, 2, 3, ...].
++    *
++    * If an index buffer is in use, indexWidth specifies the width in
++    * bytes of each index value. It must be less than or equal to
++    * indexArray.stride.
++    *
++    * (Currently, the SVGA3D device requires index buffers to be tightly
++    * packed. In other words, indexWidth == indexArray.stride)
++    */
++   SVGA3dArray          indexArray;
++   uint32               indexWidth;
++
++   /*
++    * Optional index bias. This number is added to all indices from
++    * indexArray before they are used as vertex array indices. This
++    * can be used in multiple ways:
++    *
++    *  - When not using an indexArray, this bias can be used to
++    *    specify where in the vertex arrays to begin rendering.
++    *
++    *  - A positive number here is equivalent to increasing the
++    *    offset in each vertex array.
++    *
++    *  - A negative number can be used to render using a small
++    *    vertex array and an index buffer that contains large
++    *    values. This may be used by some applications that
++    *    crop a vertex buffer without modifying their index
++    *    buffer.
++    *
++    * Note that rendering with a negative bias value may be slower and
++    * use more memory than rendering with a positive or zero bias.
++    */
++   int32                indexBias;
++} SVGA3dPrimitiveRange;
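++
++/*
++ * Illustrative sketch (non-normative): initializing a primitive range
++ * for a non-indexed triangle-list draw. Per the comments above,
++ * setting indexArray.surfaceId to SVGA3D_INVALID_ID renders without
++ * an index buffer.
++ */
++static inline void
++SVGA3dPrimitiveRangeInitNonIndexed(SVGA3dPrimitiveRange *range,
++                                   uint32 numTriangles)
++{
++   range->primType       = SVGA3D_PRIMITIVE_TRIANGLELIST;
++   range->primitiveCount = numTriangles;
++   range->indexArray.surfaceId = SVGA3D_INVALID_ID; /* No index buffer */
++   range->indexArray.offset    = 0;
++   range->indexArray.stride    = 0;
++   range->indexWidth           = 0;
++   range->indexBias            = 0;
++}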
++
++typedef
++struct {
++   uint32               cid;
++   uint32               numVertexDecls;
++   uint32               numRanges;
++
++   /*
++    * There are two variable size arrays after the
++    * SVGA3dCmdDrawPrimitives structure. In order,
++    * they are:
++    *
++    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
++    * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
++    * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
++    *    the frequency divisor for the corresponding vertex decl)
++    */
++} SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
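++
++/*
++ * Illustrative sketch (non-normative): total body size of a
++ * SVGA_3D_CMD_DRAW_PRIMITIVES command with the trailing arrays listed
++ * above; the SVGA3dVertexDivisor array is optional.
++ */
++static inline uint32
++SVGA3dDrawPrimitivesCmdSize(uint32 numVertexDecls, uint32 numRanges,
++                            SVGA3dBool withDivisors)
++{
++   uint32 size = sizeof(SVGA3dCmdDrawPrimitives);
++
++   size += numVertexDecls * sizeof(SVGA3dVertexDecl);
++   size += numRanges * sizeof(SVGA3dPrimitiveRange);
++   if (withDivisors) {
++      size += numVertexDecls * sizeof(SVGA3dVertexDivisor);
++   }
++   return size;
++}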
++
++typedef
++struct {
++   uint32                   stage;
++   SVGA3dTextureStateName   name;
++   union {
++      uint32                value;
++      float                 floatValue;
++   };
++} SVGA3dTextureState;
++
++typedef
++struct {
++   uint32               cid;
++   /* Followed by variable number of SVGA3dTextureState structures */
++} SVGA3dCmdSetTextureState;      /* SVGA_3D_CMD_SETTEXTURESTATE */
++
++typedef
++struct {
++   uint32                   cid;
++   SVGA3dTransformType      type;
++   float                    matrix[16];
++} SVGA3dCmdSetTransform;          /* SVGA_3D_CMD_SETTRANSFORM */
++
++typedef
++struct {
++   float                min;
++   float                max;
++} SVGA3dZRange;
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dZRange         zRange;
++} SVGA3dCmdSetZRange;             /* SVGA_3D_CMD_SETZRANGE */
++
++typedef
++struct {
++   float                diffuse[4];
++   float                ambient[4];
++   float                specular[4];
++   float                emissive[4];
++   float                shininess;
++} SVGA3dMaterial;
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dFace           face;
++   SVGA3dMaterial       material;
++} SVGA3dCmdSetMaterial;           /* SVGA_3D_CMD_SETMATERIAL */
++
++typedef
++struct {
++   uint32               cid;
++   uint32               index;
++   SVGA3dLightData      data;
++} SVGA3dCmdSetLightData;           /* SVGA_3D_CMD_SETLIGHTDATA */
++
++typedef
++struct {
++   uint32               cid;
++   uint32               index;
++   uint32               enabled;
++} SVGA3dCmdSetLightEnabled;      /* SVGA_3D_CMD_SETLIGHTENABLED */
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dRect           rect;
++} SVGA3dCmdSetViewport;           /* SVGA_3D_CMD_SETVIEWPORT */
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dRect           rect;
++} SVGA3dCmdSetScissorRect;         /* SVGA_3D_CMD_SETSCISSORRECT */
++
++typedef
++struct {
++   uint32               cid;
++   uint32               index;
++   float                plane[4];
++} SVGA3dCmdSetClipPlane;           /* SVGA_3D_CMD_SETCLIPPLANE */
++
++typedef
++struct {
++   uint32               cid;
++   uint32               shid;
++   SVGA3dShaderType     type;
++   /* Followed by variable number of DWORDs for shader bytecode */
++} SVGA3dCmdDefineShader;           /* SVGA_3D_CMD_SHADER_DEFINE */
++
++typedef
++struct {
++   uint32               cid;
++   uint32               shid;
++   SVGA3dShaderType     type;
++} SVGA3dCmdDestroyShader;         /* SVGA_3D_CMD_SHADER_DESTROY */
++
++typedef
++struct {
++   uint32                  cid;
++   uint32                  reg;     /* register number */
++   SVGA3dShaderType        type;
++   SVGA3dShaderConstType   ctype;
++   uint32                  values[4];
++} SVGA3dCmdSetShaderConst;        /* SVGA_3D_CMD_SET_SHADER_CONST */
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dShaderType     type;
++   uint32               shid;
++} SVGA3dCmdSetShader;             /* SVGA_3D_CMD_SET_SHADER */
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dQueryType      type;
++} SVGA3dCmdBeginQuery;           /* SVGA_3D_CMD_BEGIN_QUERY */
++
++typedef
++struct {
++   uint32               cid;
++   SVGA3dQueryType      type;
++   SVGAGuestPtr         guestResult;  /* Points to an SVGA3dQueryResult structure */
++} SVGA3dCmdEndQuery;                  /* SVGA_3D_CMD_END_QUERY */
++
++typedef
++struct {
++   uint32               cid;          /* Same parameters passed to END_QUERY */
++   SVGA3dQueryType      type;
++   SVGAGuestPtr         guestResult;
++} SVGA3dCmdWaitForQuery;              /* SVGA_3D_CMD_WAIT_FOR_QUERY */
++
++typedef
++struct {
++   uint32               totalSize;    /* Set by guest before query is ended. */
++   SVGA3dQueryState     state;        /* Set by host or guest. See SVGA3dQueryState. */
++   union {                            /* Set by host on exit from PENDING state */
++      uint32            result32;
++   };
++} SVGA3dQueryResult;
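++
++/*
++ * Illustrative sketch (non-normative): preparing a query result
++ * structure before issuing SVGA_3D_CMD_END_QUERY. Per the field
++ * comments above, the guest sets totalSize and marks the result
++ * PENDING; the host updates the state when it completes the query.
++ */
++static inline void
++SVGA3dQueryResultPrepare(SVGA3dQueryResult *result)
++{
++   result->totalSize = sizeof(*result);
++   result->state     = SVGA3D_QUERYSTATE_PENDING;
++}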
++
++/*
++ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
++ *
++ *    This is a blit from an SVGA3D surface to a Screen Object. Just
++ *    like GMR-to-screen blits, this blit may be directed at a
++ *    specific screen or to the virtual coordinate space.
++ *
++ *    The blit copies from a rectangular region of an SVGA3D surface
++ *    image to a rectangular region of a screen or screens.
++ *
++ *    This command takes an optional variable-length list of clipping
++ *    rectangles after the body of the command. If no rectangles are
++ *    specified, there is no clipping region. The entire destRect is
++ *    drawn to. If one or more rectangles are included, they describe
++ *    a clipping region. The clip rectangle coordinates are measured
++ *    relative to the top-left corner of destRect.
++ *
++ *    This clipping region serves multiple purposes:
++ *
++ *      - It can be used to perform an irregularly shaped blit more
++ *        efficiently than by issuing many separate blit commands.
++ *
++ *      - It is equivalent to allowing blits with non-integer
++ *        source coordinates. You could blit just one half-pixel
++ *        of a source, for example, by specifying a larger
++ *        destination rectangle than you need, then removing
++ *        part of it using a clip rectangle.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ *
++ * Limitations:
++ *
++ *    - Currently, no backend supports blits from a mipmap or face
++ *      other than the first one.
++ */
++
++typedef
++struct {
++   SVGA3dSurfaceImageId srcImage;
++   SVGASignedRect       srcRect;
++   uint32               destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
++   SVGASignedRect       destRect;     /* Supports scaling if src/dest sizes differ */
++   /* Clipping: zero or more SVGASignedRects follow */
++} SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
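++
++/*
++ * Illustrative sketch (non-normative): body size of a
++ * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command carrying 'numClipRects'
++ * clipping rectangles, as described above.
++ */
++static inline uint32
++SVGA3dBlitSurfaceToScreenCmdSize(uint32 numClipRects)
++{
++   return sizeof(SVGA3dCmdBlitSurfaceToScreen) +
++          numClipRects * sizeof(SVGASignedRect);
++}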
++
++
++/*
++ * Capability query index.
++ *
++ * Notes:
++ *
++ *   1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
++ *      fixed-function texture units available. Each of these units
++ *      works in both FFP and Shader modes, and supports texture
++ *      transforms and texture coordinates. The host may have additional
++ *      texture image units that are only usable with shaders.
++ *
++ *   2. The BUFFER_FORMAT capabilities are deprecated, and they always
++ *      return TRUE. Even on physical hardware that does not support
++ *      these formats natively, the SVGA3D device will provide an emulation
++ *      which should be invisible to the guest OS.
++ *
++ *      In general, the SVGA3D device should support any operation on
++ *      any surface format, it just may perform some of these
++ *      operations in software depending on the capabilities of the
++ *      available physical hardware.
++ *
++ *      XXX: In the future, we will add capabilities that describe in
++ *      detail what formats are supported in hardware for what kinds
++ *      of operations.
++ */
++
++typedef enum {
++   SVGA3D_DEVCAP_3D                                = 0,
++   SVGA3D_DEVCAP_MAX_LIGHTS                        = 1,
++   SVGA3D_DEVCAP_MAX_TEXTURES                      = 2,  /* See note (1) */
++   SVGA3D_DEVCAP_MAX_CLIP_PLANES                   = 3,
++   SVGA3D_DEVCAP_VERTEX_SHADER_VERSION             = 4,
++   SVGA3D_DEVCAP_VERTEX_SHADER                     = 5,
++   SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION           = 6,
++   SVGA3D_DEVCAP_FRAGMENT_SHADER                   = 7,
++   SVGA3D_DEVCAP_MAX_RENDER_TARGETS                = 8,
++   SVGA3D_DEVCAP_S23E8_TEXTURES                    = 9,
++   SVGA3D_DEVCAP_S10E5_TEXTURES                    = 10,
++   SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND             = 11,
++   SVGA3D_DEVCAP_D16_BUFFER_FORMAT                 = 12, /* See note (2) */
++   SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT               = 13, /* See note (2) */
++   SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT               = 14, /* See note (2) */
++   SVGA3D_DEVCAP_QUERY_TYPES                       = 15,
++   SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING         = 16,
++   SVGA3D_DEVCAP_MAX_POINT_SIZE                    = 17,
++   SVGA3D_DEVCAP_MAX_SHADER_TEXTURES               = 18,
++   SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH                 = 19,
++   SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT                = 20,
++   SVGA3D_DEVCAP_MAX_VOLUME_EXTENT                 = 21,
++   SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT                = 22,
++   SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO          = 23,
++   SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY            = 24,
++   SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT               = 25,
++   SVGA3D_DEVCAP_MAX_VERTEX_INDEX                  = 26,
++   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS    = 27,
++   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS  = 28,
++   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS           = 29,
++   SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS         = 30,
++   SVGA3D_DEVCAP_TEXTURE_OPS                       = 31,
++   SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8               = 32,
++   SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8               = 33,
++   SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10            = 34,
++   SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5               = 35,
++   SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5               = 36,
++   SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4               = 37,
++   SVGA3D_DEVCAP_SURFACEFMT_R5G6B5                 = 38,
++   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16            = 39,
++   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8      = 40,
++   SVGA3D_DEVCAP_SURFACEFMT_ALPHA8                 = 41,
++   SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8             = 42,
++   SVGA3D_DEVCAP_SURFACEFMT_Z_D16                  = 43,
++   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8                = 44,
++   SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8                = 45,
++   SVGA3D_DEVCAP_SURFACEFMT_DXT1                   = 46,
++   SVGA3D_DEVCAP_SURFACEFMT_DXT2                   = 47,
++   SVGA3D_DEVCAP_SURFACEFMT_DXT3                   = 48,
++   SVGA3D_DEVCAP_SURFACEFMT_DXT4                   = 49,
++   SVGA3D_DEVCAP_SURFACEFMT_DXT5                   = 50,
++   SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8           = 51,
++   SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10            = 52,
++   SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8               = 53,
++   SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8               = 54,
++   SVGA3D_DEVCAP_SURFACEFMT_CxV8U8                 = 55,
++   SVGA3D_DEVCAP_SURFACEFMT_R_S10E5                = 56,
++   SVGA3D_DEVCAP_SURFACEFMT_R_S23E8                = 57,
++   SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5               = 58,
++   SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8               = 59,
++   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5             = 60,
++   SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8             = 61,
++   SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES        = 63,
++
++   /*
++    * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
++    * render targets.  This does not include the depth or stencil targets.
++    */
++   SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS   = 64,
++
++   SVGA3D_DEVCAP_SURFACEFMT_V16U16                 = 65,
++   SVGA3D_DEVCAP_SURFACEFMT_G16R16                 = 66,
++   SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
++   SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
++   SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
++
++   /*
++    * Don't add new caps into the previous section; the values in this
++    * enumeration must not change. You can put new values right before
++    * SVGA3D_DEVCAP_MAX.
++    */
++   SVGA3D_DEVCAP_MAX                                  /* This must be the last index. */
++} SVGA3dDevCapIndex;
++
++typedef union {
++   Bool   b;
++   uint32 u;
++   int32  i;
++   float  f;
++} SVGA3dDevCapResult;
++
++#endif /* _SVGA3D_REG_H_ */
+diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
+new file mode 100644
+index 0000000..7b85e9b
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
+@@ -0,0 +1,89 @@
++/**********************************************************
++ * Copyright 2007-2009 VMware, Inc.  All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy,
++ * modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ **********************************************************/
++
++/*
++ * svga_escape.h --
++ *
++ *    Definitions for our own (vendor-specific) SVGA Escape commands.
++ */
++
++#ifndef _SVGA_ESCAPE_H_
++#define _SVGA_ESCAPE_H_
++
++
++/*
++ * Namespace IDs for the escape command
++ */
++
++#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
++#define SVGA_ESCAPE_NSID_DEVEL  0xFFFFFFFF
++
++
++/*
++ * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
++ * the first DWORD of escape data (after the nsID and size). As a
++ * guideline we're using the high word and low word as a major and
++ * minor command number, respectively.
++ *
++ * Major command number allocation:
++ *
++ *   0000: Reserved
++ *   0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
++ *   0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
++ *   0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
++ */
++
++#define SVGA_ESCAPE_VMWARE_MAJOR_MASK  0xFFFF0000
++
++
++/*
++ * SVGA Hint commands.
++ *
++ * These escapes let the SVGA driver provide optional information to
++ * the host about the state of the guest or guest applications. The
++ * host can use these hints to make user interface or performance
++ * decisions.
++ *
++ * Notes:
++ *
++ *   - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
++ *     that use the SVGA Screen Object extension. Instead of sending
++ *     this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
++ *     Screen Object.
++ */
++
++#define SVGA_ESCAPE_VMWARE_HINT               0x00030000
++#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  // Deprecated
++
++typedef
++struct {
++   uint32 command;
++   uint32 fullscreen;
++   struct {
++      int32 x, y;
++   } monitorPosition;
++} SVGAEscapeHintFullscreen;
++
++#endif /* _SVGA_ESCAPE_H_ */
+diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
+new file mode 100644
+index 0000000..f753d73
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
+@@ -0,0 +1,201 @@
++/**********************************************************
++ * Copyright 2007-2009 VMware, Inc.  All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy,
++ * modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ **********************************************************/
++
++/*
++ * svga_overlay.h --
++ *
++ *    Definitions for video-overlay support.
++ */
++
++#ifndef _SVGA_OVERLAY_H_
++#define _SVGA_OVERLAY_H_
++
++#include "svga_reg.h"
++
++/*
++ * Video formats we support
++ */
++
++#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
++#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
++#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
++
++typedef enum {
++   SVGA_OVERLAY_FORMAT_INVALID = 0,
++   SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
++   SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
++   SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
++} SVGAOverlayFormat;
++
++#define SVGA_VIDEO_COLORKEY_MASK             0x00ffffff
++
++#define SVGA_ESCAPE_VMWARE_VIDEO             0x00020000
++
++#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS    0x00020001
++        /* FIFO escape layout:
++         * Type, Stream Id, (Register Id, Value) pairs */
++
++#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH       0x00020002
++        /* FIFO escape layout:
++         * Type, Stream Id */
++
++typedef
++struct SVGAEscapeVideoSetRegs {
++   struct {
++      uint32 cmdType;
++      uint32 streamId;
++   } header;
++
++   // May include zero or more items.
++   struct {
++      uint32 registerId;
++      uint32 value;
++   } items[1];
++} SVGAEscapeVideoSetRegs;
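++
++/*
++ * Illustrative sketch (non-normative): 'items' is declared with one
++ * element but carries a variable number of (register, value) pairs in
++ * the FIFO, so the escape payload size is computed per item count.
++ */
++static inline uint32
++SVGAEscapeVideoSetRegsSize(uint32 numItems)
++{
++   return sizeof(((SVGAEscapeVideoSetRegs *) 0)->header) +
++          numItems * sizeof(((SVGAEscapeVideoSetRegs *) 0)->items[0]);
++}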
++
++typedef
++struct SVGAEscapeVideoFlush {
++   uint32 cmdType;
++   uint32 streamId;
++} SVGAEscapeVideoFlush;
++
++
++/*
++ * Struct definitions for the video overlay commands built on
++ * SVGAFifoCmdEscape.
++ */
++typedef
++struct {
++   uint32 command;
++   uint32 overlay;
++} SVGAFifoEscapeCmdVideoBase;
++
++typedef
++struct {
++   SVGAFifoEscapeCmdVideoBase videoCmd;
++} SVGAFifoEscapeCmdVideoFlush;
++
++typedef
++struct {
++   SVGAFifoEscapeCmdVideoBase videoCmd;
++   struct {
++      uint32 regId;
++      uint32 value;
++   } items[1];
++} SVGAFifoEscapeCmdVideoSetRegs;
++
++typedef
++struct {
++   SVGAFifoEscapeCmdVideoBase videoCmd;
++   struct {
++      uint32 regId;
++      uint32 value;
++   } items[SVGA_VIDEO_NUM_REGS];
++} SVGAFifoEscapeCmdVideoSetAllRegs;
++
++
++/*
++ *----------------------------------------------------------------------
++ *
++ * VMwareVideoGetAttributes --
++ *
++ *      Computes the size, pitches and offsets for YUV frames.
++ *
++ * Results:
++ *      TRUE on success; FALSE on failure.
++ *
++ * Side effects:
++ *      Pitches and offsets for the given YUV frame are put in 'pitches'
++ *      and 'offsets', respectively. Both are optional.
++ *
++ *----------------------------------------------------------------------
++ */
++
++static inline bool
++VMwareVideoGetAttributes(const SVGAOverlayFormat format,    // IN
++                         uint32 *width,                     // IN / OUT
++                         uint32 *height,                    // IN / OUT
++                         uint32 *size,                      // OUT
++                         uint32 *pitches,                   // OUT (optional)
++                         uint32 *offsets)                   // OUT (optional)
++{
++    int tmp;
++
++    *width = (*width + 1) & ~1;
++
++    if (offsets) {
++        offsets[0] = 0;
++    }
++
++    switch (format) {
++    case VMWARE_FOURCC_YV12:
++       *height = (*height + 1) & ~1;
++       *size = (*width + 3) & ~3;
++
++       if (pitches) {
++          pitches[0] = *size;
++       }
++
++       *size *= *height;
++
++       if (offsets) {
++          offsets[1] = *size;
++       }
++
++       tmp = ((*width >> 1) + 3) & ~3;
++
++       if (pitches) {
++          pitches[1] = pitches[2] = tmp;
++       }
++
++       tmp *= (*height >> 1);
++       *size += tmp;
++
++       if (offsets) {
++          offsets[2] = *size;
++       }
++
++       *size += tmp;
++       break;
++
++    case VMWARE_FOURCC_YUY2:
++    case VMWARE_FOURCC_UYVY:
++       *size = *width * 2;
++
++       if (pitches) {
++          pitches[0] = *size;
++       }
++
++       *size *= *height;
++       break;
++
++    default:
++       return false;
++    }
++
++    return true;
++}
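++
++/*
++ * Illustrative usage sketch (non-normative): querying the layout of a
++ * 320x240 YV12 frame. Width and height may be rounded up by the
++ * function; 'pitches' and 'offsets' receive the per-plane layout.
++ */
++static inline bool
++VMwareVideoExampleYV12(uint32 *size)
++{
++   uint32 width = 320, height = 240;
++   uint32 pitches[3], offsets[3];
++
++   return VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12,
++                                   &width, &height, size,
++                                   pitches, offsets);
++}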
++
++#endif // _SVGA_OVERLAY_H_
+diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
+new file mode 100644
+index 0000000..1b96c2e
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
+@@ -0,0 +1,1346 @@
++/**********************************************************
++ * Copyright 1998-2009 VMware, Inc.  All rights reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person
++ * obtaining a copy of this software and associated documentation
++ * files (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy,
++ * modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be
++ * included in all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
++ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
++ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
++ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
++ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
++ * SOFTWARE.
++ *
++ **********************************************************/
++
++/*
++ * svga_reg.h --
++ *
++ *    Virtual hardware definitions for the VMware SVGA II device.
++ */
++
++#ifndef _SVGA_REG_H_
++#define _SVGA_REG_H_
++
++/*
++ * PCI device IDs.
++ */
++#define PCI_VENDOR_ID_VMWARE            0x15AD
++#define PCI_DEVICE_ID_VMWARE_SVGA2      0x0405
++
++/*
++ * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
++ * cursor bypass mode. This is still supported, but no new guest
++ * drivers should use it.
++ */
++#define SVGA_CURSOR_ON_HIDE            0x0   /* Must be 0 to maintain backward compatibility */
++#define SVGA_CURSOR_ON_SHOW            0x1   /* Must be 1 to maintain backward compatibility */
++#define SVGA_CURSOR_ON_REMOVE_FROM_FB  0x2   /* Remove the cursor from the framebuffer because we need to see what's under it */
++#define SVGA_CURSOR_ON_RESTORE_TO_FB   0x3   /* Put the cursor back in the framebuffer so the user can see it */
++
++/*
++ * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
++ * The changeMap in the monitor is proportional to this number. Therefore, we'd
++ * like to keep it as small as possible to reduce monitor overhead (using
++ * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
++ * 4k!).
++ *
++ * NB: For compatibility reasons, this value must be greater than 0xff0000.
++ *     See bug 335072.
++ */
++#define SVGA_FB_MAX_TRACEABLE_SIZE      0x1000000
++
++#define SVGA_MAX_PSEUDOCOLOR_DEPTH      8
++#define SVGA_MAX_PSEUDOCOLORS           (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
++#define SVGA_NUM_PALETTE_REGS           (3 * SVGA_MAX_PSEUDOCOLORS)
++
++#define SVGA_MAGIC         0x900000UL
++#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))
++
++/* Version 2 let the address of the frame buffer be unsigned on Win32 */
++#define SVGA_VERSION_2     2
++#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
++
++/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
++   PALETTE_BASE has moved */
++#define SVGA_VERSION_1     1
++#define SVGA_ID_1          SVGA_MAKE_ID(SVGA_VERSION_1)
++
++/* Version 0 is the initial version */
++#define SVGA_VERSION_0     0
++#define SVGA_ID_0          SVGA_MAKE_ID(SVGA_VERSION_0)
++
++/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
++#define SVGA_ID_INVALID    0xFFFFFFFF
++
++/* Port offsets, relative to BAR0 */
++#define SVGA_INDEX_PORT         0x0
++#define SVGA_VALUE_PORT         0x1
++#define SVGA_BIOS_PORT          0x2
++#define SVGA_IRQSTATUS_PORT     0x8
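
These ports imply the usual indexed-register access pattern. A minimal sketch, assuming x86 port I/O (outl/inl) and an io_base taken from BAR0, neither of which this header itself provides:

static void svga_write_reg(unsigned long io_base, uint32 index, uint32 value)
{
        /* Select the register via the index port, then write its value. */
        outl(index, io_base + SVGA_INDEX_PORT);
        outl(value, io_base + SVGA_VALUE_PORT);
}

static uint32 svga_read_reg(unsigned long io_base, uint32 index)
{
        outl(index, io_base + SVGA_INDEX_PORT);
        return inl(io_base + SVGA_VALUE_PORT);
}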
++
++/*
++ * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
++ *
++ * Interrupts are only supported when the
++ * SVGA_CAP_IRQMASK capability is present.
++ */
++#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
++#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
++#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
++
++/*
++ * Registers
++ */
++
++enum {
++   SVGA_REG_ID = 0,
++   SVGA_REG_ENABLE = 1,
++   SVGA_REG_WIDTH = 2,
++   SVGA_REG_HEIGHT = 3,
++   SVGA_REG_MAX_WIDTH = 4,
++   SVGA_REG_MAX_HEIGHT = 5,
++   SVGA_REG_DEPTH = 6,
++   SVGA_REG_BITS_PER_PIXEL = 7,       /* Current bpp in the guest */
++   SVGA_REG_PSEUDOCOLOR = 8,
++   SVGA_REG_RED_MASK = 9,
++   SVGA_REG_GREEN_MASK = 10,
++   SVGA_REG_BLUE_MASK = 11,
++   SVGA_REG_BYTES_PER_LINE = 12,
++   SVGA_REG_FB_START = 13,            /* (Deprecated) */
++   SVGA_REG_FB_OFFSET = 14,
++   SVGA_REG_VRAM_SIZE = 15,
++   SVGA_REG_FB_SIZE = 16,
++
++   /* ID 0 implementation only had the above registers, then the palette */
++
++   SVGA_REG_CAPABILITIES = 17,
++   SVGA_REG_MEM_START = 18,           /* (Deprecated) */
++   SVGA_REG_MEM_SIZE = 19,
++   SVGA_REG_CONFIG_DONE = 20,         /* Set when memory area configured */
++   SVGA_REG_SYNC = 21,                /* See "FIFO Synchronization Registers" */
++   SVGA_REG_BUSY = 22,                /* See "FIFO Synchronization Registers" */
++   SVGA_REG_GUEST_ID = 23,            /* Set guest OS identifier */
++   SVGA_REG_CURSOR_ID = 24,           /* (Deprecated) */
++   SVGA_REG_CURSOR_X = 25,            /* (Deprecated) */
++   SVGA_REG_CURSOR_Y = 26,            /* (Deprecated) */
++   SVGA_REG_CURSOR_ON = 27,           /* (Deprecated) */
++   SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
++   SVGA_REG_SCRATCH_SIZE = 29,        /* Number of scratch registers */
++   SVGA_REG_MEM_REGS = 30,            /* Number of FIFO registers */
++   SVGA_REG_NUM_DISPLAYS = 31,        /* (Deprecated) */
++   SVGA_REG_PITCHLOCK = 32,           /* Fixed pitch for all modes */
++   SVGA_REG_IRQMASK = 33,             /* Interrupt mask */
++
++   /* Legacy multi-monitor support */
++   SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
++   SVGA_REG_DISPLAY_ID = 35,        /* Display ID for the following display attributes */
++   SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
++   SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
++   SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
++   SVGA_REG_DISPLAY_WIDTH = 39,     /* The display's width */
++   SVGA_REG_DISPLAY_HEIGHT = 40,    /* The display's height */
++
++   /* See "Guest memory regions" below. */
++   SVGA_REG_GMR_ID = 41,
++   SVGA_REG_GMR_DESCRIPTOR = 42,
++   SVGA_REG_GMR_MAX_IDS = 43,
++   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
++
++   SVGA_REG_TRACES = 45,            /* Enable trace-based updates even when FIFO is on */
++   SVGA_REG_TOP = 46,               /* Must be 1 more than the last register */
++
++   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
++   /* Next 768 (== 256*3) registers exist for colormap */
++
++   SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
++                                    /* Base of scratch registers */
++   /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
++      First 4 are reserved for VESA BIOS Extension; any remaining are for
++      the use of the current SVGA driver. */
++};
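
Putting the register file and the SVGA_ID_* values together, version negotiation amounts to writing the highest ID the driver knows and checking whether the device echoes it back. A hedged sketch, reusing the port helpers sketched above:

static uint32 svga_negotiate_version(unsigned long io_base)
{
        uint32 id;

        /* SVGA_ID_0..SVGA_ID_2 are consecutive, so counting down works. */
        for (id = SVGA_ID_2; id >= SVGA_ID_0; id--) {
                svga_write_reg(io_base, SVGA_REG_ID, id);
                if (svga_read_reg(io_base, SVGA_REG_ID) == id)
                        return id;      /* Device accepted this version. */
        }
        return SVGA_ID_INVALID;
}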
++
++
++/*
++ * Guest memory regions (GMRs):
++ *
++ * This is a new memory mapping feature available in SVGA devices
++ * which have the SVGA_CAP_GMR bit set. Previously, there were two
++ * fixed memory regions available with which to share data between the
++ * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
++ * are our name for an extensible way of providing arbitrary DMA
++ * buffers for use between the driver and the SVGA device. They are a
++ * new alternative to framebuffer memory, usable for both 2D and 3D
++ * graphics operations.
++ *
++ * Since GMR mapping must be done synchronously with guest CPU
++ * execution, we use a new pair of SVGA registers:
++ *
++ *   SVGA_REG_GMR_ID --
++ *
++ *     Read/write.
++ *     This register holds the 32-bit ID (a small positive integer)
++ *     of a GMR to create, delete, or redefine. Writing this register
++ *     has no side-effects.
++ *
++ *   SVGA_REG_GMR_DESCRIPTOR --
++ *
++ *     Write-only.
++ *     Writing this register will create, delete, or redefine the GMR
++ *     specified by the above ID register. If this register is zero,
++ *     the GMR is deleted. Any pointers into this GMR (including those
++ *     currently being processed by FIFO commands) will be
++ *     synchronously invalidated.
++ *
++ *     If this register is nonzero, it must be the physical page
++ *     number (PPN) of a data structure which describes the physical
++ *     layout of the memory region this GMR should describe. The
++ *     descriptor structure will be read synchronously by the SVGA
++ *     device when this register is written. The descriptor need not
++ *     remain allocated for the lifetime of the GMR.
++ *
++ *     The guest driver should write SVGA_REG_GMR_ID first, then
++ *     SVGA_REG_GMR_DESCRIPTOR.
++ *
++ *   SVGA_REG_GMR_MAX_IDS --
++ *
++ *     Read-only.
++ *     The SVGA device may choose to support a maximum number of
++ *     user-defined GMR IDs. This register holds the number of supported
++ *     IDs. (The maximum supported ID plus 1)
++ *
++ *   SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
++ *
++ *     Read-only.
++ *     The SVGA device may choose to put a limit on the total number
++ *     of SVGAGuestMemDescriptor structures it will read when defining
++ *     a single GMR.
++ *
++ * The descriptor structure is an array of SVGAGuestMemDescriptor
++ * structures. Each structure may do one of three things:
++ *
++ *   - Terminate the GMR descriptor list.
++ *     (ppn==0, numPages==0)
++ *
++ *   - Add a PPN or range of PPNs to the GMR's virtual address space.
++ *     (ppn != 0, numPages != 0)
++ *
++ *   - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
++ *     support multi-page GMR descriptor tables without forcing the
++ *     driver to allocate physically contiguous memory.
++ *     (ppn != 0, numPages == 0)
++ *
++ * Note that each physical page of SVGAGuestMemDescriptor structures
++ * can describe at least 2MB of guest memory. If the driver needs to
++ * use more than one page of descriptor structures, it must use one of
++ * its SVGAGuestMemDescriptors to point to an additional page.  The
++ * device will never automatically cross a page boundary.
++ *
++ * Once the driver has described a GMR, it is immediately available
++ * for use via any FIFO command that uses an SVGAGuestPtr structure.
++ * These pointers include a GMR identifier plus an offset into that
++ * GMR.
++ *
++ * The driver must check the SVGA_CAP_GMR bit before using the GMR
++ * registers.
++ */
++
++/*
++ * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
++ * memory as well.  In the future, these IDs could even be used to
++ * allow legacy memory regions to be redefined by the guest as GMRs.
++ *
++ * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
++ * is being phased out. Please try to use user-defined GMRs whenever
++ * possible.
++ */
++#define SVGA_GMR_NULL         ((uint32) -1)
++#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  // Guest Framebuffer (GFB)
++
++typedef
++struct SVGAGuestMemDescriptor {
++   uint32 ppn;
++   uint32 numPages;
++} SVGAGuestMemDescriptor;
++
++typedef
++struct SVGAGuestPtr {
++   uint32 gmrId;
++   uint32 offset;
++} SVGAGuestPtr;
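
Tying the register protocol above to these structs: a sketch of defining a GMR backed by one physically contiguous run of pages, with the descriptor list ended by the (ppn==0, numPages==0) sentinel. The helpers are the earlier sketches, not part of this header:

static void svga_define_gmr(unsigned long io_base, uint32 gmr_id,
                            SVGAGuestMemDescriptor *desc, uint32 desc_ppn,
                            uint32 first_ppn, uint32 num_pages)
{
        desc[0].ppn = first_ppn;        /* The pages backing the GMR. */
        desc[0].numPages = num_pages;
        desc[1].ppn = 0;                /* Terminator entry. */
        desc[1].numPages = 0;

        /* Per the comment above: ID first, then the descriptor's PPN. */
        svga_write_reg(io_base, SVGA_REG_GMR_ID, gmr_id);
        svga_write_reg(io_base, SVGA_REG_GMR_DESCRIPTOR, desc_ppn);
}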
++
++
++/*
++ * SVGAGMRImageFormat --
++ *
++ *    This is a packed representation of the source 2D image format
++ *    for a GMR-to-screen blit. Currently it is defined as an encoding
++ *    of the screen's color depth and bits-per-pixel; however, 16 bits
++ *    are reserved for future use to identify other encodings (such as
++ *    RGBA or higher-precision images).
++ *
++ *    Currently supported formats:
++ *
++ *       bpp depth  Format Name
++ *       --- -----  -----------
++ *        32    24  32-bit BGRX
++ *        24    24  24-bit BGR
++ *        16    16  RGB 5-6-5
++ *        16    15  RGB 5-5-5
++ *
++ */
++
++typedef
++struct SVGAGMRImageFormat {
++   union {
++      struct {
++         uint32 bitsPerPixel : 8;
++         uint32 colorDepth   : 8;
++         uint32 reserved     : 16;  // Must be zero
++      };
++
++      uint32 value;
++   };
++} SVGAGMRImageFormat;
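
A small sketch of packing the 32 bpp / depth 24 (32-bit BGRX) case from the table above; clearing .value first keeps the reserved bits zero as required:

static SVGAGMRImageFormat svga_format_bgrx32(void)
{
        SVGAGMRImageFormat fmt;

        fmt.value = 0;          /* Reserved bits must be zero. */
        fmt.bitsPerPixel = 32;
        fmt.colorDepth = 24;    /* 32 bpp / depth 24: 32-bit BGRX. */
        return fmt;
}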
++
++/*
++ * SVGAColorBGRX --
++ *
++ *    A 24-bit color format (BGRX), which does not depend on the
++ *    format of the legacy guest framebuffer (GFB) or the current
++ *    GMRFB state.
++ */
++
++typedef
++struct SVGAColorBGRX {
++   union {
++      struct {
++         uint32 b : 8;
++         uint32 g : 8;
++         uint32 r : 8;
++         uint32 x : 8;  // Unused
++      };
++
++      uint32 value;
++   };
++} SVGAColorBGRX;
++
++
++/*
++ * SVGASignedRect --
++ * SVGASignedPoint --
++ *
++ *    Signed rectangle and point primitives. These are used by the new
++ *    2D primitives for drawing to Screen Objects, which can occupy a
++ *    signed virtual coordinate space.
++ *
++ *    SVGASignedRect specifies a half-open interval: the (left, top)
++ *    pixel is part of the rectangle, but the (right, bottom) pixel is
++ *    not.
++ */
++
++typedef
++struct SVGASignedRect {
++   int32  left;
++   int32  top;
++   int32  right;
++   int32  bottom;
++} SVGASignedRect;
++
++typedef
++struct SVGASignedPoint {
++   int32  x;
++   int32  y;
++} SVGASignedPoint;
++
++
++/*
++ *  Capabilities
++ *
++ *  Note the holes in the bitfield. Missing bits have been deprecated,
++ *  and must not be reused. Those capabilities will never be reported
++ *  by new versions of the SVGA device.
++ */
++
++#define SVGA_CAP_NONE               0x00000000
++#define SVGA_CAP_RECT_COPY          0x00000002
++#define SVGA_CAP_CURSOR             0x00000020
++#define SVGA_CAP_CURSOR_BYPASS      0x00000040   // Legacy (Use Cursor Bypass 3 instead)
++#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   // Legacy (Use Cursor Bypass 3 instead)
++#define SVGA_CAP_8BIT_EMULATION     0x00000100
++#define SVGA_CAP_ALPHA_CURSOR       0x00000200
++#define SVGA_CAP_3D                 0x00004000
++#define SVGA_CAP_EXTENDED_FIFO      0x00008000
++#define SVGA_CAP_MULTIMON           0x00010000   // Legacy multi-monitor support
++#define SVGA_CAP_PITCHLOCK          0x00020000
++#define SVGA_CAP_IRQMASK            0x00040000
++#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   // Legacy multi-monitor support
++#define SVGA_CAP_GMR                0x00100000
++#define SVGA_CAP_TRACES             0x00200000
++
++
++/*
++ * FIFO register indices.
++ *
++ * The FIFO is a chunk of device memory mapped into guest physmem.  It
++ * is always treated as 32-bit words.
++ *
++ * The guest driver gets to decide how to partition it between
++ * - FIFO registers (there are always at least 4, specifying where the
++ *   following data area is and how much data it contains; there may be
++ *   more registers following these, depending on the FIFO protocol
++ *   version in use)
++ * - FIFO data, written by the guest and slurped out by the VMX.
++ * These indices are 32-bit word offsets into the FIFO.
++ */
++
++enum {
++   /*
++    * Block 1 (basic registers): The originally defined FIFO registers.
++    * These exist and are valid for all versions of the FIFO protocol.
++    */
++
++   SVGA_FIFO_MIN = 0,
++   SVGA_FIFO_MAX,       /* The distance from MIN to MAX must be at least 10K */
++   SVGA_FIFO_NEXT_CMD,
++   SVGA_FIFO_STOP,
++
++   /*
++    * Block 2 (extended registers): Mandatory registers for the extended
++    * FIFO.  These exist if the SVGA caps register includes
++    * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
++    * associated capability bit is enabled.
++    *
++    * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
++    * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
++    * This means that the guest has to test individually (in most cases
++    * using FIFO caps) for the presence of registers after this; the VMX
++    * can define "extended FIFO" to mean whatever it wants, and currently
++    * won't enable it unless there's room for that set and much more.
++    */
++
++   SVGA_FIFO_CAPABILITIES = 4,
++   SVGA_FIFO_FLAGS,
++   // Valid with SVGA_FIFO_CAP_FENCE:
++   SVGA_FIFO_FENCE,
++
++   /*
++    * Block 3a (optional extended registers): Additional registers for the
++    * extended FIFO, whose presence isn't actually implied by
++    * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
++    * leave room for them.
++    *
++    * The VMX currently considers the registers in block 3a mandatory
++    * for the extended FIFO.
++    */
++
++   // Valid if exists (i.e. if extended FIFO enabled):
++   SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
++   // Valid with SVGA_FIFO_CAP_PITCHLOCK:
++   SVGA_FIFO_PITCHLOCK,
++
++   // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
++   SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
++   SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
++   SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
++   SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
++   SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
++
++   // Valid with SVGA_FIFO_CAP_RESERVE:
++   SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
++
++   /*
++    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
++    *
++    * By default this is SVGA_ID_INVALID, to indicate that the cursor
++    * coordinates are specified relative to the virtual root. If this
++    * is set to a specific screen ID, cursor position is reinterpreted
++    * as a signed offset relative to that screen's origin. This is the
++    * only way to place the cursor on a non-rooted screen.
++    */
++   SVGA_FIFO_CURSOR_SCREEN_ID,
++
++   /*
++    * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
++    * registers, but this must be done carefully and with judicious use of
++    * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
++    * enough to tell you whether the register exists: we've shipped drivers
++    * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
++    * the earlier ones.  The actual order of introduction was:
++    * - PITCHLOCK
++    * - 3D_CAPS
++    * - CURSOR_* (cursor bypass 3)
++    * - RESERVED
++    * So, code that wants to know whether it can use any of the
++    * aforementioned registers, or anything else added after PITCHLOCK and
++    * before 3D_CAPS, needs to reason about something other than
++    * SVGA_FIFO_MIN.
++    */
++
++   /*
++    * 3D caps block space; valid with 3D hardware version >=
++    * SVGA3D_HWVERSION_WS6_B1.
++    */
++   SVGA_FIFO_3D_CAPS      = 32,
++   SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
++
++   /*
++    * End of VMX's current definition of "extended-FIFO registers".
++    * Registers before here are always enabled/disabled as a block; either
++    * the extended FIFO is enabled and includes all preceding registers, or
++    * it's disabled entirely.
++    *
++    * Block 3b (truly optional extended registers): Additional registers for
++    * the extended FIFO, which the VMX already knows how to enable and
++    * disable with correct granularity.
++    *
++    * Registers after here exist if and only if the guest SVGA driver
++    * sets SVGA_FIFO_MIN high enough to leave room for them.
++    */
++
++   // Valid if register exists:
++   SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
++   SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
++   SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
++
++   /*
++    * Always keep this last.  This defines the maximum number of
++    * registers we know about.  At power-on, this value is placed in
++    * the SVGA_REG_MEM_REGS register, and we expect the guest driver
++    * to allocate this much space in FIFO memory for registers.
++    */
++    SVGA_FIFO_NUM_REGS
++};
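
Since block 3b registers exist only when SVGA_FIFO_MIN leaves room for them, a presence check reduces to a comparison against SVGA_FIFO_MIN. A sketch, assuming (as the driver side does when it partitions the FIFO) that SVGA_FIFO_MIN holds the byte offset where the data area starts; note the caveat above that registers in the PITCHLOCK..3D_CAPS gap additionally need their capability bits checked:

static bool svga_fifo_has_reg(const volatile uint32 *fifo, uint32 reg)
{
        /* Registers live below SVGA_FIFO_MIN, one 32-bit word each. */
        return fifo[SVGA_FIFO_MIN] > reg * sizeof(uint32);
}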
++
++
++/*
++ * Definition of registers included in extended FIFO support.
++ *
++ * The guest SVGA driver gets to allocate the FIFO between registers
++ * and data.  It must always allocate at least 4 registers, but old
++ * drivers stopped there.
++ *
++ * The VMX will enable extended FIFO support if and only if the guest
++ * left enough room for all registers defined as part of the mandatory
++ * set for the extended FIFO.
++ *
++ * Note that the guest drivers typically allocate the FIFO only at
++ * initialization time, not at mode switches, so it's likely that the
++ * number of FIFO registers won't change without a reboot.
++ *
++ * All registers less than this value are guaranteed to be present if
++ * svgaUser->fifo.extended is set. Any later registers must be tested
++ * individually for compatibility at each use (in the VMX).
++ *
++ * This value is used only by the VMX, so it can change without
++ * affecting driver compatibility; keep it that way?
++ */
++#define SVGA_FIFO_EXTENDED_MANDATORY_REGS  (SVGA_FIFO_3D_CAPS_LAST + 1)
++
++
++/*
++ * FIFO Synchronization Registers
++ *
++ *  This explains the relationship between the various FIFO
++ *  sync-related registers in IOSpace and in FIFO space.
++ *
++ *  SVGA_REG_SYNC --
++ *
++ *       The SYNC register can be used in two different ways by the guest:
++ *
++ *         1. If the guest wishes to fully sync (drain) the FIFO,
++ *            it will write once to SYNC then poll on the BUSY
++ *            register. The FIFO is sync'ed once BUSY is zero.
++ *
++ *         2. If the guest wants to asynchronously wake up the host,
++ *            it will write once to SYNC without polling on BUSY.
++ *            Ideally it will do this after some new commands have
++ *            been placed in the FIFO, and after reading a zero
++ *            from SVGA_FIFO_BUSY.
++ *
++ *       (1) is the original behaviour that SYNC was designed to
++ *       support.  Originally, a write to SYNC would implicitly
++ *       trigger a read from BUSY. This causes us to synchronously
++ *       process the FIFO.
++ *
++ *       This behaviour has since been changed so that writing SYNC
++ *       will *not* implicitly cause a read from BUSY. Instead, it
++ *       makes a channel call which asynchronously wakes up the MKS
++ *       thread.
++ *
++ *       New guests can use this new behaviour to implement (2)
++ *       efficiently. This lets guests get the host's attention
++ *       without waiting for the MKS to poll, which gives us much
++ *       better CPU utilization on SMP hosts and on UP hosts while
++ *       we're blocked on the host GPU.
++ *
++ *       Old guests shouldn't notice the behaviour change. SYNC was
++ *       never guaranteed to process the entire FIFO, since it was
++ *       bounded to a particular number of CPU cycles. Old guests will
++ *       still loop on the BUSY register until the FIFO is empty.
++ *
++ *       Writing to SYNC currently has the following side-effects:
++ *
++ *         - Sets SVGA_REG_BUSY to TRUE (in the monitor)
++ *         - Asynchronously wakes up the MKS thread for FIFO processing
++ *         - The value written to SYNC is recorded as a "reason", for
++ *           stats purposes.
++ *
++ *       If SVGA_FIFO_BUSY is available, drivers are advised to only
++ *       write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
++ *       SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
++ *       eventually set SVGA_FIFO_BUSY on its own, but this approach
++ *       lets the driver avoid sending multiple asynchronous wakeup
++ *       messages to the MKS thread.
++ *
++ *  SVGA_REG_BUSY --
++ *
++ *       This register is set to TRUE when SVGA_REG_SYNC is written,
++ *       and it reads as FALSE when the FIFO has been completely
++ *       drained.
++ *
++ *       Every read from this register causes us to synchronously
++ *       process FIFO commands. There is no guarantee as to how many
++ *       commands each read will process.
++ *
++ *       CPU time spent processing FIFO commands will be billed to
++ *       the guest.
++ *
++ *       New drivers should avoid using this register unless they
++ *       need to guarantee that the FIFO is completely drained. It
++ *       is overkill for performing a sync-to-fence. Older drivers
++ *       will use this register for any type of synchronization.
++ *
++ *  SVGA_FIFO_BUSY --
++ *
++ *       This register is a fast way for the guest driver to check
++ *       whether the FIFO is already being processed. It reads and
++ *       writes at normal RAM speeds, with no monitor intervention.
++ *
++ *       If this register reads as TRUE, the host is guaranteeing that
++ *       any new commands written into the FIFO will be noticed before
++ *       the MKS goes back to sleep.
++ *
++ *       If this register reads as FALSE, no such guarantee can be
++ *       made.
++ *
++ *       The guest should use this register to quickly determine
++ *       whether or not it needs to wake up the host. If the guest
++ *       just wrote a command or group of commands that it would like
++ *       the host to begin processing, it should:
++ *
++ *         1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
++ *            action is necessary.
++ *
++ *         2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
++ *            code that we've already sent a SYNC to the host and we
++ *            don't need to send a duplicate.
++ *
++ *         3. Write a reason to SVGA_REG_SYNC. This will send an
++ *            asynchronous wakeup to the MKS thread.
++ */
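
The three-step wakeup at the end of the comment above reduces to a few lines. A sketch, reusing the earlier port-helper sketch; the literal 1 stands in for an arbitrary nonzero "reason":

static void svga_ping_host(unsigned long io_base, volatile uint32 *fifo)
{
        if (fifo[SVGA_FIFO_BUSY])
                return;                          /* (1) Host already awake. */
        fifo[SVGA_FIFO_BUSY] = 1;                /* (2) Suppress duplicates. */
        svga_write_reg(io_base, SVGA_REG_SYNC, 1);  /* (3) Wake the MKS. */
}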
++
++
++/*
++ * FIFO Capabilities
++ *
++ *      Fence -- Fence register and command are supported
++ *      Accel Front -- Front buffer only commands are supported
++ *      Pitch Lock -- Pitch lock register is supported
++ *      Video -- SVGA Video overlay units are supported
++ *      Escape -- Escape command is supported
++ *
++ * XXX: Add longer descriptions for each capability, including a list
++ *      of the new features that each capability provides.
++ *
++ * SVGA_FIFO_CAP_SCREEN_OBJECT --
++ *
++ *    Provides dynamic multi-screen rendering, for improved Unity and
++ *    multi-monitor modes. With Screen Object, the guest can
++ *    dynamically create and destroy 'screens', which can represent
++ *    Unity windows or virtual monitors. Screen Object also provides
++ *    strong guarantees that DMA operations happen only when
++ *    guest-initiated. Screen Object deprecates the BAR1 guest
++ *    framebuffer (GFB) and all commands that work only with the GFB.
++ *
++ *    New registers:
++ *       FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
++ *
++ *    New 2D commands:
++ *       DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
++ *       BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
++ *
++ *    New 3D commands:
++ *       BLIT_SURFACE_TO_SCREEN
++ *
++ *    New guarantees:
++ *
++ *       - The host will not read or write guest memory, including the GFB,
++ *         except when explicitly initiated by a DMA command.
++ *
++ *       - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
++ *         is guaranteed to complete before any subsequent FENCEs.
++ *
++ *       - All legacy commands which affect a Screen (UPDATE, PRESENT,
++ *         PRESENT_READBACK) as well as new Screen blit commands will
++ *         all behave consistently as blits, and memory will be read
++ *         or written in FIFO order.
++ *
++ *         For example, if you PRESENT from one SVGA3D surface to multiple
++ *         places on the screen, the data copied will always be from the
++ *         SVGA3D surface at the time the PRESENT was issued in the FIFO.
++ *         This was not necessarily true on devices without Screen Object.
++ *
++ *         This means that on devices that support Screen Object, the
++ *         PRESENT_READBACK command should not be necessary unless you
++ *         actually want to read back the results of 3D rendering into
++ *         system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
++ *         command provides a strict superset of functionality.)
++ *
++ *       - When a screen is resized, either using Screen Object commands or
++ *         legacy multimon registers, its contents are preserved.
++ */
++
++#define SVGA_FIFO_CAP_NONE                  0
++#define SVGA_FIFO_CAP_FENCE             (1<<0)
++#define SVGA_FIFO_CAP_ACCELFRONT        (1<<1)
++#define SVGA_FIFO_CAP_PITCHLOCK         (1<<2)
++#define SVGA_FIFO_CAP_VIDEO             (1<<3)
++#define SVGA_FIFO_CAP_CURSOR_BYPASS_3   (1<<4)
++#define SVGA_FIFO_CAP_ESCAPE            (1<<5)
++#define SVGA_FIFO_CAP_RESERVE           (1<<6)
++#define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
++
++
++/*
++ * FIFO Flags
++ *
++ *      Accel Front -- Driver should use front buffer only commands
++ */
++
++#define SVGA_FIFO_FLAG_NONE                 0
++#define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
++#define SVGA_FIFO_FLAG_RESERVED        (1<<31) // Internal use only
++
++/*
++ * FIFO reservation sentinel value
++ */
++
++#define SVGA_FIFO_RESERVED_UNKNOWN      0xffffffff
++
++
++/*
++ * Video overlay support
++ */
++
++#define SVGA_NUM_OVERLAY_UNITS 32
++
++
++/*
++ * Video capabilities that the guest is currently using
++ */
++
++#define SVGA_VIDEO_FLAG_COLORKEY        0x0001
++
++
++/*
++ * Offsets for the video overlay registers
++ */
++
++enum {
++   SVGA_VIDEO_ENABLED = 0,
++   SVGA_VIDEO_FLAGS,
++   SVGA_VIDEO_DATA_OFFSET,
++   SVGA_VIDEO_FORMAT,
++   SVGA_VIDEO_COLORKEY,
++   SVGA_VIDEO_SIZE,          // Deprecated
++   SVGA_VIDEO_WIDTH,
++   SVGA_VIDEO_HEIGHT,
++   SVGA_VIDEO_SRC_X,
++   SVGA_VIDEO_SRC_Y,
++   SVGA_VIDEO_SRC_WIDTH,
++   SVGA_VIDEO_SRC_HEIGHT,
++   SVGA_VIDEO_DST_X,         // Signed int32
++   SVGA_VIDEO_DST_Y,         // Signed int32
++   SVGA_VIDEO_DST_WIDTH,
++   SVGA_VIDEO_DST_HEIGHT,
++   SVGA_VIDEO_PITCH_1,
++   SVGA_VIDEO_PITCH_2,
++   SVGA_VIDEO_PITCH_3,
++   SVGA_VIDEO_DATA_GMRID,    // Optional, defaults to SVGA_GMR_FRAMEBUFFER
++   SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
++   SVGA_VIDEO_NUM_REGS
++};
++
++
++/*
++ * SVGA Overlay Units
++ *
++ *      width and height relate to the entire source video frame.
++ *      srcX, srcY, srcWidth and srcHeight represent the subset of the source
++ *      video frame to be displayed.
++ */
++
++typedef struct SVGAOverlayUnit {
++   uint32 enabled;
++   uint32 flags;
++   uint32 dataOffset;
++   uint32 format;
++   uint32 colorKey;
++   uint32 size;
++   uint32 width;
++   uint32 height;
++   uint32 srcX;
++   uint32 srcY;
++   uint32 srcWidth;
++   uint32 srcHeight;
++   int32  dstX;
++   int32  dstY;
++   uint32 dstWidth;
++   uint32 dstHeight;
++   uint32 pitches[3];
++   uint32 dataGMRId;
++   uint32 dstScreenId;
++} SVGAOverlayUnit;
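
Because the overlay registers mirror SVGAOverlayUnit field-for-field, the layout produced by VMwareVideoGetAttributes() in svga_overlay.h (earlier in this patch) can be copied straight in. A sketch; the source and destination rectangle fields are omitted for brevity:

static void svga_fill_overlay_yv12(SVGAOverlayUnit *unit, uint32 width,
                                   uint32 height, uint32 size,
                                   const uint32 pitches[3])
{
        memset(unit, 0, sizeof(*unit));
        unit->enabled = 1;
        unit->format = VMWARE_FOURCC_YV12;
        unit->width = width;
        unit->height = height;
        unit->size = size;              /* Deprecated register, still mirrored. */
        unit->dataOffset = 0;           /* Frame offset within the data GMR. */
        unit->pitches[0] = pitches[0];
        unit->pitches[1] = pitches[1];
        unit->pitches[2] = pitches[2];
        unit->dataGMRId = SVGA_GMR_FRAMEBUFFER;  /* The documented default. */
}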
++
++
++/*
++ * SVGAScreenObject --
++ *
++ *    This is a new way to represent a guest's multi-monitor screen or
++ *    Unity window. Screen objects are only supported if the
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
++ *
++ *    If Screen Objects are supported, they can be used to fully
++ *    replace the functionality provided by the framebuffer registers
++ *    (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
++ *
++ *    The screen object is a struct with guaranteed binary
++ *    compatibility. New flags can be added, and the struct may grow,
++ *    but existing fields must retain their meaning.
++ *
++ */
++
++#define SVGA_SCREEN_HAS_ROOT    (1 << 0)  // Screen is present in the virtual coord space
++#define SVGA_SCREEN_IS_PRIMARY  (1 << 1)  // Guest considers this screen to be 'primary'
++#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)   // Guest is running a fullscreen app here
++
++typedef
++struct SVGAScreenObject {
++   uint32 structSize;   // sizeof(SVGAScreenObject)
++   uint32 id;
++   uint32 flags;
++   struct {
++      uint32 width;
++      uint32 height;
++   } size;
++   struct {
++      int32 x;
++      int32 y;
++   } root;              // Only used if SVGA_SCREEN_HAS_ROOT is set.
++} SVGAScreenObject;
++
++
++/*
++ *  Commands in the command FIFO:
++ *
++ *  Command IDs defined below are used for the traditional 2D FIFO
++ *  communication (not all commands are available for all versions of the
++ *  SVGA FIFO protocol).
++ *
++ *  Note the holes in the command ID numbers: These commands have been
++ *  deprecated, and the old IDs must not be reused.
++ *
++ *  Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
++ *  protocol.
++ *
++ *  Each command's parameters are described by the comments and
++ *  structs below.
++ */
++
++typedef enum {
++   SVGA_CMD_INVALID_CMD           = 0,
++   SVGA_CMD_UPDATE                = 1,
++   SVGA_CMD_RECT_COPY             = 3,
++   SVGA_CMD_DEFINE_CURSOR         = 19,
++   SVGA_CMD_DEFINE_ALPHA_CURSOR   = 22,
++   SVGA_CMD_UPDATE_VERBOSE        = 25,
++   SVGA_CMD_FRONT_ROP_FILL        = 29,
++   SVGA_CMD_FENCE                 = 30,
++   SVGA_CMD_ESCAPE                = 33,
++   SVGA_CMD_DEFINE_SCREEN         = 34,
++   SVGA_CMD_DESTROY_SCREEN        = 35,
++   SVGA_CMD_DEFINE_GMRFB          = 36,
++   SVGA_CMD_BLIT_GMRFB_TO_SCREEN  = 37,
++   SVGA_CMD_BLIT_SCREEN_TO_GMRFB  = 38,
++   SVGA_CMD_ANNOTATION_FILL       = 39,
++   SVGA_CMD_ANNOTATION_COPY       = 40,
++   SVGA_CMD_MAX
++} SVGAFifoCmdId;
++
++#define SVGA_CMD_MAX_ARGS           64
++
++
++/*
++ * SVGA_CMD_UPDATE --
++ *
++ *    This is a DMA transfer which copies from the Guest Framebuffer
++ *    (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
++ *    intersect with the provided virtual rectangle.
++ *
++ *    This command does not support using arbitrary guest memory as a
++ *    data source; it only works with the pre-defined GFB memory.
++ *    This command also does not support signed virtual coordinates.
++ *    If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
++ *    negative root x/y coordinates, the negative portion of those
++ *    screens will not be reachable by this command.
++ *
++ *    This command is not necessary when using framebuffer
++ *    traces. Traces are automatically enabled if the SVGA FIFO is
++ *    disabled, and you may explicitly enable/disable traces using
++ *    SVGA_REG_TRACES. With traces enabled, any write to the GFB will
++ *    automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
++ *
++ *    Traces and SVGA_CMD_UPDATE are the only supported ways to render
++ *    pseudocolor screen updates. The newer Screen Object commands
++ *    only support true color formats.
++ *
++ * Availability:
++ *    Always available.
++ */
++
++typedef
++struct {
++   uint32 x;
++   uint32 y;
++   uint32 width;
++   uint32 height;
++} SVGAFifoCmdUpdate;
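
A sketch of how such a command travels through the FIFO, assuming the driver's reserve/commit helpers (vmw_fifo_reserve()/vmw_fifo_commit(), defined elsewhere in this patch): the 32-bit command ID is written first, immediately followed by the body struct:

static int svga_emit_update(struct vmw_private *dev_priv,
                            uint32 x, uint32 y, uint32 w, uint32 h)
{
        struct {
                uint32 cmd_id;
                SVGAFifoCmdUpdate body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->cmd_id = SVGA_CMD_UPDATE;
        cmd->body.x = x;
        cmd->body.y = y;
        cmd->body.width = w;
        cmd->body.height = h;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        return 0;
}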
++
++
++/*
++ * SVGA_CMD_RECT_COPY --
++ *
++ *    Perform a rectangular DMA transfer from one area of the GFB to
++ *    another, and copy the result to any screens which intersect it.
++ *
++ * Availability:
++ *    SVGA_CAP_RECT_COPY
++ */
++
++typedef
++struct {
++   uint32 srcX;
++   uint32 srcY;
++   uint32 destX;
++   uint32 destY;
++   uint32 width;
++   uint32 height;
++} SVGAFifoCmdRectCopy;
++
++
++/*
++ * SVGA_CMD_DEFINE_CURSOR --
++ *
++ *    Provide a new cursor image, as an AND/XOR mask.
++ *
++ *    The recommended way to position the cursor overlay is by using
++ *    the SVGA_FIFO_CURSOR_* registers, supported by the
++ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
++ *
++ * Availability:
++ *    SVGA_CAP_CURSOR
++ */
++
++typedef
++struct {
++   uint32 id;             // Reserved, must be zero.
++   uint32 hotspotX;
++   uint32 hotspotY;
++   uint32 width;
++   uint32 height;
++   uint32 andMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
++   uint32 xorMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
++   /*
++    * Followed by scanline data for AND mask, then XOR mask.
++    * Each scanline is padded to a 32-bit boundary.
++    */
++} SVGAFifoCmdDefineCursor;
++
++
++/*
++ * SVGA_CMD_DEFINE_ALPHA_CURSOR --
++ *
++ *    Provide a new cursor image, in 32-bit BGRA format.
++ *
++ *    The recommended way to position the cursor overlay is by using
++ *    the SVGA_FIFO_CURSOR_* registers, supported by the
++ *    SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
++ *
++ * Availability:
++ *    SVGA_CAP_ALPHA_CURSOR
++ */
++
++typedef
++struct {
++   uint32 id;             // Reserved, must be zero.
++   uint32 hotspotX;
++   uint32 hotspotY;
++   uint32 width;
++   uint32 height;
++   /* Followed by scanline data */
++} SVGAFifoCmdDefineAlphaCursor;
++
++
++/*
++ * SVGA_CMD_UPDATE_VERBOSE --
++ *
++ *    Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
++ *    'reason' value, an opaque cookie which is used by internal
++ *    debugging tools. Third party drivers should not use this
++ *    command.
++ *
++ * Availability:
++ *    SVGA_CAP_EXTENDED_FIFO
++ */
++
++typedef
++struct {
++   uint32 x;
++   uint32 y;
++   uint32 width;
++   uint32 height;
++   uint32 reason;
++} SVGAFifoCmdUpdateVerbose;
++
++
++/*
++ * SVGA_CMD_FRONT_ROP_FILL --
++ *
++ *    This is a hint which tells the SVGA device that the driver has
++ *    just filled a rectangular region of the GFB with a solid
++ *    color. Instead of reading these pixels from the GFB, the device
++ *    can assume that they all equal 'color'. This is primarily used
++ *    for remote desktop protocols.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_ACCELFRONT
++ */
++
++#define  SVGA_ROP_COPY                    0x03
++
++typedef
++struct {
++   uint32 color;     // In the same format as the GFB
++   uint32 x;
++   uint32 y;
++   uint32 width;
++   uint32 height;
++   uint32 rop;       // Must be SVGA_ROP_COPY
++} SVGAFifoCmdFrontRopFill;
++
++
++/*
++ * SVGA_CMD_FENCE --
++ *
++ *    Insert a synchronization fence.  When the SVGA device reaches
++ *    this command, it will copy the 'fence' value into the
++ *    SVGA_FIFO_FENCE register. It will also compare the fence against
++ *    SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
++ *    SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
++ *    raise this interrupt.
++ *
++ * Availability:
++ *    SVGA_FIFO_FENCE for this command,
++ *    SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
++ */
++
++typedef
++struct {
++   uint32 fence;
++} SVGAFifoCmdFence;
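
A sketch of a full sync-to-fence built on this command, assuming vmw_fifo_send_fence() from elsewhere in this patch to emit the fence; the wait call matches the one in vmw_sync_obj_wait() further down:

static int svga_sync_to_fence(struct vmw_private *dev_priv)
{
        uint32_t sequence;
        int ret;

        ret = vmw_fifo_send_fence(dev_priv, &sequence);
        if (unlikely(ret != 0))
                return ret;

        /* Wait until the device copies 'sequence' into SVGA_FIFO_FENCE. */
        return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}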
++
++
++/*
++ * SVGA_CMD_ESCAPE --
++ *
++ *    Send an extended or vendor-specific variable length command.
++ *    This is used for video overlay, third party plugins, and
++ *    internal debugging tools. See svga_escape.h
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_ESCAPE
++ */
++
++typedef
++struct {
++   uint32 nsid;
++   uint32 size;
++   /* followed by 'size' bytes of data */
++} SVGAFifoCmdEscape;
++
++
++/*
++ * SVGA_CMD_DEFINE_SCREEN --
++ *
++ *    Define or redefine an SVGAScreenObject. See the description of
++ *    SVGAScreenObject above.  The video driver is responsible for
++ *    generating new screen IDs. They should be small positive
++ *    integers. The virtual device will have an implementation
++ *    specific upper limit on the number of screen IDs
++ *    supported. Drivers are responsible for recycling IDs. The first
++ *    valid ID is zero.
++ *
++ *    - Interaction with other registers:
++ *
++ *    For backwards compatibility, when the GFB mode registers (WIDTH,
++ *    HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
++ *    deletes all screens other than screen #0, and redefines screen
++ *    #0 according to the specified mode. Drivers that use
++ *    SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
++ *
++ *    If you use screen objects, do not use the legacy multi-mon
++ *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ */
++
++typedef
++struct {
++   SVGAScreenObject screen;   // Variable-length according to version
++} SVGAFifoCmdDefineScreen;
++
++
++/*
++ * SVGA_CMD_DESTROY_SCREEN --
++ *
++ *    Destroy an SVGAScreenObject. Its ID is immediately available for
++ *    re-use.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ */
++
++typedef
++struct {
++   uint32 screenId;
++} SVGAFifoCmdDestroyScreen;
++
++
++/*
++ * SVGA_CMD_DEFINE_GMRFB --
++ *
++ *    This command sets a piece of SVGA device state called the
++ *    Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
++ *    piece of light-weight state which identifies the location and
++ *    format of an image in guest memory or in BAR1. The GMRFB has
++ *    an arbitrary size, and it doesn't need to match the geometry
++ *    of the GFB or any screen object.
++ *
++ *    The GMRFB can be redefined as often as you like. You could
++ *    always use the same GMRFB, you could redefine it before
++ *    rendering from a different guest screen, or you could even
++ *    redefine it before every blit.
++ *
++ *    There are multiple ways to use this command. The simplest way is
++ *    to use it to move the framebuffer either to elsewhere in the GFB
++ *    (BAR1) memory region, or to a user-defined GMR. This lets a
++ *    driver use a framebuffer allocated entirely out of normal system
++ *    memory, which we encourage.
++ *
++ *    Another way to use this command is to set up a ring buffer of
++ *    updates in GFB memory. If a driver wants to ensure that no
++ *    frames are skipped by the SVGA device, it is important that the
++ *    driver not modify the source data for a blit until the device is
++ *    done processing the command. One efficient way to accomplish
++ *    this is to use a ring of small DMA buffers. Each buffer is used
++ *    for one blit, then we move on to the next buffer in the
++ *    ring. The FENCE mechanism is used to protect each buffer from
++ *    re-use until the device is finished with that buffer's
++ *    corresponding blit.
++ *
++ *    This command does not affect the meaning of SVGA_CMD_UPDATE.
++ *    UPDATEs always occur from the legacy GFB memory area. This
++ *    command has no support for pseudocolor GMRFBs. Currently only
++ *    true-color 15, 16, and 24-bit depths are supported. Future
++ *    devices may expose capabilities for additional framebuffer
++ *    formats.
++ *
++ *    The default GMRFB value is undefined. Drivers must always send
++ *    this command at least once before performing any blit from the
++ *    GMRFB.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ */
++
++typedef
++struct {
++   SVGAGuestPtr        ptr;
++   uint32              bytesPerLine;
++   SVGAGMRImageFormat  format;
++} SVGAFifoCmdDefineGMRFB;
++
++
++/*
++ * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
++ *
++ *    This is a guest-to-host blit. It performs a DMA operation to
++ *    copy a rectangular region of pixels from the current GMRFB to
++ *    one or more Screen Objects.
++ *
++ *    The destination coordinate may be specified relative to a
++ *    screen's origin (if a screen ID is specified) or relative to the
++ *    virtual coordinate system's origin (if the screen ID is
++ *    SVGA_ID_INVALID). The actual destination may span zero or more
++ *    screens, in the case of a virtual destination rect or a rect
++ *    which extends off the edge of the specified screen.
++ *
++ *    This command writes to the screen's "base layer": the underlying
++ *    framebuffer which exists below any cursor or video overlays. No
++ *    action is necessary to explicitly hide or update any overlays
++ *    which exist on top of the updated region.
++ *
++ *    The SVGA device is guaranteed to finish reading from the GMRFB
++ *    by the time any subsequent FENCE commands are reached.
++ *
++ *    This command consumes an annotation. See the
++ *    SVGA_CMD_ANNOTATION_* commands for details.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ */
++
++typedef
++struct {
++   SVGASignedPoint  srcOrigin;
++   SVGASignedRect   destRect;
++   uint32           destScreenId;
++} SVGAFifoCmdBlitGMRFBToScreen;
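
A sketch combining DEFINE_GMRFB with this blit, again assuming the reserve/commit helpers; it paints one rectangle from a 32-bit BGRX GMRFB onto screen 0:

static int svga_blit_from_gmr(struct vmw_private *dev_priv, uint32 gmr_id,
                              uint32 bytes_per_line, SVGASignedRect rect)
{
        struct {
                uint32 cmd_define;
                SVGAFifoCmdDefineGMRFB def;
                uint32 cmd_blit;
                SVGAFifoCmdBlitGMRFBToScreen blit;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->cmd_define = SVGA_CMD_DEFINE_GMRFB;
        cmd->def.ptr.gmrId = gmr_id;
        cmd->def.ptr.offset = 0;
        cmd->def.bytesPerLine = bytes_per_line;
        cmd->def.format.value = 0;
        cmd->def.format.bitsPerPixel = 32;
        cmd->def.format.colorDepth = 24;

        cmd->cmd_blit = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
        cmd->blit.srcOrigin.x = rect.left;      /* Read from the same spot... */
        cmd->blit.srcOrigin.y = rect.top;
        cmd->blit.destRect = rect;              /* ...that is painted on screen. */
        cmd->blit.destScreenId = 0;             /* The first valid screen ID. */

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        return 0;
}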
++
++
++/*
++ * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
++ *
++ *    This is a host-to-guest blit. It performs a DMA operation to
++ *    copy a rectangular region of pixels from a single Screen Object
++ *    back to the current GMRFB.
++ *
++ *    Usage note: This command should be used rarely. It will
++ *    typically be inefficient, but it is necessary for some types of
++ *    synchronization between 3D (GPU) and 2D (CPU) rendering into
++ *    overlapping areas of a screen.
++ *
++ *    The source coordinate is specified relative to a screen's
++ *    origin. The provided screen ID must be valid. If any parameters
++ *    are invalid, the resulting pixel values are undefined.
++ *
++ *    This command reads the screen's "base layer". Overlays like
++ *    video and cursor are not included, but any data which was sent
++ *    using a blit-to-screen primitive will be available, no matter
++ *    whether the data's original source was the GMRFB or the 3D
++ *    acceleration hardware.
++ *
++ *    Note that our guest-to-host blits and host-to-guest blits aren't
++ *    symmetric in their current implementation. While the parameters
++ *    are identical, host-to-guest blits are a lot less featureful.
++ *    They do not support clipping: If the source parameters don't
++ *    fully fit within a screen, the blit fails. They must originate
++ *    from exactly one screen. Virtual coordinates are not directly
++ *    supported.
++ *
++ *    Host-to-guest blits do support the same set of GMRFB formats
++ *    offered by guest-to-host blits.
++ *
++ *    The SVGA device is guaranteed to finish writing to the GMRFB by
++ *    the time any subsequent FENCE commands are reached.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ */
++
++typedef
++struct {
++   SVGASignedPoint  destOrigin;
++   SVGASignedRect   srcRect;
++   uint32           srcScreenId;
++} SVGAFifoCmdBlitScreenToGMRFB;
++
++
++/*
++ * SVGA_CMD_ANNOTATION_FILL --
++ *
++ *    This is a blit annotation. This command stores a small piece of
++ *    device state which is consumed by the next blit-to-screen
++ *    command. The state is only cleared by commands which are
++ *    specifically documented as consuming an annotation. Other
++ *    commands (such as ESCAPEs for debugging) may intervene between
++ *    the annotation and its associated blit.
++ *
++ *    This annotation is a promise about the contents of the next
++ *    blit: The video driver is guaranteeing that all pixels in that
++ *    blit will have the same value, specified here as a color in
++ *    SVGAColorBGRX format.
++ *
++ *    The SVGA device can still render the blit correctly even if it
++ *    ignores this annotation, but the annotation may allow it to
++ *    perform the blit more efficiently, for example by ignoring the
++ *    source data and performing a fill in hardware.
++ *
++ *    This annotation is most important for performance when the
++ *    user's display is being remoted over a network connection.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ */
++
++typedef
++struct {
++   SVGAColorBGRX  color;
++} SVGAFifoCmdAnnotationFill;
++
++
++/*
++ * SVGA_CMD_ANNOTATION_COPY --
++ *
++ *    This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
++ *    information about annotations.
++ *
++ *    This annotation is a promise about the contents of the next
++ *    blit: The video driver is guaranteeing that all pixels in that
++ *    blit will have the same value as those which already exist at an
++ *    identically-sized region on the same or a different screen.
++ *
++ *    Note that the source pixels for the COPY in this annotation are
++ *    sampled before applying the annotation's associated blit. They
++ *    are allowed to overlap with the blit's destination pixels.
++ *
++ *    The copy source rectangle is specified the same way as the blit
++ *    destination: it can be a rectangle which spans zero or more
++ *    screens, specified relative to either a screen or to the virtual
++ *    coordinate system's origin. If the source rectangle includes
++ *    pixels which are not from exactly one screen, the results are
++ *    undefined.
++ *
++ * Availability:
++ *    SVGA_FIFO_CAP_SCREEN_OBJECT
++ */
++
++typedef
++struct {
++   SVGASignedPoint  srcOrigin;
++   uint32           srcScreenId;
++} SVGAFifoCmdAnnotationCopy;
++
++#endif
+diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
+new file mode 100644
+index 0000000..55836de
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/svga_types.h
+@@ -0,0 +1,45 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++/**
++ * Silly typedefs for the svga headers. Currently the headers are shared
++ * between all components that talk to svga, and as such they are in a
++ * completely different style and use weird defines.
++ *
++ * This file lets all the ugly be prefixed with svga*.
++ */
++
++#ifndef _SVGA_TYPES_H_
++#define _SVGA_TYPES_H_
++
++typedef uint16_t uint16;
++typedef uint32_t uint32;
++typedef uint8_t uint8;
++typedef int32_t int32;
++typedef bool Bool;
++
++#endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+new file mode 100644
+index 0000000..825ebe3
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+@@ -0,0 +1,252 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_drv.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_placement.h"
++
++static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
++	TTM_PL_FLAG_CACHED;
++
++static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
++	TTM_PL_FLAG_CACHED |
++	TTM_PL_FLAG_NO_EVICT;
++
++static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
++	TTM_PL_FLAG_CACHED;
++
++struct ttm_placement vmw_vram_placement = {
++	.fpfn = 0,
++	.lpfn = 0,
++	.num_placement = 1,
++	.placement = &vram_placement_flags,
++	.num_busy_placement = 1,
++	.busy_placement = &vram_placement_flags
++};
++
++struct ttm_placement vmw_vram_sys_placement = {
++	.fpfn = 0,
++	.lpfn = 0,
++	.num_placement = 1,
++	.placement = &vram_placement_flags,
++	.num_busy_placement = 1,
++	.busy_placement = &sys_placement_flags
++};
++
++struct ttm_placement vmw_vram_ne_placement = {
++	.fpfn = 0,
++	.lpfn = 0,
++	.num_placement = 1,
++	.placement = &vram_ne_placement_flags,
++	.num_busy_placement = 1,
++	.busy_placement = &vram_ne_placement_flags
++};
++
++struct ttm_placement vmw_sys_placement = {
++	.fpfn = 0,
++	.lpfn = 0,
++	.num_placement = 1,
++	.placement = &sys_placement_flags,
++	.num_busy_placement = 1,
++	.busy_placement = &sys_placement_flags
++};
++
++struct vmw_ttm_backend {
++	struct ttm_backend backend;
++};
++
++static int vmw_ttm_populate(struct ttm_backend *backend,
++			    unsigned long num_pages, struct page **pages,
++			    struct page *dummy_read_page)
++{
++	return 0;
++}
++
++static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
++{
++	return 0;
++}
++
++static int vmw_ttm_unbind(struct ttm_backend *backend)
++{
++	return 0;
++}
++
++static void vmw_ttm_clear(struct ttm_backend *backend)
++{
++}
++
++static void vmw_ttm_destroy(struct ttm_backend *backend)
++{
++	struct vmw_ttm_backend *vmw_be =
++	    container_of(backend, struct vmw_ttm_backend, backend);
++
++	kfree(vmw_be);
++}
++
++static struct ttm_backend_func vmw_ttm_func = {
++	.populate = vmw_ttm_populate,
++	.clear = vmw_ttm_clear,
++	.bind = vmw_ttm_bind,
++	.unbind = vmw_ttm_unbind,
++	.destroy = vmw_ttm_destroy,
++};
++
++struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
++{
++	struct vmw_ttm_backend *vmw_be;
++
++	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
++	if (!vmw_be)
++		return NULL;
++
++	vmw_be->backend.func = &vmw_ttm_func;
++
++	return &vmw_be->backend;
++}
++
++int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
++{
++	return 0;
++}
++
++int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
++		      struct ttm_mem_type_manager *man)
++{
++	struct vmw_private *dev_priv =
++	    container_of(bdev, struct vmw_private, bdev);
++
++	switch (type) {
++	case TTM_PL_SYSTEM:
++		/* System memory */
++
++		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
++		man->available_caching = TTM_PL_MASK_CACHING;
++		man->default_caching = TTM_PL_FLAG_CACHED;
++		break;
++	case TTM_PL_VRAM:
++		/* "On-card" video ram */
++		man->gpu_offset = 0;
++		man->io_offset = dev_priv->vram_start;
++		man->io_size = dev_priv->vram_size;
++		man->flags = TTM_MEMTYPE_FLAG_FIXED |
++		    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
++		man->io_addr = NULL;
++		man->available_caching = TTM_PL_MASK_CACHING;
++		man->default_caching = TTM_PL_FLAG_WC;
++		break;
++	default:
++		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
++		return -EINVAL;
++	}
++	return 0;
++}
++
++void vmw_evict_flags(struct ttm_buffer_object *bo,
++		     struct ttm_placement *placement)
++{
++	*placement = vmw_sys_placement;
++}
++
++/**
++ * FIXME: Proper access checks on buffers.
++ */
++
++static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
++{
++	return 0;
++}
++
++static void vmw_move_notify(struct ttm_buffer_object *bo,
++		     struct ttm_mem_reg *new_mem)
++{
++	if (new_mem->mem_type != TTM_PL_SYSTEM)
++		vmw_dmabuf_gmr_unbind(bo);
++}
++
++static void vmw_swap_notify(struct ttm_buffer_object *bo)
++{
++	vmw_dmabuf_gmr_unbind(bo);
++}
++
++/**
++ * FIXME: We're using the old vmware polling method to sync.
++ * Do this with fences instead.
++ */
++
++static void *vmw_sync_obj_ref(void *sync_obj)
++{
++	return sync_obj;
++}
++
++static void vmw_sync_obj_unref(void **sync_obj)
++{
++	*sync_obj = NULL;
++}
++
++static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
++{
++	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
++
++	mutex_lock(&dev_priv->hw_mutex);
++	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
++	mutex_unlock(&dev_priv->hw_mutex);
++	return 0;
++}
++
++static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
++{
++	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
++	uint32_t sequence = (unsigned long) sync_obj;
++
++	return vmw_fence_signaled(dev_priv, sequence);
++}
++
++static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
++			     bool lazy, bool interruptible)
++{
++	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
++	uint32_t sequence = (unsigned long) sync_obj;
++
++	return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
++}
++
++struct ttm_bo_driver vmw_bo_driver = {
++	.create_ttm_backend_entry = vmw_ttm_backend_init,
++	.invalidate_caches = vmw_invalidate_caches,
++	.init_mem_type = vmw_init_mem_type,
++	.evict_flags = vmw_evict_flags,
++	.move = NULL,
++	.verify_access = vmw_verify_access,
++	.sync_obj_signaled = vmw_sync_obj_signaled,
++	.sync_obj_wait = vmw_sync_obj_wait,
++	.sync_obj_flush = vmw_sync_obj_flush,
++	.sync_obj_unref = vmw_sync_obj_unref,
++	.sync_obj_ref = vmw_sync_obj_ref,
++	.move_notify = vmw_move_notify,
++	.swap_notify = vmw_swap_notify
++};
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+new file mode 100644
+index 0000000..0c9c081
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -0,0 +1,783 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "drmP.h"
++#include "vmwgfx_drv.h"
++#include "ttm/ttm_placement.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_module.h"
++
++#define VMWGFX_DRIVER_NAME "vmwgfx"
++#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
++#define VMWGFX_CHIP_SVGAII 0
++#define VMW_FB_RESERVATION 0
++
++/**
++ * Fully encoded drm commands. Might move to vmw_drm.h
++ */
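++/*
++ * Each DRM_IOW/DRM_IOR/DRM_IOWR wrapper below encodes the transfer
++ * direction, the argument size and the command number (offset from
++ * DRM_COMMAND_BASE) into a single ioctl number, which must match the
++ * number user space passes in.
++ */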
++
++#define DRM_IOCTL_VMW_GET_PARAM					\
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
++		 struct drm_vmw_getparam_arg)
++#define DRM_IOCTL_VMW_ALLOC_DMABUF				\
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,	\
++		union drm_vmw_alloc_dmabuf_arg)
++#define DRM_IOCTL_VMW_UNREF_DMABUF				\
++	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,	\
++		struct drm_vmw_unref_dmabuf_arg)
++#define DRM_IOCTL_VMW_CURSOR_BYPASS				\
++	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,	\
++		 struct drm_vmw_cursor_bypass_arg)
++
++#define DRM_IOCTL_VMW_CONTROL_STREAM				\
++	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,	\
++		 struct drm_vmw_control_stream_arg)
++#define DRM_IOCTL_VMW_CLAIM_STREAM				\
++	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,	\
++		 struct drm_vmw_stream_arg)
++#define DRM_IOCTL_VMW_UNREF_STREAM				\
++	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,	\
++		 struct drm_vmw_stream_arg)
++
++#define DRM_IOCTL_VMW_CREATE_CONTEXT				\
++	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,	\
++		struct drm_vmw_context_arg)
++#define DRM_IOCTL_VMW_UNREF_CONTEXT				\
++	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,	\
++		struct drm_vmw_context_arg)
++#define DRM_IOCTL_VMW_CREATE_SURFACE				\
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,	\
++		 union drm_vmw_surface_create_arg)
++#define DRM_IOCTL_VMW_UNREF_SURFACE				\
++	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,	\
++		 struct drm_vmw_surface_arg)
++#define DRM_IOCTL_VMW_REF_SURFACE				\
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,	\
++		 union drm_vmw_surface_reference_arg)
++#define DRM_IOCTL_VMW_EXECBUF					\
++	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,		\
++		struct drm_vmw_execbuf_arg)
++#define DRM_IOCTL_VMW_FIFO_DEBUG				\
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG,		\
++		 struct drm_vmw_fifo_debug_arg)
++#define DRM_IOCTL_VMW_FENCE_WAIT				\
++	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,		\
++		 struct drm_vmw_fence_wait_arg)
++
++
++/**
++ * The core DRM version of this macro doesn't account for
++ * DRM_COMMAND_BASE.
++ */
++
++#define VMW_IOCTL_DEF(ioctl, func, flags) \
++	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
++
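++/*
++ * As an illustration, the DRM_IOCTL_VMW_GET_PARAM entry in the table
++ * below expands (roughly) to
++ *
++ *   [DRM_VMW_GET_PARAM] = {DRM_IOCTL_VMW_GET_PARAM,
++ *                          DRM_AUTH | DRM_UNLOCKED,
++ *                          vmw_getparam_ioctl}
++ *
++ * i.e. the table is indexed by the driver-private command number alone.
++ */
++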
++/**
++ * Ioctl definitions.
++ */
++
++static struct drm_ioctl_desc vmw_ioctls[] = {
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
++		      vmw_kms_cursor_bypass_ioctl,
++		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
++
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
++		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
++		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
++		      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
++
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
++		      DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
++	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
++		      DRM_AUTH | DRM_UNLOCKED)
++};
++
++static struct pci_device_id vmw_pci_id_list[] = {
++	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
++	{0, 0, 0}
++};
++
++static char *vmw_devname = "vmwgfx";
++
++static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
++static void vmw_master_init(struct vmw_master *);
++static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
++			      void *ptr);
++
++static void vmw_print_capabilities(uint32_t capabilities)
++{
++	DRM_INFO("Capabilities:\n");
++	if (capabilities & SVGA_CAP_RECT_COPY)
++		DRM_INFO("  Rect copy.\n");
++	if (capabilities & SVGA_CAP_CURSOR)
++		DRM_INFO("  Cursor.\n");
++	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
++		DRM_INFO("  Cursor bypass.\n");
++	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
++		DRM_INFO("  Cursor bypass 2.\n");
++	if (capabilities & SVGA_CAP_8BIT_EMULATION)
++		DRM_INFO("  8-bit emulation.\n");
++	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
++		DRM_INFO("  Alpha cursor.\n");
++	if (capabilities & SVGA_CAP_3D)
++		DRM_INFO("  3D.\n");
++	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
++		DRM_INFO("  Extended Fifo.\n");
++	if (capabilities & SVGA_CAP_MULTIMON)
++		DRM_INFO("  Multimon.\n");
++	if (capabilities & SVGA_CAP_PITCHLOCK)
++		DRM_INFO("  Pitchlock.\n");
++	if (capabilities & SVGA_CAP_IRQMASK)
++		DRM_INFO("  Irq mask.\n");
++	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
++		DRM_INFO("  Display Topology.\n");
++	if (capabilities & SVGA_CAP_GMR)
++		DRM_INFO("  GMR.\n");
++	if (capabilities & SVGA_CAP_TRACES)
++		DRM_INFO("  Traces.\n");
++}
++
++static int vmw_request_device(struct vmw_private *dev_priv)
++{
++	int ret;
++
++	vmw_kms_save_vga(dev_priv);
++
++	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Unable to initialize FIFO.\n");
++		return ret;
++	}
++
++	return 0;
++}
++
++static void vmw_release_device(struct vmw_private *dev_priv)
++{
++	vmw_fifo_release(dev_priv, &dev_priv->fifo);
++	vmw_kms_restore_vga(dev_priv);
++}
++
++
++static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
++{
++	struct vmw_private *dev_priv;
++	int ret;
++	uint32_t svga_id;
++
++	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
++	if (unlikely(dev_priv == NULL)) {
++		DRM_ERROR("Failed allocating a device private struct.\n");
++		return -ENOMEM;
++	}
++
++	dev_priv->dev = dev;
++	dev_priv->vmw_chipset = chipset;
++	dev_priv->last_read_sequence = (uint32_t) -100;
++	mutex_init(&dev_priv->hw_mutex);
++	mutex_init(&dev_priv->cmdbuf_mutex);
++	rwlock_init(&dev_priv->resource_lock);
++	idr_init(&dev_priv->context_idr);
++	idr_init(&dev_priv->surface_idr);
++	idr_init(&dev_priv->stream_idr);
++	ida_init(&dev_priv->gmr_ida);
++	mutex_init(&dev_priv->init_mutex);
++	init_waitqueue_head(&dev_priv->fence_queue);
++	init_waitqueue_head(&dev_priv->fifo_queue);
++	atomic_set(&dev_priv->fence_queue_waiters, 0);
++	atomic_set(&dev_priv->fifo_queue_waiters, 0);
++	INIT_LIST_HEAD(&dev_priv->gmr_lru);
++
++	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
++	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
++	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
++
++	mutex_lock(&dev_priv->hw_mutex);
++
++	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
++	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
++	if (svga_id != SVGA_ID_2) {
++		ret = -ENOSYS;
++		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
++		mutex_unlock(&dev_priv->hw_mutex);
++		goto out_err0;
++	}
++
++	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
++
++	if (dev_priv->capabilities & SVGA_CAP_GMR) {
++		dev_priv->max_gmr_descriptors =
++			vmw_read(dev_priv,
++				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
++		dev_priv->max_gmr_ids =
++			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
++	}
++
++	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
++	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
++	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
++	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
++
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	vmw_print_capabilities(dev_priv->capabilities);
++
++	if (dev_priv->capabilities & SVGA_CAP_GMR) {
++		DRM_INFO("Max GMR ids is %u\n",
++			 (unsigned)dev_priv->max_gmr_ids);
++		DRM_INFO("Max GMR descriptors is %u\n",
++			 (unsigned)dev_priv->max_gmr_descriptors);
++	}
++	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
++		 dev_priv->vram_start, dev_priv->vram_size / 1024);
++	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
++		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
++
++	ret = vmw_ttm_global_init(dev_priv);
++	if (unlikely(ret != 0))
++		goto out_err0;
++
++
++	vmw_master_init(&dev_priv->fbdev_master);
++	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
++	dev_priv->active_master = &dev_priv->fbdev_master;
++
++
++	ret = ttm_bo_device_init(&dev_priv->bdev,
++				 dev_priv->bo_global_ref.ref.object,
++				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
++				 false);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
++		goto out_err1;
++	}
++
++	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
++			     (dev_priv->vram_size >> PAGE_SHIFT));
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
++		goto out_err2;
++	}
++
++	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
++					   dev_priv->mmio_size, DRM_MTRR_WC);
++
++	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
++					 dev_priv->mmio_size);
++
++	if (unlikely(dev_priv->mmio_virt == NULL)) {
++		ret = -ENOMEM;
++		DRM_ERROR("Failed mapping MMIO.\n");
++		goto out_err3;
++	}
++
++	dev_priv->tdev = ttm_object_device_init
++	    (dev_priv->mem_global_ref.object, 12);
++
++	if (unlikely(dev_priv->tdev == NULL)) {
++		DRM_ERROR("Unable to initialize TTM object management.\n");
++		ret = -ENOMEM;
++		goto out_err4;
++	}
++
++	dev->dev_private = dev_priv;
++
++	if (!dev->devname)
++		dev->devname = vmw_devname;
++
++	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
++		ret = drm_irq_install(dev);
++		if (unlikely(ret != 0)) {
++			DRM_ERROR("Failed installing irq: %d\n", ret);
++			goto out_no_irq;
++		}
++	}
++
++	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
++	dev_priv->stealth = (ret != 0);
++	if (dev_priv->stealth) {
++		/**
++		 * Request at least the mmio PCI resource.
++		 */
++
++		DRM_INFO("It appears that vesafb is loaded. "
++			 "Ignore the above error, if any.\n");
++		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
++		if (unlikely(ret != 0)) {
++			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
++			goto out_no_device;
++		}
++	}
++	ret = vmw_request_device(dev_priv);
++	if (unlikely(ret != 0))
++		goto out_no_device;
++	vmw_kms_init(dev_priv);
++	vmw_overlay_init(dev_priv);
++	vmw_fb_init(dev_priv);
++
++	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
++	register_pm_notifier(&dev_priv->pm_nb);
++
++	DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
++
++	return 0;
++
++out_no_device:
++	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
++		drm_irq_uninstall(dev_priv->dev);
++	if (dev->devname == vmw_devname)
++		dev->devname = NULL;
++out_no_irq:
++	ttm_object_device_release(&dev_priv->tdev);
++out_err4:
++	iounmap(dev_priv->mmio_virt);
++out_err3:
++	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
++		     dev_priv->mmio_size, DRM_MTRR_WC);
++	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++out_err2:
++	(void)ttm_bo_device_release(&dev_priv->bdev);
++out_err1:
++	vmw_ttm_global_release(dev_priv);
++out_err0:
++	ida_destroy(&dev_priv->gmr_ida);
++	idr_destroy(&dev_priv->surface_idr);
++	idr_destroy(&dev_priv->context_idr);
++	idr_destroy(&dev_priv->stream_idr);
++	kfree(dev_priv);
++	return ret;
++}
++
++static int vmw_driver_unload(struct drm_device *dev)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++
++	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
++
++	unregister_pm_notifier(&dev_priv->pm_nb);
++
++	vmw_fb_close(dev_priv);
++	vmw_kms_close(dev_priv);
++	vmw_overlay_close(dev_priv);
++	vmw_release_device(dev_priv);
++	if (dev_priv->stealth)
++		pci_release_region(dev->pdev, 2);
++	else
++		pci_release_regions(dev->pdev);
++
++	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
++		drm_irq_uninstall(dev_priv->dev);
++	if (dev->devname == vmw_devname)
++		dev->devname = NULL;
++	ttm_object_device_release(&dev_priv->tdev);
++	iounmap(dev_priv->mmio_virt);
++	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
++		     dev_priv->mmio_size, DRM_MTRR_WC);
++	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
++	(void)ttm_bo_device_release(&dev_priv->bdev);
++	vmw_ttm_global_release(dev_priv);
++	ida_destroy(&dev_priv->gmr_ida);
++	idr_destroy(&dev_priv->surface_idr);
++	idr_destroy(&dev_priv->context_idr);
++	idr_destroy(&dev_priv->stream_idr);
++
++	kfree(dev_priv);
++
++	return 0;
++}
++
++static void vmw_postclose(struct drm_device *dev,
++			 struct drm_file *file_priv)
++{
++	struct vmw_fpriv *vmw_fp;
++
++	vmw_fp = vmw_fpriv(file_priv);
++	ttm_object_file_release(&vmw_fp->tfile);
++	if (vmw_fp->locked_master)
++		drm_master_put(&vmw_fp->locked_master);
++	kfree(vmw_fp);
++}
++
++static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_fpriv *vmw_fp;
++	int ret = -ENOMEM;
++
++	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
++	if (unlikely(vmw_fp == NULL))
++		return ret;
++
++	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
++	if (unlikely(vmw_fp->tfile == NULL))
++		goto out_no_tfile;
++
++	file_priv->driver_priv = vmw_fp;
++
++	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
++		dev_priv->bdev.dev_mapping =
++			file_priv->filp->f_path.dentry->d_inode->i_mapping;
++
++	return 0;
++
++out_no_tfile:
++	kfree(vmw_fp);
++	return ret;
++}
++
++static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
++			       unsigned long arg)
++{
++	struct drm_file *file_priv = filp->private_data;
++	struct drm_device *dev = file_priv->minor->dev;
++	unsigned int nr = DRM_IOCTL_NR(cmd);
++
++	/*
++	 * Do extra checking on driver private ioctls.
++	 */
++
++	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
++	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
++		struct drm_ioctl_desc *ioctl =
++		    &vmw_ioctls[nr - DRM_COMMAND_BASE];
++
++		if (unlikely(ioctl->cmd != cmd)) {
++			DRM_ERROR("Invalid command format, ioctl %d\n",
++				  nr - DRM_COMMAND_BASE);
++			return -EINVAL;
++		}
++	}
++
++	return drm_ioctl(filp, cmd, arg);
++}
++
++static int vmw_firstopen(struct drm_device *dev)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	dev_priv->is_opened = true;
++
++	return 0;
++}
++
++static void vmw_lastclose(struct drm_device *dev)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct drm_crtc *crtc;
++	struct drm_mode_set set;
++	int ret;
++
++	/**
++	 * Do nothing on the lastclose call from drm_unload.
++	 */
++
++	if (!dev_priv->is_opened)
++		return;
++
++	dev_priv->is_opened = false;
++	set.x = 0;
++	set.y = 0;
++	set.fb = NULL;
++	set.mode = NULL;
++	set.connectors = NULL;
++	set.num_connectors = 0;
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		set.crtc = crtc;
++		ret = crtc->funcs->set_config(&set);
++		WARN_ON(ret != 0);
++	}
++
++}
++
++static void vmw_master_init(struct vmw_master *vmaster)
++{
++	ttm_lock_init(&vmaster->lock);
++}
++
++static int vmw_master_create(struct drm_device *dev,
++			     struct drm_master *master)
++{
++	struct vmw_master *vmaster;
++
++	DRM_INFO("Master create.\n");
++	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
++	if (unlikely(vmaster == NULL))
++		return -ENOMEM;
++
++	ttm_lock_init(&vmaster->lock);
++	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
++	master->driver_priv = vmaster;
++
++	return 0;
++}
++
++static void vmw_master_destroy(struct drm_device *dev,
++			       struct drm_master *master)
++{
++	struct vmw_master *vmaster = vmw_master(master);
++
++	DRM_INFO("Master destroy.\n");
++	master->driver_priv = NULL;
++	kfree(vmaster);
++}
++
++
++static int vmw_master_set(struct drm_device *dev,
++			  struct drm_file *file_priv,
++			  bool from_open)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
++	struct vmw_master *active = dev_priv->active_master;
++	struct vmw_master *vmaster = vmw_master(file_priv->master);
++	int ret = 0;
++
++	DRM_INFO("Master set.\n");
++
++	if (active) {
++		BUG_ON(active != &dev_priv->fbdev_master);
++		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
++		if (unlikely(ret != 0))
++			goto out_no_active_lock;
++
++		ttm_lock_set_kill(&active->lock, true, SIGTERM);
++		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
++		if (unlikely(ret != 0)) {
++			DRM_ERROR("Unable to clean VRAM on "
++				  "master drop.\n");
++		}
++
++		dev_priv->active_master = NULL;
++	}
++
++	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
++	if (!from_open) {
++		ttm_vt_unlock(&vmaster->lock);
++		BUG_ON(vmw_fp->locked_master != file_priv->master);
++		drm_master_put(&vmw_fp->locked_master);
++	}
++
++	dev_priv->active_master = vmaster;
++
++	return 0;
++
++out_no_active_lock:
++	vmw_release_device(dev_priv);
++	return ret;
++}
++
++static void vmw_master_drop(struct drm_device *dev,
++			    struct drm_file *file_priv,
++			    bool from_release)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
++	struct vmw_master *vmaster = vmw_master(file_priv->master);
++	int ret;
++
++	DRM_INFO("Master drop.\n");
++
++	/**
++	 * Make sure the master doesn't disappear while we have
++	 * it locked.
++	 */
++
++	vmw_fp->locked_master = drm_master_get(file_priv->master);
++	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
++
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Unable to lock TTM at VT switch.\n");
++		drm_master_put(&vmw_fp->locked_master);
++	}
++
++	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
++
++	dev_priv->active_master = &dev_priv->fbdev_master;
++	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
++	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
++
++	vmw_fb_on(dev_priv);
++}
++
++
++static void vmw_remove(struct pci_dev *pdev)
++{
++	struct drm_device *dev = pci_get_drvdata(pdev);
++
++	drm_put_dev(dev);
++}
++
++static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
++			      void *ptr)
++{
++	struct vmw_private *dev_priv =
++		container_of(nb, struct vmw_private, pm_nb);
++	struct vmw_master *vmaster = dev_priv->active_master;
++
++	switch (val) {
++	case PM_HIBERNATION_PREPARE:
++	case PM_SUSPEND_PREPARE:
++		ttm_suspend_lock(&vmaster->lock);
++
++		/**
++		 * This empties VRAM and unbinds all GMR bindings.
++		 * Buffer contents are moved to swappable memory.
++		 */
++		ttm_bo_swapout_all(&dev_priv->bdev);
++		break;
++	case PM_POST_HIBERNATION:
++	case PM_POST_SUSPEND:
++		ttm_suspend_unlock(&vmaster->lock);
++		break;
++	case PM_RESTORE_PREPARE:
++		break;
++	case PM_POST_RESTORE:
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++/**
++ * These might not be needed with the virtual SVGA device.
++ */
++
++int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++	pci_save_state(pdev);
++	pci_disable_device(pdev);
++	pci_set_power_state(pdev, PCI_D3hot);
++	return 0;
++}
++
++int vmw_pci_resume(struct pci_dev *pdev)
++{
++	pci_set_power_state(pdev, PCI_D0);
++	pci_restore_state(pdev);
++	return pci_enable_device(pdev);
++}
++
++static struct drm_driver driver = {
++	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
++	DRIVER_MODESET,
++	.load = vmw_driver_load,
++	.unload = vmw_driver_unload,
++	.firstopen = vmw_firstopen,
++	.lastclose = vmw_lastclose,
++	.irq_preinstall = vmw_irq_preinstall,
++	.irq_postinstall = vmw_irq_postinstall,
++	.irq_uninstall = vmw_irq_uninstall,
++	.irq_handler = vmw_irq_handler,
++	.reclaim_buffers_locked = NULL,
++	.get_map_ofs = drm_core_get_map_ofs,
++	.get_reg_ofs = drm_core_get_reg_ofs,
++	.ioctls = vmw_ioctls,
++	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
++	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
++	.master_create = vmw_master_create,
++	.master_destroy = vmw_master_destroy,
++	.master_set = vmw_master_set,
++	.master_drop = vmw_master_drop,
++	.open = vmw_driver_open,
++	.postclose = vmw_postclose,
++	.fops = {
++		 .owner = THIS_MODULE,
++		 .open = drm_open,
++		 .release = drm_release,
++		 .unlocked_ioctl = vmw_unlocked_ioctl,
++		 .mmap = vmw_mmap,
++		 .poll = drm_poll,
++		 .fasync = drm_fasync,
++#if defined(CONFIG_COMPAT)
++		 .compat_ioctl = drm_compat_ioctl,
++#endif
++		 },
++	.pci_driver = {
++		       .name = VMWGFX_DRIVER_NAME,
++		       .id_table = vmw_pci_id_list,
++		       .probe = vmw_probe,
++		       .remove = vmw_remove,
++		       .suspend = vmw_pci_suspend,
++		       .resume = vmw_pci_resume
++		       },
++	.name = VMWGFX_DRIVER_NAME,
++	.desc = VMWGFX_DRIVER_DESC,
++	.date = VMWGFX_DRIVER_DATE,
++	.major = VMWGFX_DRIVER_MAJOR,
++	.minor = VMWGFX_DRIVER_MINOR,
++	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
++};
++
++static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++	return drm_get_dev(pdev, ent, &driver);
++}
++
++static int __init vmwgfx_init(void)
++{
++	int ret;
++	ret = drm_init(&driver);
++	if (ret)
++		DRM_ERROR("Failed initializing DRM.\n");
++	return ret;
++}
++
++static void __exit vmwgfx_exit(void)
++{
++	drm_exit(&driver);
++}
++
++module_init(vmwgfx_init);
++module_exit(vmwgfx_exit);
++
++MODULE_AUTHOR("VMware Inc. and others");
++MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
++MODULE_LICENSE("GPL and additional rights");
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+new file mode 100644
+index 0000000..356dc93
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -0,0 +1,521 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#ifndef _VMWGFX_DRV_H_
++#define _VMWGFX_DRV_H_
++
++#include "vmwgfx_reg.h"
++#include "drmP.h"
++#include "vmwgfx_drm.h"
++#include "drm_hashtab.h"
++#include "linux/suspend.h"
++#include "ttm/ttm_bo_driver.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_lock.h"
++#include "ttm/ttm_execbuf_util.h"
++#include "ttm/ttm_module.h"
++
++#define VMWGFX_DRIVER_DATE "20100209"
++#define VMWGFX_DRIVER_MAJOR 1
++#define VMWGFX_DRIVER_MINOR 0
++#define VMWGFX_DRIVER_PATCHLEVEL 0
++#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
++#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
++#define VMWGFX_MAX_RELOCATIONS 2048
++#define VMWGFX_MAX_GMRS 2048
++
++struct vmw_fpriv {
++	struct drm_master *locked_master;
++	struct ttm_object_file *tfile;
++};
++
++struct vmw_dma_buffer {
++	struct ttm_buffer_object base;
++	struct list_head validate_list;
++	struct list_head gmr_lru;
++	uint32_t gmr_id;
++	bool gmr_bound;
++	uint32_t cur_validate_node;
++	bool on_validate_list;
++};
++
++struct vmw_resource {
++	struct kref kref;
++	struct vmw_private *dev_priv;
++	struct idr *idr;
++	int id;
++	enum ttm_object_type res_type;
++	bool avail;
++	void (*hw_destroy) (struct vmw_resource *res);
++	void (*res_free) (struct vmw_resource *res);
++
++	/* TODO is a generic snooper needed? */
++#if 0
++	void (*snoop)(struct vmw_resource *res,
++		      struct ttm_object_file *tfile,
++		      SVGA3dCmdHeader *header);
++	void *snoop_priv;
++#endif
++};
++
++struct vmw_cursor_snooper {
++	struct drm_crtc *crtc;
++	size_t age;
++	uint32_t *image;
++};
++
++struct vmw_surface {
++	struct vmw_resource res;
++	uint32_t flags;
++	uint32_t format;
++	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
++	struct drm_vmw_size *sizes;
++	uint32_t num_sizes;
++
++	bool scanout;
++
++	/* TODO so far just an extra pointer */
++	struct vmw_cursor_snooper snooper;
++};
++
++struct vmw_fifo_state {
++	unsigned long reserved_size;
++	__le32 *dynamic_buffer;
++	__le32 *static_buffer;
++	__le32 *last_buffer;
++	uint32_t last_data_size;
++	uint32_t last_buffer_size;
++	bool last_buffer_add;
++	unsigned long static_buffer_size;
++	bool using_bounce_buffer;
++	uint32_t capabilities;
++	struct mutex fifo_mutex;
++	struct rw_semaphore rwsem;
++};
++
++struct vmw_relocation {
++	SVGAGuestPtr *location;
++	uint32_t index;
++};
++
++struct vmw_sw_context {
++	struct ida bo_list;
++	uint32_t last_cid;
++	bool cid_valid;
++	uint32_t last_sid;
++	uint32_t sid_translation;
++	bool sid_valid;
++	struct ttm_object_file *tfile;
++	struct list_head validate_nodes;
++	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
++	uint32_t cur_reloc;
++	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
++	uint32_t cur_val_buf;
++};
++
++struct vmw_legacy_display;
++struct vmw_overlay;
++
++struct vmw_master {
++	struct ttm_lock lock;
++};
++
++struct vmw_private {
++	struct ttm_bo_device bdev;
++	struct ttm_bo_global_ref bo_global_ref;
++	struct ttm_global_reference mem_global_ref;
++
++	struct vmw_fifo_state fifo;
++
++	struct drm_device *dev;
++	unsigned long vmw_chipset;
++	unsigned int io_start;
++	uint32_t vram_start;
++	uint32_t vram_size;
++	uint32_t mmio_start;
++	uint32_t mmio_size;
++	uint32_t fb_max_width;
++	uint32_t fb_max_height;
++	__le32 __iomem *mmio_virt;
++	int mmio_mtrr;
++	uint32_t capabilities;
++	uint32_t max_gmr_descriptors;
++	uint32_t max_gmr_ids;
++	struct mutex hw_mutex;
++
++	/*
++	 * VGA registers.
++	 */
++
++	uint32_t vga_width;
++	uint32_t vga_height;
++	uint32_t vga_depth;
++	uint32_t vga_bpp;
++	uint32_t vga_pseudo;
++	uint32_t vga_red_mask;
++	uint32_t vga_blue_mask;
++	uint32_t vga_green_mask;
++
++	/*
++	 * Framebuffer info.
++	 */
++
++	void *fb_info;
++	struct vmw_legacy_display *ldu_priv;
++	struct vmw_overlay *overlay_priv;
++
++	/*
++	 * Context and surface management.
++	 */
++
++	rwlock_t resource_lock;
++	struct idr context_idr;
++	struct idr surface_idr;
++	struct idr stream_idr;
++
++	/*
++	 * Block lastclose from racing with firstopen.
++	 */
++
++	struct mutex init_mutex;
++
++	/*
++	 * A resource manager for kernel-only surfaces and
++	 * contexts.
++	 */
++
++	struct ttm_object_device *tdev;
++
++	/*
++	 * Fencing and IRQs.
++	 */
++
++	atomic_t fence_seq;
++	wait_queue_head_t fence_queue;
++	wait_queue_head_t fifo_queue;
++	atomic_t fence_queue_waiters;
++	atomic_t fifo_queue_waiters;
++	uint32_t last_read_sequence;
++	spinlock_t irq_lock;
++
++	/*
++	 * Device state
++	 */
++
++	uint32_t traces_state;
++	uint32_t enable_state;
++	uint32_t config_done_state;
++
++	/**
++	 * Execbuf
++	 */
++	/**
++	 * Protected by the cmdbuf mutex.
++	 */
++
++	struct vmw_sw_context ctx;
++	uint32_t val_seq;
++	struct mutex cmdbuf_mutex;
++
++	/**
++	 * GMR management. Protected by the lru spinlock.
++	 */
++
++	struct ida gmr_ida;
++	struct list_head gmr_lru;
++
++
++	/**
++	 * Operating mode.
++	 */
++
++	bool stealth;
++	bool is_opened;
++
++	/**
++	 * Master management.
++	 */
++
++	struct vmw_master *active_master;
++	struct vmw_master fbdev_master;
++	struct notifier_block pm_nb;
++};
++
++static inline struct vmw_private *vmw_priv(struct drm_device *dev)
++{
++	return (struct vmw_private *)dev->dev_private;
++}
++
++static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
++{
++	return (struct vmw_fpriv *)file_priv->driver_priv;
++}
++
++static inline struct vmw_master *vmw_master(struct drm_master *master)
++{
++	return (struct vmw_master *) master->driver_priv;
++}
++
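++/*
++ * Register access uses the SVGA index/value port pair: the register
++ * offset is written to the index port and the value is then written
++ * to, or read from, the value port.  Callers serialize paired
++ * accesses with dev_priv->hw_mutex.
++ */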
++static inline void vmw_write(struct vmw_private *dev_priv,
++			     unsigned int offset, uint32_t value)
++{
++	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
++	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
++}
++
++static inline uint32_t vmw_read(struct vmw_private *dev_priv,
++				unsigned int offset)
++{
++	uint32_t val;
++
++	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
++	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
++	return val;
++}
++
++/**
++ * GMR utilities - vmwgfx_gmr.c
++ */
++
++extern int vmw_gmr_bind(struct vmw_private *dev_priv,
++			struct ttm_buffer_object *bo);
++extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
++
++/**
++ * Resource utilities - vmwgfx_resource.c
++ */
++
++extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
++extern void vmw_resource_unreference(struct vmw_resource **p_res);
++extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
++extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
++				     struct drm_file *file_priv);
++extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
++				    struct drm_file *file_priv);
++extern int vmw_context_check(struct vmw_private *dev_priv,
++			     struct ttm_object_file *tfile,
++			     int id);
++extern void vmw_surface_res_free(struct vmw_resource *res);
++extern int vmw_surface_init(struct vmw_private *dev_priv,
++			    struct vmw_surface *srf,
++			    void (*res_free) (struct vmw_resource *res));
++extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
++					  struct ttm_object_file *tfile,
++					  uint32_t handle,
++					  struct vmw_surface **out);
++extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
++				     struct drm_file *file_priv);
++extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
++				    struct drm_file *file_priv);
++extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
++				       struct drm_file *file_priv);
++extern int vmw_surface_check(struct vmw_private *dev_priv,
++			     struct ttm_object_file *tfile,
++			     uint32_t handle, int *id);
++extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
++extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
++			   struct vmw_dma_buffer *vmw_bo,
++			   size_t size, struct ttm_placement *placement,
++			   bool interruptible,
++			   void (*bo_free) (struct ttm_buffer_object *bo));
++extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
++				  struct drm_file *file_priv);
++extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
++				  struct drm_file *file_priv);
++extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
++					 uint32_t cur_validate_node);
++extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
++extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
++				  uint32_t id, struct vmw_dma_buffer **out);
++extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
++extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
++extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
++extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
++				       struct vmw_dma_buffer *bo);
++extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
++				struct vmw_dma_buffer *bo);
++extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
++extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
++				  struct drm_file *file_priv);
++extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
++				  struct drm_file *file_priv);
++extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
++				  struct ttm_object_file *tfile,
++				  uint32_t *inout_id,
++				  struct vmw_resource **out);
++
++
++/**
++ * Misc Ioctl functionality - vmwgfx_ioctl.c
++ */
++
++extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv);
++extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
++
++/**
++ * Fifo utilities - vmwgfx_fifo.c
++ */
++
++extern int vmw_fifo_init(struct vmw_private *dev_priv,
++			 struct vmw_fifo_state *fifo);
++extern void vmw_fifo_release(struct vmw_private *dev_priv,
++			     struct vmw_fifo_state *fifo);
++extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
++extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
++extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
++			       uint32_t *sequence);
++extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
++extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
++extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
++
++/**
++ * TTM glue - vmwgfx_ttm_glue.c
++ */
++
++extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
++extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
++extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
++
++/**
++ * TTM buffer object driver - vmwgfx_buffer.c
++ */
++
++extern struct ttm_placement vmw_vram_placement;
++extern struct ttm_placement vmw_vram_ne_placement;
++extern struct ttm_placement vmw_vram_sys_placement;
++extern struct ttm_placement vmw_sys_placement;
++extern struct ttm_bo_driver vmw_bo_driver;
++extern int vmw_dma_quiescent(struct drm_device *dev);
++
++/**
++ * Command submission - vmwgfx_execbuf.c
++ */
++
++extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
++			     struct drm_file *file_priv);
++
++/**
++ * IRQs and waiting - vmwgfx_irq.c
++ */
++
++extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
++extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
++			  uint32_t sequence, bool interruptible,
++			  unsigned long timeout);
++extern void vmw_irq_preinstall(struct drm_device *dev);
++extern int vmw_irq_postinstall(struct drm_device *dev);
++extern void vmw_irq_uninstall(struct drm_device *dev);
++extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
++			       uint32_t sequence);
++extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
++extern int vmw_fallback_wait(struct vmw_private *dev_priv,
++			     bool lazy,
++			     bool fifo_idle,
++			     uint32_t sequence,
++			     bool interruptible,
++			     unsigned long timeout);
++
++/**
++ * Kernel framebuffer - vmwgfx_fb.c
++ */
++
++int vmw_fb_init(struct vmw_private *vmw_priv);
++int vmw_fb_close(struct vmw_private *dev_priv);
++int vmw_fb_off(struct vmw_private *vmw_priv);
++int vmw_fb_on(struct vmw_private *vmw_priv);
++
++/**
++ * Kernel modesetting - vmwgfx_kms.c
++ */
++
++int vmw_kms_init(struct vmw_private *dev_priv);
++int vmw_kms_close(struct vmw_private *dev_priv);
++int vmw_kms_save_vga(struct vmw_private *vmw_priv);
++int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
++int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv);
++void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
++void vmw_kms_cursor_snoop(struct vmw_surface *srf,
++			  struct ttm_object_file *tfile,
++			  struct ttm_buffer_object *bo,
++			  SVGA3dCmdHeader *header);
++
++/**
++ * Overlay control - vmwgfx_overlay.c
++ */
++
++int vmw_overlay_init(struct vmw_private *dev_priv);
++int vmw_overlay_close(struct vmw_private *dev_priv);
++int vmw_overlay_ioctl(struct drm_device *dev, void *data,
++		      struct drm_file *file_priv);
++int vmw_overlay_stop_all(struct vmw_private *dev_priv);
++int vmw_overlay_resume_all(struct vmw_private *dev_priv);
++int vmw_overlay_pause_all(struct vmw_private *dev_priv);
++int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
++int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
++int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
++int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
++
++/**
++ * Inline helper functions
++ */
++
++static inline void vmw_surface_unreference(struct vmw_surface **srf)
++{
++	struct vmw_surface *tmp_srf = *srf;
++	struct vmw_resource *res = &tmp_srf->res;
++	*srf = NULL;
++
++	vmw_resource_unreference(&res);
++}
++
++static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
++{
++	(void) vmw_resource_reference(&srf->res);
++	return srf;
++}
++
++static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
++{
++	struct vmw_dma_buffer *tmp_buf = *buf;
++	struct ttm_buffer_object *bo = &tmp_buf->base;
++	*buf = NULL;
++
++	ttm_bo_unref(&bo);
++}
++
++static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
++{
++	if (ttm_bo_reference(&buf->base))
++		return buf;
++	return NULL;
++}
++
++#endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+new file mode 100644
+index 0000000..0897359
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -0,0 +1,716 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_drv.h"
++#include "vmwgfx_reg.h"
++#include "ttm/ttm_bo_api.h"
++#include "ttm/ttm_placement.h"
++
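++/*
++ * Note the GNU "x ? : y" shorthand below: the return value is the
++ * nonzero capable() result or -EINVAL, and vmw_cmd_check() treats any
++ * nonzero return as a rejection of the command.
++ */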
++static int vmw_cmd_invalid(struct vmw_private *dev_priv,
++			   struct vmw_sw_context *sw_context,
++			   SVGA3dCmdHeader *header)
++{
++	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
++}
++
++static int vmw_cmd_ok(struct vmw_private *dev_priv,
++		      struct vmw_sw_context *sw_context,
++		      SVGA3dCmdHeader *header)
++{
++	return 0;
++}
++
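++/*
++ * Context ids are validated through a one-entry cache: the last id
++ * that passed vmw_context_check() is remembered in sw_context, so
++ * back-to-back commands referring to the same context skip the
++ * lookup.  vmw_cmd_sid_check() below uses the same trick for surface
++ * ids, additionally translating the user-space id to the device id.
++ */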
++static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
++			     struct vmw_sw_context *sw_context,
++			     SVGA3dCmdHeader *header)
++{
++	struct vmw_cid_cmd {
++		SVGA3dCmdHeader header;
++		__le32 cid;
++	} *cmd;
++	int ret;
++
++	cmd = container_of(header, struct vmw_cid_cmd, header);
++	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
++		return 0;
++
++	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Could not find or use context %u\n",
++			  (unsigned) cmd->cid);
++		return ret;
++	}
++
++	sw_context->last_cid = cmd->cid;
++	sw_context->cid_valid = true;
++
++	return 0;
++}
++
++static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
++			     struct vmw_sw_context *sw_context,
++			     uint32_t *sid)
++{
++	if (*sid == SVGA3D_INVALID_ID)
++		return 0;
++
++	if (unlikely(!sw_context->sid_valid ||
++		     *sid != sw_context->last_sid)) {
++		int real_id;
++		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
++					    *sid, &real_id);
++
++		if (unlikely(ret != 0)) {
++			DRM_ERROR("Could not find or use surface 0x%08x "
++				  "address 0x%08lx\n",
++				  (unsigned int) *sid,
++				  (unsigned long) sid);
++			return ret;
++		}
++
++		sw_context->last_sid = *sid;
++		sw_context->sid_valid = true;
++		*sid = real_id;
++		sw_context->sid_translation = real_id;
++	} else
++		*sid = sw_context->sid_translation;
++
++	return 0;
++}
++
++
++static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
++					   struct vmw_sw_context *sw_context,
++					   SVGA3dCmdHeader *header)
++{
++	struct vmw_sid_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdSetRenderTarget body;
++	} *cmd;
++	int ret;
++
++	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
++	if (unlikely(ret != 0))
++		return ret;
++
++	cmd = container_of(header, struct vmw_sid_cmd, header);
++	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
++	return ret;
++}
++
++static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
++				      struct vmw_sw_context *sw_context,
++				      SVGA3dCmdHeader *header)
++{
++	struct vmw_sid_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdSurfaceCopy body;
++	} *cmd;
++	int ret;
++
++	cmd = container_of(header, struct vmw_sid_cmd, header);
++	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
++	if (unlikely(ret != 0))
++		return ret;
++	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
++}
++
++static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
++				     struct vmw_sw_context *sw_context,
++				     SVGA3dCmdHeader *header)
++{
++	struct vmw_sid_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdSurfaceStretchBlt body;
++	} *cmd;
++	int ret;
++
++	cmd = container_of(header, struct vmw_sid_cmd, header);
++	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
++	if (unlikely(ret != 0))
++		return ret;
++	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
++}
++
++static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
++					 struct vmw_sw_context *sw_context,
++					 SVGA3dCmdHeader *header)
++{
++	struct vmw_sid_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdBlitSurfaceToScreen body;
++	} *cmd;
++
++	cmd = container_of(header, struct vmw_sid_cmd, header);
++	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
++}
++
++static int vmw_cmd_present_check(struct vmw_private *dev_priv,
++				 struct vmw_sw_context *sw_context,
++				 SVGA3dCmdHeader *header)
++{
++	struct vmw_sid_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdPresent body;
++	} *cmd;
++
++	cmd = container_of(header, struct vmw_sid_cmd, header);
++	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
++}
++
++static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
++				   struct vmw_sw_context *sw_context,
++				   SVGAGuestPtr *ptr,
++				   struct vmw_dma_buffer **vmw_bo_p)
++{
++	struct vmw_dma_buffer *vmw_bo = NULL;
++	struct ttm_buffer_object *bo;
++	uint32_t handle = ptr->gmrId;
++	struct vmw_relocation *reloc;
++	uint32_t cur_validate_node;
++	struct ttm_validate_buffer *val_buf;
++	int ret;
++
++	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Could not find or use GMR region.\n");
++		return -EINVAL;
++	}
++	bo = &vmw_bo->base;
++
++	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
++		DRM_ERROR("Max number of relocations per submission"
++			  " exceeded\n");
++		ret = -EINVAL;
++		goto out_no_reloc;
++	}
++
++	reloc = &sw_context->relocs[sw_context->cur_reloc++];
++	reloc->location = ptr;
++
++	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
++	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
++		DRM_ERROR("Max number of DMA buffers per submission"
++			  " exceeded.\n");
++		ret = -EINVAL;
++		goto out_no_reloc;
++	}
++
++	reloc->index = cur_validate_node;
++	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
++		val_buf = &sw_context->val_bufs[cur_validate_node];
++		val_buf->bo = ttm_bo_reference(bo);
++		val_buf->new_sync_obj_arg = (void *) dev_priv;
++		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
++		++sw_context->cur_val_buf;
++	}
++	*vmw_bo_p = vmw_bo;
++	return 0;
++
++out_no_reloc:
++	vmw_dmabuf_unreference(&vmw_bo);
++	*vmw_bo_p = NULL;
++	return ret;
++}
++
++static int vmw_cmd_end_query(struct vmw_private *dev_priv,
++			     struct vmw_sw_context *sw_context,
++			     SVGA3dCmdHeader *header)
++{
++	struct vmw_dma_buffer *vmw_bo;
++	struct vmw_query_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdEndQuery q;
++	} *cmd;
++	int ret;
++
++	cmd = container_of(header, struct vmw_query_cmd, header);
++	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
++	if (unlikely(ret != 0))
++		return ret;
++
++	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
++				      &cmd->q.guestResult,
++				      &vmw_bo);
++	if (unlikely(ret != 0))
++		return ret;
++
++	vmw_dmabuf_unreference(&vmw_bo);
++	return 0;
++}
++
++static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
++			      struct vmw_sw_context *sw_context,
++			      SVGA3dCmdHeader *header)
++{
++	struct vmw_dma_buffer *vmw_bo;
++	struct vmw_query_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdWaitForQuery q;
++	} *cmd;
++	int ret;
++
++	cmd = container_of(header, struct vmw_query_cmd, header);
++	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
++	if (unlikely(ret != 0))
++		return ret;
++
++	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
++				      &cmd->q.guestResult,
++				      &vmw_bo);
++	if (unlikely(ret != 0))
++		return ret;
++
++	vmw_dmabuf_unreference(&vmw_bo);
++	return 0;
++}
++
++
++static int vmw_cmd_dma(struct vmw_private *dev_priv,
++		       struct vmw_sw_context *sw_context,
++		       SVGA3dCmdHeader *header)
++{
++	struct vmw_dma_buffer *vmw_bo = NULL;
++	struct ttm_buffer_object *bo;
++	struct vmw_surface *srf = NULL;
++	struct vmw_dma_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdSurfaceDMA dma;
++	} *cmd;
++	int ret;
++
++	cmd = container_of(header, struct vmw_dma_cmd, header);
++	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
++				      &cmd->dma.guest.ptr,
++				      &vmw_bo);
++	if (unlikely(ret != 0))
++		return ret;
++
++	bo = &vmw_bo->base;
++	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
++					     cmd->dma.host.sid, &srf);
++	if (ret) {
++		DRM_ERROR("could not find surface\n");
++		goto out_no_reloc;
++	}
++
++	/**
++	 * Patch command stream with device SID.
++	 */
++
++	cmd->dma.host.sid = srf->res.id;
++	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
++	/**
++	 * FIXME: May deadlock here when called from the
++	 * command parsing code.
++	 */
++	vmw_surface_unreference(&srf);
++
++out_no_reloc:
++	vmw_dmabuf_unreference(&vmw_bo);
++	return ret;
++}
++
++static int vmw_cmd_draw(struct vmw_private *dev_priv,
++			struct vmw_sw_context *sw_context,
++			SVGA3dCmdHeader *header)
++{
++	struct vmw_draw_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdDrawPrimitives body;
++	} *cmd;
++	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
++		(unsigned long)header + sizeof(*cmd));
++	SVGA3dPrimitiveRange *range;
++	uint32_t i;
++	uint32_t maxnum;
++	int ret;
++
++	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
++	if (unlikely(ret != 0))
++		return ret;
++
++	cmd = container_of(header, struct vmw_draw_cmd, header);
++	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
++
++	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
++		DRM_ERROR("Illegal number of vertex declarations.\n");
++		return -EINVAL;
++	}
++
++	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
++		ret = vmw_cmd_sid_check(dev_priv, sw_context,
++					&decl->array.surfaceId);
++		if (unlikely(ret != 0))
++			return ret;
++	}
++
++	maxnum = (header->size - sizeof(cmd->body) -
++		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
++	if (unlikely(cmd->body.numRanges > maxnum)) {
++		DRM_ERROR("Illegal number of index ranges.\n");
++		return -EINVAL;
++	}
++
++	range = (SVGA3dPrimitiveRange *) decl;
++	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
++		ret = vmw_cmd_sid_check(dev_priv, sw_context,
++					&range->indexArray.surfaceId);
++		if (unlikely(ret != 0))
++			return ret;
++	}
++	return 0;
++}
++
++
++static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
++			     struct vmw_sw_context *sw_context,
++			     SVGA3dCmdHeader *header)
++{
++	struct vmw_tex_state_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdSetTextureState state;
++	};
++
++	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
++	  ((unsigned long) header + header->size + sizeof(*header));
++	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
++		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
++	int ret;
++
++	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
++	if (unlikely(ret != 0))
++		return ret;
++
++	for (; cur_state < last_state; ++cur_state) {
++		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
++			continue;
++
++		ret = vmw_cmd_sid_check(dev_priv, sw_context,
++					&cur_state->value);
++		if (unlikely(ret != 0))
++			return ret;
++	}
++
++	return 0;
++}
++
++
++typedef int (*vmw_cmd_func) (struct vmw_private *,
++			     struct vmw_sw_context *,
++			     SVGA3dCmdHeader *);
++
++#define VMW_CMD_DEF(cmd, func) \
++	[cmd - SVGA_3D_CMD_BASE] = func
++
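++/*
++ * As an example, the first entry below expands (roughly) to
++ *
++ *   [SVGA_3D_CMD_SURFACE_DEFINE - SVGA_3D_CMD_BASE] = &vmw_cmd_invalid
++ *
++ * so vmw_cmd_check() can dispatch on (cmd_id - SVGA_3D_CMD_BASE)
++ * directly.
++ */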
++static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
++	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
++	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
++	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
++	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
++	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
++		    &vmw_cmd_set_render_target_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
++	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
++	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
++	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
++	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
++	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
++		    &vmw_cmd_blt_surf_screen_check)
++};
++
++static int vmw_cmd_check(struct vmw_private *dev_priv,
++			 struct vmw_sw_context *sw_context,
++			 void *buf, uint32_t *size)
++{
++	uint32_t cmd_id;
++	uint32_t size_remaining = *size;
++	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
++	int ret;
++
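++	/*
++	 * SVGA_CMD_UPDATE is the only 2D command accepted here; it is
++	 * fixed-size: a 32-bit opcode followed by four 32-bit words
++	 * (x, y, width, height), hence 5 << 2 bytes and no further
++	 * checking.
++	 */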
++	cmd_id = ((uint32_t *)buf)[0];
++	if (cmd_id == SVGA_CMD_UPDATE) {
++		*size = 5 << 2;
++		return 0;
++	}
++
++	cmd_id = le32_to_cpu(header->id);
++	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
++
++	cmd_id -= SVGA_3D_CMD_BASE;
++	if (unlikely(*size > size_remaining))
++		goto out_err;
++
++	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
++		goto out_err;
++
++	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
++	if (unlikely(ret != 0))
++		goto out_err;
++
++	return 0;
++out_err:
++	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
++		  cmd_id + SVGA_3D_CMD_BASE);
++	return -EINVAL;
++}
++
++static int vmw_cmd_check_all(struct vmw_private *dev_priv,
++			     struct vmw_sw_context *sw_context,
++			     void *buf, uint32_t size)
++{
++	int32_t cur_size = size;
++	int ret;
++
++	while (cur_size > 0) {
++		size = cur_size;
++		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
++		if (unlikely(ret != 0))
++			return ret;
++		buf = (void *)((unsigned long) buf + size);
++		cur_size -= size;
++	}
++
++	if (unlikely(cur_size != 0)) {
++		DRM_ERROR("Command verifier out of sync.\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static void vmw_free_relocations(struct vmw_sw_context *sw_context)
++{
++	sw_context->cur_reloc = 0;
++}
++
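++/*
++ * Relocations are applied only after every buffer has been validated
++ * into its final placement: each recorded SVGAGuestPtr is patched
++ * with the buffer's offset and GMR id so the device sees valid
++ * addresses.
++ */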
++static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
++{
++	uint32_t i;
++	struct vmw_relocation *reloc;
++	struct ttm_validate_buffer *validate;
++	struct ttm_buffer_object *bo;
++
++	for (i = 0; i < sw_context->cur_reloc; ++i) {
++		reloc = &sw_context->relocs[i];
++		validate = &sw_context->val_bufs[reloc->index];
++		bo = validate->bo;
++		reloc->location->offset += bo->offset;
++		reloc->location->gmrId = vmw_dmabuf_gmr(bo);
++	}
++	vmw_free_relocations(sw_context);
++}
++
++static void vmw_clear_validations(struct vmw_sw_context *sw_context)
++{
++	struct ttm_validate_buffer *entry, *next;
++
++	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
++				 head) {
++		list_del(&entry->head);
++		vmw_dmabuf_validate_clear(entry->bo);
++		ttm_bo_unref(&entry->bo);
++		sw_context->cur_val_buf--;
++	}
++	BUG_ON(sw_context->cur_val_buf != 0);
++}
++
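++/*
++ * Placement strategy for a single buffer: first try VRAM without
++ * evicting anything, then fall back to binding a GMR, and only as a
++ * last resort use VRAM with eviction of previous contents.
++ */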
++static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
++				      struct ttm_buffer_object *bo)
++{
++	int ret;
++
++	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
++		return 0;
++
++	/**
++	 * Put BO in VRAM only if there is space.
++	 */
++
++	ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
++	if (unlikely(ret == -ERESTARTSYS))
++		return ret;
++
++	/**
++	 * Otherwise, set it up as GMR.
++	 */
++
++	if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
++		return 0;
++
++	ret = vmw_gmr_bind(dev_priv, bo);
++	if (likely(ret == 0 || ret == -ERESTARTSYS))
++		return ret;
++
++	/**
++	 * If that failed, try VRAM again, this time evicting
++	 * previous contents.
++	 */
++
++	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
++	return ret;
++}
++
++
++static int vmw_validate_buffers(struct vmw_private *dev_priv,
++				struct vmw_sw_context *sw_context)
++{
++	struct ttm_validate_buffer *entry;
++	int ret;
++
++	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
++		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
++		if (unlikely(ret != 0))
++			return ret;
++	}
++	return 0;
++}
++
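++/*
++ * Execbuf flow: reserve FIFO space, copy the user command buffer into
++ * it, verify all commands, reserve and validate the referenced buffer
++ * objects, apply relocations, commit the FIFO, and finally emit a fence
++ * that is reported back to user-space.
++ */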
++int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
++		      struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
++	struct drm_vmw_fence_rep fence_rep;
++	struct drm_vmw_fence_rep __user *user_fence_rep;
++	int ret;
++	void *user_cmd;
++	void *cmd;
++	uint32_t sequence;
++	struct vmw_sw_context *sw_context = &dev_priv->ctx;
++	struct vmw_master *vmaster = vmw_master(file_priv->master);
++
++	ret = ttm_read_lock(&vmaster->lock, true);
++	if (unlikely(ret != 0))
++		return ret;
++
++	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
++	if (unlikely(ret != 0)) {
++		ret = -ERESTARTSYS;
++		goto out_no_cmd_mutex;
++	}
++
++	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Failed reserving fifo space for commands.\n");
++		ret = -ENOMEM;
++		goto out_unlock;
++	}
++
++	user_cmd = (void __user *)(unsigned long)arg->commands;
++	ret = copy_from_user(cmd, user_cmd, arg->command_size);
++
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed copying commands.\n");
++		ret = -EFAULT;
++		goto out_commit;
++	}
++
++	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
++	sw_context->cid_valid = false;
++	sw_context->sid_valid = false;
++	sw_context->cur_reloc = 0;
++	sw_context->cur_val_buf = 0;
++
++	INIT_LIST_HEAD(&sw_context->validate_nodes);
++
++	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
++	if (unlikely(ret != 0))
++		goto out_err;
++	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
++				     dev_priv->val_seq++);
++	if (unlikely(ret != 0))
++		goto out_err;
++
++	ret = vmw_validate_buffers(dev_priv, sw_context);
++	if (unlikely(ret != 0))
++		goto out_err;
++
++	vmw_apply_relocations(sw_context);
++	vmw_fifo_commit(dev_priv, arg->command_size);
++
++	ret = vmw_fifo_send_fence(dev_priv, &sequence);
++
++	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
++				    (void *)(unsigned long) sequence);
++	vmw_clear_validations(sw_context);
++	mutex_unlock(&dev_priv->cmdbuf_mutex);
++
++	/*
++	 * This error is harmless, because if fence submission fails,
++	 * vmw_fifo_send_fence will sync.
++	 */
++
++	if (ret != 0)
++		DRM_ERROR("Fence submission error. Syncing.\n");
++
++	fence_rep.error = ret;
++	fence_rep.fence_seq = (uint64_t) sequence;
++
++	user_fence_rep = (struct drm_vmw_fence_rep __user *)
++	    (unsigned long)arg->fence_rep;
++
++	/*
++	 * copy_to_user errors will be detected by user space not
++	 * seeing fence_rep::error filled in.
++	 */
++
++	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
++
++	vmw_kms_cursor_post_execbuf(dev_priv);
++	ttm_read_unlock(&vmaster->lock);
++	return 0;
++out_err:
++	vmw_free_relocations(sw_context);
++	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
++	vmw_clear_validations(sw_context);
++out_commit:
++	vmw_fifo_commit(dev_priv, 0);
++out_unlock:
++	mutex_unlock(&dev_priv->cmdbuf_mutex);
++out_no_cmd_mutex:
++	ttm_read_unlock(&vmaster->lock);
++	return ret;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+new file mode 100644
+index 0000000..a933670
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+@@ -0,0 +1,737 @@
++/**************************************************************************
++ *
++ * Copyright © 2007 David Airlie
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "drmP.h"
++#include "vmwgfx_drv.h"
++
++#include "ttm/ttm_placement.h"
++
++#define VMW_DIRTY_DELAY (HZ / 30)
++
++struct vmw_fb_par {
++	struct vmw_private *vmw_priv;
++
++	void *vmalloc;
++
++	struct vmw_dma_buffer *vmw_bo;
++	struct ttm_bo_kmap_obj map;
++
++	u32 pseudo_palette[17];
++
++	unsigned depth;
++	unsigned bpp;
++
++	unsigned max_width;
++	unsigned max_height;
++
++	void *bo_ptr;
++	unsigned bo_size;
++	bool bo_iowrite;
++
++	struct {
++		spinlock_t lock;
++		bool active;
++		unsigned x1;
++		unsigned y1;
++		unsigned x2;
++		unsigned y2;
++	} dirty;
++};
++
++static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
++			    unsigned blue, unsigned transp,
++			    struct fb_info *info)
++{
++	struct vmw_fb_par *par = info->par;
++	u32 *pal = par->pseudo_palette;
++
++	if (regno > 15) {
++		DRM_ERROR("Bad regno %u.\n", regno);
++		return 1;
++	}
++
++	switch (par->depth) {
++	case 24:
++	case 32:
++		pal[regno] = ((red & 0xff00) << 8) |
++			      (green & 0xff00) |
++			     ((blue  & 0xff00) >> 8);
++		break;
++	default:
++		DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
++		return 1;
++	}
++
++	return 0;
++}
++
++static int vmw_fb_check_var(struct fb_var_screeninfo *var,
++			    struct fb_info *info)
++{
++	int depth = var->bits_per_pixel;
++	struct vmw_fb_par *par = info->par;
++	struct vmw_private *vmw_priv = par->vmw_priv;
++
++	switch (var->bits_per_pixel) {
++	case 32:
++		depth = (var->transp.length > 0) ? 32 : 24;
++		break;
++	default:
++		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
++		return -EINVAL;
++	}
++
++	switch (depth) {
++	case 24:
++		var->red.offset = 16;
++		var->green.offset = 8;
++		var->blue.offset = 0;
++		var->red.length = 8;
++		var->green.length = 8;
++		var->blue.length = 8;
++		var->transp.length = 0;
++		var->transp.offset = 0;
++		break;
++	case 32:
++		var->red.offset = 16;
++		var->green.offset = 8;
++		var->blue.offset = 0;
++		var->red.length = 8;
++		var->green.length = 8;
++		var->blue.length = 8;
++		var->transp.length = 8;
++		var->transp.offset = 24;
++		break;
++	default:
++		DRM_ERROR("Bad depth %u.\n", depth);
++		return -EINVAL;
++	}
++
++	/* Without multimon it's hard to resize. */
++	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
++	    (var->xres != par->max_width ||
++	     var->yres != par->max_height)) {
++		DRM_ERROR("Tried to resize, but we don't have multimon\n");
++		return -EINVAL;
++	}
++
++	if (var->xres > par->max_width ||
++	    var->yres > par->max_height) {
++		DRM_ERROR("Requested geom can not fit in framebuffer\n");
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++static int vmw_fb_set_par(struct fb_info *info)
++{
++	struct vmw_fb_par *par = info->par;
++	struct vmw_private *vmw_priv = par->vmw_priv;
++
++	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
++		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
++
++		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
++		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
++		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
++		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
++		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
++		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
++		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
++		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
++
++		/* TODO check if pitch and offset changes */
++
++		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
++	} else {
++		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
++		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
++
++		/* TODO check if pitch and offset changes */
++	}
++
++	return 0;
++}
++
++static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
++			      struct fb_info *info)
++{
++	return 0;
++}
++
++static int vmw_fb_blank(int blank, struct fb_info *info)
++{
++	return 0;
++}
++
++/*
++ * Dirty code
++ */
++
++static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
++{
++	struct vmw_private *vmw_priv = par->vmw_priv;
++	struct fb_info *info = vmw_priv->fb_info;
++	int stride = (info->fix.line_length / 4);
++	int *src = (int *)info->screen_base;
++	__le32 __iomem *vram_mem = par->bo_ptr;
++	unsigned long flags;
++	unsigned x, y, w, h;
++	int i, k;
++	struct {
++		uint32_t header;
++		SVGAFifoCmdUpdate body;
++	} *cmd;
++
++	spin_lock_irqsave(&par->dirty.lock, flags);
++	if (!par->dirty.active) {
++		spin_unlock_irqrestore(&par->dirty.lock, flags);
++		return;
++	}
++	x = par->dirty.x1;
++	y = par->dirty.y1;
++	w = min(par->dirty.x2, info->var.xres) - x;
++	h = min(par->dirty.y2, info->var.yres) - y;
++	par->dirty.x1 = par->dirty.x2 = 0;
++	par->dirty.y1 = par->dirty.y2 = 0;
++	spin_unlock_irqrestore(&par->dirty.lock, flags);
++
++	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
++		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
++			iowrite32(src[k], vram_mem + k);
++	}
++
++#if 0
++	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
++#endif
++
++	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Fifo reserve failed.\n");
++		return;
++	}
++
++	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
++	cmd->body.x = cpu_to_le32(x);
++	cmd->body.y = cpu_to_le32(y);
++	cmd->body.width = cpu_to_le32(w);
++	cmd->body.height = cpu_to_le32(h);
++	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
++}
++
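++/*
++ * The dirty region is tracked as a single bounding rectangle, so
++ * marking e.g. (0,0)-(8,8) and then (100,100)-(116,116) grows it to
++ * (0,0)-(116,116), and one SVGA_CMD_UPDATE later covers both areas.
++ */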
++static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
++			      unsigned x1, unsigned y1,
++			      unsigned width, unsigned height)
++{
++	struct fb_info *info = par->vmw_priv->fb_info;
++	unsigned long flags;
++	unsigned x2 = x1 + width;
++	unsigned y2 = y1 + height;
++
++	spin_lock_irqsave(&par->dirty.lock, flags);
++	if (par->dirty.x1 == par->dirty.x2) {
++		par->dirty.x1 = x1;
++		par->dirty.y1 = y1;
++		par->dirty.x2 = x2;
++		par->dirty.y2 = y2;
++		/* If we are active, start the dirty work;
++		 * we share the delayed work with the defio system. */
++		if (par->dirty.active)
++			schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
++	} else {
++		if (x1 < par->dirty.x1)
++			par->dirty.x1 = x1;
++		if (y1 < par->dirty.y1)
++			par->dirty.y1 = y1;
++		if (x2 > par->dirty.x2)
++			par->dirty.x2 = x2;
++		if (y2 > par->dirty.y2)
++			par->dirty.y2 = y2;
++	}
++	spin_unlock_irqrestore(&par->dirty.lock, flags);
++}
++
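++/*
++ * Deferred-io callback: convert the list of touched pages into a byte
++ * range and from that into a full-width scanline range, which is then
++ * flushed like any other dirty rectangle.
++ */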
++static void vmw_deferred_io(struct fb_info *info,
++			    struct list_head *pagelist)
++{
++	struct vmw_fb_par *par = info->par;
++	unsigned long start, end, min, max;
++	unsigned long flags;
++	struct page *page;
++	int y1, y2;
++
++	min = ULONG_MAX;
++	max = 0;
++	list_for_each_entry(page, pagelist, lru) {
++		start = page->index << PAGE_SHIFT;
++		end = start + PAGE_SIZE - 1;
++		min = min(min, start);
++		max = max(max, end);
++	}
++
++	if (min < max) {
++		y1 = min / info->fix.line_length;
++		y2 = (max / info->fix.line_length) + 1;
++
++		spin_lock_irqsave(&par->dirty.lock, flags);
++		par->dirty.x1 = 0;
++		par->dirty.y1 = y1;
++		par->dirty.x2 = info->var.xres;
++		par->dirty.y2 = y2;
++		spin_unlock_irqrestore(&par->dirty.lock, flags);
++	}
++
++	vmw_fb_dirty_flush(par);
++}
++
++struct fb_deferred_io vmw_defio = {
++	.delay		= VMW_DIRTY_DELAY,
++	.deferred_io	= vmw_deferred_io,
++};
++
++/*
++ * Draw code
++ */
++
++static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
++{
++	cfb_fillrect(info, rect);
++	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
++			  rect->width, rect->height);
++}
++
++static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
++{
++	cfb_copyarea(info, region);
++	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
++			  region->width, region->height);
++}
++
++static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
++{
++	cfb_imageblit(info, image);
++	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
++			  image->width, image->height);
++}
++
++/*
++ * Bring up code
++ */
++
++static struct fb_ops vmw_fb_ops = {
++	.owner = THIS_MODULE,
++	.fb_check_var = vmw_fb_check_var,
++	.fb_set_par = vmw_fb_set_par,
++	.fb_setcolreg = vmw_fb_setcolreg,
++	.fb_fillrect = vmw_fb_fillrect,
++	.fb_copyarea = vmw_fb_copyarea,
++	.fb_imageblit = vmw_fb_imageblit,
++	.fb_pan_display = vmw_fb_pan_display,
++	.fb_blank = vmw_fb_blank,
++};
++
++static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
++			    size_t size, struct vmw_dma_buffer **out)
++{
++	struct vmw_dma_buffer *vmw_bo;
++	struct ttm_placement ne_placement = vmw_vram_ne_placement;
++	int ret;
++
++	ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++	/* interruptible? */
++	ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
++	if (unlikely(ret != 0))
++		return ret;
++
++	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
++	if (!vmw_bo) {
++		ret = -ENOMEM;
++		goto err_unlock;
++	}
++
++	ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
++			      &ne_placement,
++			      false,
++			      &vmw_dmabuf_bo_free);
++	if (unlikely(ret != 0))
++		goto err_unlock; /* init frees the buffer on failure */
++
++	*out = vmw_bo;
++
++	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
++
++	return 0;
++
++err_unlock:
++	ttm_write_unlock(&vmw_priv->fbdev_master.lock);
++	return ret;
++}
++
++int vmw_fb_init(struct vmw_private *vmw_priv)
++{
++	struct device *device = &vmw_priv->dev->pdev->dev;
++	struct vmw_fb_par *par;
++	struct fb_info *info;
++	unsigned initial_width, initial_height;
++	unsigned fb_width, fb_height;
++	unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
++	int ret;
++
++	initial_width = 800;
++	initial_height = 600;
++
++	fb_bbp = 32;
++	fb_depth = 24;
++
++	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
++		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
++		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
++	} else {
++		fb_width = min(vmw_priv->fb_max_width, initial_width);
++		fb_height = min(vmw_priv->fb_max_height, initial_height);
++	}
++
++	initial_width = min(fb_width, initial_width);
++	initial_height = min(fb_height, initial_height);
++
++	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
++	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
++	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
++	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
++	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
++	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
++	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
++
++	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
++	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
++	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
++
++	DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
++	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
++	DRM_DEBUG("width  %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
++	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
++	DRM_DEBUG("bpp    %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
++	DRM_DEBUG("depth  %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
++	DRM_DEBUG("bpl    %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
++	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
++	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
++	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
++	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
++	DRM_DEBUG("fb_pitch  %u\n", fb_pitch);
++	DRM_DEBUG("fb_size   %u kiB\n", fb_size / 1024);
++
++	info = framebuffer_alloc(sizeof(*par), device);
++	if (!info)
++		return -ENOMEM;
++
++	/*
++	 * Par
++	 */
++	vmw_priv->fb_info = info;
++	par = info->par;
++	par->vmw_priv = vmw_priv;
++	par->depth = fb_depth;
++	par->bpp = fb_bbp;
++	par->vmalloc = NULL;
++	par->max_width = fb_width;
++	par->max_height = fb_height;
++
++	/*
++	 * Create buffers and alloc memory
++	 */
++	par->vmalloc = vmalloc(fb_size);
++	if (unlikely(par->vmalloc == NULL)) {
++		ret = -ENOMEM;
++		goto err_free;
++	}
++
++	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
++	if (unlikely(ret != 0))
++		goto err_free;
++
++	ret = ttm_bo_kmap(&par->vmw_bo->base,
++			  0,
++			  par->vmw_bo->base.num_pages,
++			  &par->map);
++	if (unlikely(ret != 0))
++		goto err_unref;
++	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
++	par->bo_size = fb_size;
++
++	/*
++	 * Fixed and var
++	 */
++	strcpy(info->fix.id, "svgadrmfb");
++	info->fix.type = FB_TYPE_PACKED_PIXELS;
++	info->fix.visual = FB_VISUAL_TRUECOLOR;
++	info->fix.type_aux = 0;
++	info->fix.xpanstep = 1; /* doing it in hw */
++	info->fix.ypanstep = 1; /* doing it in hw */
++	info->fix.ywrapstep = 0;
++	info->fix.accel = FB_ACCEL_NONE;
++	info->fix.line_length = fb_pitch;
++
++	info->fix.smem_start = 0;
++	info->fix.smem_len = fb_size;
++
++	info->fix.mmio_start = 0;
++	info->fix.mmio_len = 0;
++
++	info->pseudo_palette = par->pseudo_palette;
++	info->screen_base = par->vmalloc;
++	info->screen_size = fb_size;
++
++	info->flags = FBINFO_DEFAULT;
++	info->fbops = &vmw_fb_ops;
++
++	/* Depth 24 by default. */
++	info->var.red.offset = 16;
++	info->var.green.offset = 8;
++	info->var.blue.offset = 0;
++	info->var.red.length = 8;
++	info->var.green.length = 8;
++	info->var.blue.length = 8;
++	info->var.transp.offset = 0;
++	info->var.transp.length = 0;
++
++	info->var.xres_virtual = fb_width;
++	info->var.yres_virtual = fb_height;
++	info->var.bits_per_pixel = par->bpp;
++	info->var.xoffset = 0;
++	info->var.yoffset = 0;
++	info->var.activate = FB_ACTIVATE_NOW;
++	info->var.height = -1;
++	info->var.width = -1;
++
++	info->var.xres = initial_width;
++	info->var.yres = initial_height;
++
++#if 0
++	info->pixmap.size = 64*1024;
++	info->pixmap.buf_align = 8;
++	info->pixmap.access_align = 32;
++	info->pixmap.flags = FB_PIXMAP_SYSTEM;
++	info->pixmap.scan_align = 1;
++#else
++	info->pixmap.size = 0;
++	info->pixmap.buf_align = 8;
++	info->pixmap.access_align = 32;
++	info->pixmap.flags = FB_PIXMAP_SYSTEM;
++	info->pixmap.scan_align = 1;
++#endif
++
++	info->aperture_base = vmw_priv->vram_start;
++	info->aperture_size = vmw_priv->vram_size;
++
++	/*
++	 * Dirty & Deferred IO
++	 */
++	par->dirty.x1 = par->dirty.x2 = 0;
++	par->dirty.y1 = par->dirty.y2 = 0;
++	par->dirty.active = true;
++	spin_lock_init(&par->dirty.lock);
++	info->fbdefio = &vmw_defio;
++	fb_deferred_io_init(info);
++
++	ret = register_framebuffer(info);
++	if (unlikely(ret != 0))
++		goto err_defio;
++
++	return 0;
++
++err_defio:
++	fb_deferred_io_cleanup(info);
++	ttm_bo_kunmap(&par->map);
++err_unref:
++	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
++err_free:
++	vfree(par->vmalloc);
++	framebuffer_release(info);
++	vmw_priv->fb_info = NULL;
++
++	return ret;
++}
++
++int vmw_fb_close(struct vmw_private *vmw_priv)
++{
++	struct fb_info *info;
++	struct vmw_fb_par *par;
++	struct ttm_buffer_object *bo;
++
++	if (!vmw_priv->fb_info)
++		return 0;
++
++	info = vmw_priv->fb_info;
++	par = info->par;
++	bo = &par->vmw_bo->base;
++	par->vmw_bo = NULL;
++
++	/* FIXME: check the teardown order here. */
++	fb_deferred_io_cleanup(info);
++	unregister_framebuffer(info);
++
++	ttm_bo_kunmap(&par->map);
++	ttm_bo_unref(&bo);
++
++	vfree(par->vmalloc);
++	framebuffer_release(info);
++
++	return 0;
++}
++
++int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
++			 struct vmw_dma_buffer *vmw_bo)
++{
++	struct ttm_buffer_object *bo = &vmw_bo->base;
++	int ret = 0;
++
++	ret = ttm_bo_reserve(bo, false, false, false, 0);
++	if (unlikely(ret != 0))
++		return ret;
++
++	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
++	ttm_bo_unreserve(bo);
++
++	return ret;
++}
++
++int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
++				struct vmw_dma_buffer *vmw_bo)
++{
++	struct ttm_buffer_object *bo = &vmw_bo->base;
++	struct ttm_placement ne_placement = vmw_vram_ne_placement;
++	int ret = 0;
++
++	ne_placement.lpfn = bo->num_pages;
++
++	/* interruptible? */
++	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
++	if (unlikely(ret != 0))
++		return ret;
++
++	ret = ttm_bo_reserve(bo, false, false, false, 0);
++	if (unlikely(ret != 0))
++		goto err_unlock;
++
++	ret = ttm_bo_validate(bo, &ne_placement, false, false);
++	ttm_bo_unreserve(bo);
++err_unlock:
++	ttm_write_unlock(&vmw_priv->active_master->lock);
++
++	return ret;
++}
++
++int vmw_fb_off(struct vmw_private *vmw_priv)
++{
++	struct fb_info *info;
++	struct vmw_fb_par *par;
++	unsigned long flags;
++
++	if (!vmw_priv->fb_info)
++		return -EINVAL;
++
++	info = vmw_priv->fb_info;
++	par = info->par;
++
++	spin_lock_irqsave(&par->dirty.lock, flags);
++	par->dirty.active = false;
++	spin_unlock_irqrestore(&par->dirty.lock, flags);
++
++	flush_scheduled_work();
++
++	par->bo_ptr = NULL;
++	ttm_bo_kunmap(&par->map);
++
++	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
++
++	return 0;
++}
++
++int vmw_fb_on(struct vmw_private *vmw_priv)
++{
++	struct fb_info *info;
++	struct vmw_fb_par *par;
++	unsigned long flags;
++	bool dummy;
++	int ret;
++
++	if (!vmw_priv->fb_info)
++		return -EINVAL;
++
++	info = vmw_priv->fb_info;
++	par = info->par;
++
++	/* we are already active */
++	if (par->bo_ptr != NULL)
++		return 0;
++
++	/* Make sure that all overlays are stopped when we take over. */
++	vmw_overlay_stop_all(vmw_priv);
++
++	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("could not move buffer to start of VRAM\n");
++		goto err_no_buffer;
++	}
++
++	ret = ttm_bo_kmap(&par->vmw_bo->base,
++			  0,
++			  par->vmw_bo->base.num_pages,
++			  &par->map);
++	BUG_ON(ret != 0);
++	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
++
++	spin_lock_irqsave(&par->dirty.lock, flags);
++	par->dirty.active = true;
++	spin_unlock_irqrestore(&par->dirty.lock, flags);
++
++err_no_buffer:
++	vmw_fb_set_par(info);
++
++	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
++
++	/* If something was already dirty we won't
++	 * schedule new work, so let's do it now. */
++	schedule_delayed_work(&info->deferred_work, 0);
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+new file mode 100644
+index 0000000..39d43a0
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+@@ -0,0 +1,538 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_drv.h"
++#include "drmP.h"
++#include "ttm/ttm_placement.h"
++
++bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
++{
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++	uint32_t fifo_min, hwversion;
++
++	fifo_min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
++	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
++		return false;
++
++	hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
++	if (hwversion == 0)
++		return false;
++
++	if (hwversion < SVGA3D_HWVERSION_WS65_B1)
++		return false;
++
++	return true;
++}
++
++int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
++{
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++	uint32_t max;
++	uint32_t min;
++	uint32_t dummy;
++	int ret;
++
++	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
++	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
++	if (unlikely(fifo->static_buffer == NULL))
++		return -ENOMEM;
++
++	fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
++	fifo->last_data_size = 0;
++	fifo->last_buffer_add = false;
++	fifo->last_buffer = vmalloc(fifo->last_buffer_size);
++	if (unlikely(fifo->last_buffer == NULL)) {
++		ret = -ENOMEM;
++		goto out_err;
++	}
++
++	fifo->dynamic_buffer = NULL;
++	fifo->reserved_size = 0;
++	fifo->using_bounce_buffer = false;
++
++	mutex_init(&fifo->fifo_mutex);
++	init_rwsem(&fifo->rwsem);
++
++	/*
++	 * Allow mapping the first page read-only to user-space.
++	 */
++
++	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
++	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
++	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
++
++	mutex_lock(&dev_priv->hw_mutex);
++	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
++	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
++	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
++
++	min = 4;
++	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
++		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
++	min <<= 2;
++
++	if (min < PAGE_SIZE)
++		min = PAGE_SIZE;
++
++	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
++	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
++	wmb();
++	iowrite32(min,  fifo_mem + SVGA_FIFO_NEXT_CMD);
++	iowrite32(min,  fifo_mem + SVGA_FIFO_STOP);
++	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
++	mb();
++
++	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
++	min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
++	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
++
++	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
++		 (unsigned int) max,
++		 (unsigned int) min,
++		 (unsigned int) fifo->capabilities);
++
++	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
++	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
++
++	return vmw_fifo_send_fence(dev_priv, &dummy);
++out_err:
++	vfree(fifo->static_buffer);
++	fifo->static_buffer = NULL;
++	return ret;
++}
++
++void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
++{
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++
++	mutex_lock(&dev_priv->hw_mutex);
++
++	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
++		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
++		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
++	}
++
++	mutex_unlock(&dev_priv->hw_mutex);
++}
++
++void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
++{
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++
++	mutex_lock(&dev_priv->hw_mutex);
++
++	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
++		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
++
++	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
++
++	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
++		  dev_priv->config_done_state);
++	vmw_write(dev_priv, SVGA_REG_ENABLE,
++		  dev_priv->enable_state);
++
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	if (likely(fifo->last_buffer != NULL)) {
++		vfree(fifo->last_buffer);
++		fifo->last_buffer = NULL;
++	}
++
++	if (likely(fifo->static_buffer != NULL)) {
++		vfree(fifo->static_buffer);
++		fifo->static_buffer = NULL;
++	}
++
++	if (likely(fifo->dynamic_buffer != NULL)) {
++		vfree(fifo->dynamic_buffer);
++		fifo->dynamic_buffer = NULL;
++	}
++}
++
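++/*
++ * With next_cmd at or ahead of stop, the free space wraps around the
++ * ring and amounts to (max - next_cmd) + (stop - min); the FIFO counts
++ * as full once that no longer exceeds the requested byte count.
++ */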
++static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
++{
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
++	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
++	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
++	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
++
++	return ((max - next_cmd) + (stop - min) <= bytes);
++}
++
++static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
++			       uint32_t bytes, bool interruptible,
++			       unsigned long timeout)
++{
++	int ret = 0;
++	unsigned long end_jiffies = jiffies + timeout;
++	DEFINE_WAIT(__wait);
++
++	DRM_INFO("Fifo wait noirq.\n");
++
++	for (;;) {
++		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
++				(interruptible) ?
++				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
++		if (!vmw_fifo_is_full(dev_priv, bytes))
++			break;
++		if (time_after_eq(jiffies, end_jiffies)) {
++			ret = -EBUSY;
++			DRM_ERROR("SVGA device lockup.\n");
++			break;
++		}
++		schedule_timeout(1);
++		if (interruptible && signal_pending(current)) {
++			ret = -ERESTARTSYS;
++			break;
++		}
++	}
++	finish_wait(&dev_priv->fifo_queue, &__wait);
++	wake_up_all(&dev_priv->fifo_queue);
++	DRM_INFO("Fifo noirq exit.\n");
++	return ret;
++}
++
++static int vmw_fifo_wait(struct vmw_private *dev_priv,
++			 uint32_t bytes, bool interruptible,
++			 unsigned long timeout)
++{
++	long ret = 1L;
++	unsigned long irq_flags;
++
++	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
++		return 0;
++
++	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
++	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
++		return vmw_fifo_wait_noirq(dev_priv, bytes,
++					   interruptible, timeout);
++
++	mutex_lock(&dev_priv->hw_mutex);
++	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
++		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
++		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
++		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++		vmw_write(dev_priv, SVGA_REG_IRQMASK,
++			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
++			  SVGA_IRQFLAG_FIFO_PROGRESS);
++		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
++	}
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	if (interruptible)
++		ret = wait_event_interruptible_timeout
++		    (dev_priv->fifo_queue,
++		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
++	else
++		ret = wait_event_timeout
++		    (dev_priv->fifo_queue,
++		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
++
++	if (unlikely(ret == 0))
++		ret = -EBUSY;
++	else if (likely(ret > 0))
++		ret = 0;
++
++	mutex_lock(&dev_priv->hw_mutex);
++	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
++		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
++		vmw_write(dev_priv, SVGA_REG_IRQMASK,
++			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
++			  ~SVGA_IRQFLAG_FIFO_PROGRESS);
++		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
++	}
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	return ret;
++}
++
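++/*
++ * Reserve space for a command.  Three outcomes are possible: the space
++ * is reserved in place in the ring (advertised through
++ * SVGA_FIFO_RESERVED when the device supports it), or a bounce buffer
++ * is handed out instead -- the preallocated static buffer for small
++ * reservations, a vmalloc'ed dynamic one otherwise.
++ */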
++void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
++{
++	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++	uint32_t max;
++	uint32_t min;
++	uint32_t next_cmd;
++	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
++	int ret;
++
++	mutex_lock(&fifo_state->fifo_mutex);
++	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
++	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
++	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
++
++	if (unlikely(bytes >= (max - min)))
++		goto out_err;
++
++	BUG_ON(fifo_state->reserved_size != 0);
++	BUG_ON(fifo_state->dynamic_buffer != NULL);
++
++	fifo_state->reserved_size = bytes;
++
++	while (1) {
++		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
++		bool need_bounce = false;
++		bool reserve_in_place = false;
++
++		if (next_cmd >= stop) {
++			if (likely((next_cmd + bytes < max ||
++				    (next_cmd + bytes == max && stop > min))))
++				reserve_in_place = true;
++
++			else if (vmw_fifo_is_full(dev_priv, bytes)) {
++				ret = vmw_fifo_wait(dev_priv, bytes,
++						    false, 3 * HZ);
++				if (unlikely(ret != 0))
++					goto out_err;
++			} else
++				need_bounce = true;
++
++		} else {
++
++			if (likely((next_cmd + bytes < stop)))
++				reserve_in_place = true;
++			else {
++				ret = vmw_fifo_wait(dev_priv, bytes,
++						    false, 3 * HZ);
++				if (unlikely(ret != 0))
++					goto out_err;
++			}
++		}
++
++		if (reserve_in_place) {
++			if (reserveable || bytes <= sizeof(uint32_t)) {
++				fifo_state->using_bounce_buffer = false;
++
++				if (reserveable)
++					iowrite32(bytes, fifo_mem +
++						  SVGA_FIFO_RESERVED);
++				return fifo_mem + (next_cmd >> 2);
++			} else {
++				need_bounce = true;
++			}
++		}
++
++		if (need_bounce) {
++			fifo_state->using_bounce_buffer = true;
++			if (bytes < fifo_state->static_buffer_size)
++				return fifo_state->static_buffer;
++			else {
++				fifo_state->dynamic_buffer = vmalloc(bytes);
++				return fifo_state->dynamic_buffer;
++			}
++		}
++	}
++out_err:
++	fifo_state->reserved_size = 0;
++	mutex_unlock(&fifo_state->fifo_mutex);
++	return NULL;
++}
++
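++/*
++ * Copy a bounce buffer into the ring for devices with the RESERVE
++ * capability: one chunk up to max and, if the command wraps, the rest
++ * starting again at min.
++ */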
++static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
++			      __le32 __iomem *fifo_mem,
++			      uint32_t next_cmd,
++			      uint32_t max, uint32_t min, uint32_t bytes)
++{
++	uint32_t chunk_size = max - next_cmd;
++	uint32_t rest;
++	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
++	    fifo_state->dynamic_buffer : fifo_state->static_buffer;
++
++	if (bytes < chunk_size)
++		chunk_size = bytes;
++
++	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
++	mb();
++	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
++	rest = bytes - chunk_size;
++	if (rest)
++		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
++			    rest);
++}
++
++static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
++			       __le32 __iomem *fifo_mem,
++			       uint32_t next_cmd,
++			       uint32_t max, uint32_t min, uint32_t bytes)
++{
++	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
++	    fifo_state->dynamic_buffer : fifo_state->static_buffer;
++
++	while (bytes > 0) {
++		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
++		next_cmd += sizeof(uint32_t);
++		if (unlikely(next_cmd == max))
++			next_cmd = min;
++		mb();
++		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
++		mb();
++		bytes -= sizeof(uint32_t);
++	}
++}
++
++void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
++{
++	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
++	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
++	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
++	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
++
++	BUG_ON((bytes & 3) != 0);
++	BUG_ON(bytes > fifo_state->reserved_size);
++
++	fifo_state->reserved_size = 0;
++
++	if (fifo_state->using_bounce_buffer) {
++		if (reserveable)
++			vmw_fifo_res_copy(fifo_state, fifo_mem,
++					  next_cmd, max, min, bytes);
++		else
++			vmw_fifo_slow_copy(fifo_state, fifo_mem,
++					   next_cmd, max, min, bytes);
++
++		if (fifo_state->dynamic_buffer) {
++			vfree(fifo_state->dynamic_buffer);
++			fifo_state->dynamic_buffer = NULL;
++		}
++
++	}
++
++	down_write(&fifo_state->rwsem);
++	if (fifo_state->using_bounce_buffer || reserveable) {
++		next_cmd += bytes;
++		if (next_cmd >= max)
++			next_cmd -= max - min;
++		mb();
++		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
++	}
++
++	if (reserveable)
++		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
++	mb();
++	up_write(&fifo_state->rwsem);
++	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
++	mutex_unlock(&fifo_state->fifo_mutex);
++}
++
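++/*
++ * Emit a fence command.  Sequence numbers come from fence_seq and skip
++ * zero; if FIFO space cannot be reserved, fall back to a full wait so
++ * that the returned sequence is signaled by the time we return.
++ */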
++int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
++{
++	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
++	struct svga_fifo_cmd_fence *cmd_fence;
++	void *fm;
++	int ret = 0;
++	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
++
++	fm = vmw_fifo_reserve(dev_priv, bytes);
++	if (unlikely(fm == NULL)) {
++		*sequence = atomic_read(&dev_priv->fence_seq);
++		ret = -ENOMEM;
++		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
++					false, 3*HZ);
++		goto out_err;
++	}
++
++	do {
++		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
++	} while (*sequence == 0);
++
++	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
++
++		/*
++		 * Don't request hardware to send a fence. The
++		 * waiting code in vmwgfx_irq.c will emulate this.
++		 */
++
++		vmw_fifo_commit(dev_priv, 0);
++		return 0;
++	}
++
++	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
++	cmd_fence = (struct svga_fifo_cmd_fence *)
++	    ((unsigned long)fm + sizeof(__le32));
++
++	iowrite32(*sequence, &cmd_fence->fence);
++	fifo_state->last_buffer_add = true;
++	vmw_fifo_commit(dev_priv, bytes);
++	fifo_state->last_buffer_add = false;
++
++out_err:
++	return ret;
++}
++
++/**
++ * Map the first page of the FIFO read-only to user-space.
++ */
++
++static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	int ret;
++	unsigned long address = (unsigned long)vmf->virtual_address;
++
++	if (address != vma->vm_start)
++		return VM_FAULT_SIGBUS;
++
++	ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
++	if (likely(ret == -EBUSY || ret == 0))
++		return VM_FAULT_NOPAGE;
++	else if (ret == -ENOMEM)
++		return VM_FAULT_OOM;
++
++	return VM_FAULT_SIGBUS;
++}
++
++static struct vm_operations_struct vmw_fifo_vm_ops = {
++	.fault = vmw_fifo_vm_fault,
++	.open = NULL,
++	.close = NULL
++};
++
++int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	struct drm_file *file_priv;
++	struct vmw_private *dev_priv;
++
++	file_priv = (struct drm_file *)filp->private_data;
++	dev_priv = vmw_priv(file_priv->minor->dev);
++
++	if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
++	    (vma->vm_end - vma->vm_start) != PAGE_SIZE)
++		return -EINVAL;
++
++	vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
++	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
++	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
++	vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
++					vma->vm_page_prot);
++	vma->vm_ops = &vmw_fifo_vm_ops;
++	return 0;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+new file mode 100644
+index 0000000..5f8908a
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+@@ -0,0 +1,213 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_drv.h"
++#include "drmP.h"
++#include "ttm/ttm_bo_driver.h"
++
++/**
++ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
++ * the number of used descriptors.
++ */
++
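++/*
++ * Build the guest memory descriptor list: runs of physically contiguous
++ * pages are coalesced into a single descriptor, the last slot of each
++ * descriptor page points to the next page of descriptors, and a
++ * (ppn 0, num_pages 0) entry terminates the list.
++ */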
++static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
++				     struct page *pages[],
++				     unsigned long num_pages)
++{
++	struct page *page, *next;
++	struct svga_guest_mem_descriptor *page_virtual = NULL;
++	struct svga_guest_mem_descriptor *desc_virtual = NULL;
++	unsigned int desc_per_page;
++	unsigned long prev_pfn;
++	unsigned long pfn;
++	int ret;
++
++	desc_per_page = PAGE_SIZE /
++	    sizeof(struct svga_guest_mem_descriptor) - 1;
++
++	while (likely(num_pages != 0)) {
++		page = alloc_page(__GFP_HIGHMEM);
++		if (unlikely(page == NULL)) {
++			ret = -ENOMEM;
++			goto out_err;
++		}
++
++		list_add_tail(&page->lru, desc_pages);
++
++		/*
++		 * Point previous page terminating descriptor to this
++		 * page before unmapping it.
++		 */
++
++		if (likely(page_virtual != NULL)) {
++			desc_virtual->ppn = page_to_pfn(page);
++			kunmap_atomic(page_virtual, KM_USER0);
++		}
++
++		page_virtual = kmap_atomic(page, KM_USER0);
++		desc_virtual = page_virtual - 1;
++		prev_pfn = ~(0UL);
++
++		while (likely(num_pages != 0)) {
++			pfn = page_to_pfn(*pages);
++
++			if (pfn != prev_pfn + 1) {
++
++				if (desc_virtual - page_virtual ==
++				    desc_per_page - 1)
++					break;
++
++				(++desc_virtual)->ppn = cpu_to_le32(pfn);
++				desc_virtual->num_pages = cpu_to_le32(1);
++			} else {
++				uint32_t tmp =
++				    le32_to_cpu(desc_virtual->num_pages);
++				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
++			}
++			prev_pfn = pfn;
++			--num_pages;
++			++pages;
++		}
++
++		(++desc_virtual)->ppn = cpu_to_le32(0);
++		desc_virtual->num_pages = cpu_to_le32(0);
++	}
++
++	if (likely(page_virtual != NULL))
++		kunmap_atomic(page_virtual, KM_USER0);
++
++	return 0;
++out_err:
++	list_for_each_entry_safe(page, next, desc_pages, lru) {
++		list_del_init(&page->lru);
++		__free_page(page);
++	}
++	return ret;
++}
++
++static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
++{
++	struct page *page, *next;
++
++	list_for_each_entry_safe(page, next, desc_pages, lru) {
++		list_del_init(&page->lru);
++		__free_page(page);
++	}
++}
++
++static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
++				     int gmr_id, struct list_head *desc_pages)
++{
++	struct page *page;
++
++	if (unlikely(list_empty(desc_pages)))
++		return;
++
++	page = list_entry(desc_pages->next, struct page, lru);
++
++	mutex_lock(&dev_priv->hw_mutex);
++
++	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
++	wmb();
++	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
++	mb();
++
++	mutex_unlock(&dev_priv->hw_mutex);
++}
++
++/**
++ * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
++ * the number of used descriptors.
++ */
++
++static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
++					       unsigned long num_pages)
++{
++	unsigned long prev_pfn = ~(0UL);
++	unsigned long pfn;
++	unsigned long descriptors = 0;
++
++	while (num_pages--) {
++		pfn = page_to_pfn(*pages++);
++		if (prev_pfn + 1 != pfn)
++			++descriptors;
++		prev_pfn = pfn;
++	}
++
++	return descriptors;
++}
++
++int vmw_gmr_bind(struct vmw_private *dev_priv,
++		 struct ttm_buffer_object *bo)
++{
++	struct ttm_tt *ttm = bo->ttm;
++	unsigned long descriptors;
++	int ret;
++	uint32_t id;
++	struct list_head desc_pages;
++
++	if (!(dev_priv->capabilities & SVGA_CAP_GMR))
++		return -EINVAL;
++
++	ret = ttm_tt_populate(ttm);
++	if (unlikely(ret != 0))
++		return ret;
++
++	descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
++	if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
++		return -EINVAL;
++
++	INIT_LIST_HEAD(&desc_pages);
++	ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
++					ttm->num_pages);
++	if (unlikely(ret != 0))
++		return ret;
++
++	ret = vmw_gmr_id_alloc(dev_priv, &id);
++	if (unlikely(ret != 0))
++		goto out_no_id;
++
++	vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
++	vmw_gmr_free_descriptors(&desc_pages);
++	vmw_dmabuf_set_gmr(bo, id);
++	return 0;
++
++out_no_id:
++	vmw_gmr_free_descriptors(&desc_pages);
++	return ret;
++}
++
++void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
++{
++	mutex_lock(&dev_priv->hw_mutex);
++	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
++	wmb();
++	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
++	mb();
++	mutex_unlock(&dev_priv->hw_mutex);
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+new file mode 100644
+index 0000000..1c7a316
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+@@ -0,0 +1,87 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_drv.h"
++#include "vmwgfx_drm.h"
++
++int vmw_getparam_ioctl(struct drm_device *dev, void *data,
++		       struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct drm_vmw_getparam_arg *param =
++	    (struct drm_vmw_getparam_arg *)data;
++
++	switch (param->param) {
++	case DRM_VMW_PARAM_NUM_STREAMS:
++		param->value = vmw_overlay_num_overlays(dev_priv);
++		break;
++	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
++		param->value = vmw_overlay_num_free_overlays(dev_priv);
++		break;
++	case DRM_VMW_PARAM_3D:
++		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
++		break;
++	case DRM_VMW_PARAM_FIFO_OFFSET:
++		param->value = dev_priv->mmio_start;
++		break;
++	case DRM_VMW_PARAM_HW_CAPS:
++		param->value = dev_priv->capabilities;
++		break;
++	case DRM_VMW_PARAM_FIFO_CAPS:
++		param->value = dev_priv->fifo.capabilities;
++		break;
++	default:
++		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
++			  param->param);
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
++			 struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
++	struct drm_vmw_fifo_debug_arg *arg =
++	    (struct drm_vmw_fifo_debug_arg *)data;
++	__le32 __user *buffer = (__le32 __user *)
++	    (unsigned long)arg->debug_buffer;
++
++	if (unlikely(fifo_state->last_buffer == NULL))
++		return -EINVAL;
++
++	if (arg->debug_buffer_size < fifo_state->last_data_size) {
++		arg->used_size = arg->debug_buffer_size;
++		arg->did_not_fit = 1;
++	} else {
++		arg->used_size = fifo_state->last_data_size;
++		arg->did_not_fit = 0;
++	}
++	if (copy_to_user(buffer, fifo_state->last_buffer, arg->used_size))
++		return -EFAULT;
++	return 0;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+new file mode 100644
+index 0000000..4d7cb53
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+@@ -0,0 +1,286 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "drmP.h"
++#include "vmwgfx_drv.h"
++
++#define VMW_FENCE_WRAP (1 << 24)
++
++irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
++{
++	struct drm_device *dev = (struct drm_device *)arg;
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	uint32_t status;
++
++	spin_lock(&dev_priv->irq_lock);
++	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++	spin_unlock(&dev_priv->irq_lock);
++
++	if (status & SVGA_IRQFLAG_ANY_FENCE)
++		wake_up_all(&dev_priv->fence_queue);
++	if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
++		wake_up_all(&dev_priv->fifo_queue);
++
++	if (likely(status)) {
++		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++		return IRQ_HANDLED;
++	}
++
++	return IRQ_NONE;
++}
++
++static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
++{
++	uint32_t busy;
++
++	mutex_lock(&dev_priv->hw_mutex);
++	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	return (busy == 0);
++}
++
++
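++/*
++ * Fence sequences are compared with wrapping 32-bit arithmetic: a fence
++ * counts as signaled when last_read_sequence - sequence is below
++ * VMW_FENCE_WRAP (1 << 24), which stays correct across counter wrap as
++ * long as fewer than about 16M fences are outstanding.
++ */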
++bool vmw_fence_signaled(struct vmw_private *dev_priv,
++			uint32_t sequence)
++{
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++	struct vmw_fifo_state *fifo_state;
++	bool ret;
++
++	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
++		return true;
++
++	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
++	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
++		return true;
++
++	fifo_state = &dev_priv->fifo;
++	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
++	    vmw_fifo_idle(dev_priv, sequence))
++		return true;
++
++	/**
++	 * Finally, check whether the sequence is higher than what we've
++	 * actually emitted; in that case the fence is stale and counts
++	 * as signaled.
++	 */
++
++	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
++	       > VMW_FENCE_WRAP);
++
++	return ret;
++}
++
++int vmw_fallback_wait(struct vmw_private *dev_priv,
++		      bool lazy,
++		      bool fifo_idle,
++		      uint32_t sequence,
++		      bool interruptible,
++		      unsigned long timeout)
++{
++	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
++
++	uint32_t count = 0;
++	uint32_t signal_seq;
++	int ret;
++	unsigned long end_jiffies = jiffies + timeout;
++	bool (*wait_condition)(struct vmw_private *, uint32_t);
++	DEFINE_WAIT(__wait);
++
++	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
++		&vmw_fence_signaled;
++
++	/**
++	 * Block command submission while waiting for idle.
++	 */
++
++	if (fifo_idle)
++		down_read(&fifo_state->rwsem);
++	signal_seq = atomic_read(&dev_priv->fence_seq);
++	ret = 0;
++
++	for (;;) {
++		prepare_to_wait(&dev_priv->fence_queue, &__wait,
++				(interruptible) ?
++				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
++		if (wait_condition(dev_priv, sequence))
++			break;
++		if (time_after_eq(jiffies, end_jiffies)) {
++			DRM_ERROR("SVGA device lockup.\n");
++			break;
++		}
++		if (lazy)
++			schedule_timeout(1);
++		else if ((++count & 0x0F) == 0) {
++			/**
++			 * FIXME: Use schedule_hr_timeout here for
++			 * newer kernels and lower CPU utilization.
++			 */
++
++			__set_current_state(TASK_RUNNING);
++			schedule();
++			__set_current_state((interruptible) ?
++					    TASK_INTERRUPTIBLE :
++					    TASK_UNINTERRUPTIBLE);
++		}
++		if (interruptible && signal_pending(current)) {
++			ret = -ERESTARTSYS;
++			break;
++		}
++	}
++	finish_wait(&dev_priv->fence_queue, &__wait);
++	if (ret == 0 && fifo_idle) {
++		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
++	}
++	wake_up_all(&dev_priv->fence_queue);
++	if (fifo_idle)
++		up_read(&fifo_state->rwsem);
++
++	return ret;
++}
++
++int vmw_wait_fence(struct vmw_private *dev_priv,
++		   bool lazy, uint32_t sequence,
++		   bool interruptible, unsigned long timeout)
++{
++	long ret;
++	unsigned long irq_flags;
++	struct vmw_fifo_state *fifo = &dev_priv->fifo;
++
++	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
++		return 0;
++
++	if (likely(vmw_fence_signaled(dev_priv, sequence)))
++		return 0;
++
++	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
++
++	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
++		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
++					 interruptible, timeout);
++
++	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
++		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
++					 interruptible, timeout);
++
++	mutex_lock(&dev_priv->hw_mutex);
++	if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
++		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
++		outl(SVGA_IRQFLAG_ANY_FENCE,
++		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++		vmw_write(dev_priv, SVGA_REG_IRQMASK,
++			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
++			  SVGA_IRQFLAG_ANY_FENCE);
++		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
++	}
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	if (interruptible)
++		ret = wait_event_interruptible_timeout
++		    (dev_priv->fence_queue,
++		     vmw_fence_signaled(dev_priv, sequence),
++		     timeout);
++	else
++		ret = wait_event_timeout
++		    (dev_priv->fence_queue,
++		     vmw_fence_signaled(dev_priv, sequence),
++		     timeout);
++
++	if (unlikely(ret == 0))
++		ret = -EBUSY;
++	else if (likely(ret > 0))
++		ret = 0;
++
++	mutex_lock(&dev_priv->hw_mutex);
++	if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
++		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
++		vmw_write(dev_priv, SVGA_REG_IRQMASK,
++			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
++			  ~SVGA_IRQFLAG_ANY_FENCE);
++		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
++	}
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	return ret;
++}
++
++void vmw_irq_preinstall(struct drm_device *dev)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	uint32_t status;
++
++	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
++		return;
++
++	spin_lock_init(&dev_priv->irq_lock);
++	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++}
++
++int vmw_irq_postinstall(struct drm_device *dev)
++{
++	return 0;
++}
++
++void vmw_irq_uninstall(struct drm_device *dev)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	uint32_t status;
++
++	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
++		return;
++
++	mutex_lock(&dev_priv->hw_mutex);
++	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
++	mutex_unlock(&dev_priv->hw_mutex);
++
++	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
++}
++
++#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
++
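++/*
++ * The first call stores an absolute jiffies deadline in @kernel_cookie,
++ * so a restarted wait resumes with the remaining time rather than a
++ * fresh 3 second budget.
++ */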
++int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
++			 struct drm_file *file_priv)
++{
++	struct drm_vmw_fence_wait_arg *arg =
++	    (struct drm_vmw_fence_wait_arg *)data;
++	unsigned long timeout;
++
++	if (!arg->cookie_valid) {
++		arg->cookie_valid = 1;
++		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
++	}
++
++	timeout = jiffies;
++	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
++		return -EBUSY;
++
++	timeout = (unsigned long)arg->kernel_cookie - timeout;
++	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+new file mode 100644
+index 0000000..31f9afe
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+@@ -0,0 +1,880 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_kms.h"
++
++/* Might need an hrtimer here? */
++#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
++
++
++void vmw_display_unit_cleanup(struct vmw_display_unit *du)
++{
++	if (du->cursor_surface)
++		vmw_surface_unreference(&du->cursor_surface);
++	if (du->cursor_dmabuf)
++		vmw_dmabuf_unreference(&du->cursor_dmabuf);
++	drm_crtc_cleanup(&du->crtc);
++	drm_encoder_cleanup(&du->encoder);
++	drm_connector_cleanup(&du->connector);
++}
++
++/*
++ * Display Unit Cursor functions
++ */
++
++int vmw_cursor_update_image(struct vmw_private *dev_priv,
++			    u32 *image, u32 width, u32 height,
++			    u32 hotspotX, u32 hotspotY)
++{
++	struct {
++		u32 cmd;
++		SVGAFifoCmdDefineAlphaCursor cursor;
++	} *cmd;
++	u32 image_size = width * height * 4;
++	u32 cmd_size = sizeof(*cmd) + image_size;
++
++	if (!image)
++		return -EINVAL;
++
++	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Fifo reserve failed.\n");
++		return -ENOMEM;
++	}
++
++	memset(cmd, 0, sizeof(*cmd));
++
++	memcpy(&cmd[1], image, image_size);
++
++	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
++	cmd->cursor.id = cpu_to_le32(0);
++	cmd->cursor.width = cpu_to_le32(width);
++	cmd->cursor.height = cpu_to_le32(height);
++	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
++	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
++
++	vmw_fifo_commit(dev_priv, cmd_size);
++
++	return 0;
++}
++
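++/*
++ * Cursor position updates are written straight to FIFO registers rather
++ * than submitted as FIFO commands; bumping SVGA_FIFO_CURSOR_COUNT
++ * publishes the new position to the device.
++ */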
++void vmw_cursor_update_position(struct vmw_private *dev_priv,
++				bool show, int x, int y)
++{
++	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
++	uint32_t count;
++
++	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
++	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
++	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
++	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
++	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
++}
++
++int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
++			   uint32_t handle, uint32_t width, uint32_t height)
++{
++	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
++	struct vmw_surface *surface = NULL;
++	struct vmw_dma_buffer *dmabuf = NULL;
++	int ret;
++
++	if (handle) {
++		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
++						     handle, &surface);
++		if (!ret) {
++			if (!surface->snooper.image) {
++				DRM_ERROR("surface not suitable for cursor\n");
++				return -EINVAL;
++			}
++		} else {
++			ret = vmw_user_dmabuf_lookup(tfile,
++						     handle, &dmabuf);
++			if (ret) {
++				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
++				return -EINVAL;
++			}
++		}
++	}
++
++	/* takedown old cursor */
++	if (du->cursor_surface) {
++		du->cursor_surface->snooper.crtc = NULL;
++		vmw_surface_unreference(&du->cursor_surface);
++	}
++	if (du->cursor_dmabuf)
++		vmw_dmabuf_unreference(&du->cursor_dmabuf);
++
++	/* setup new image */
++	if (surface) {
++		/* vmw_user_surface_lookup takes one reference */
++		du->cursor_surface = surface;
++
++		du->cursor_surface->snooper.crtc = crtc;
++		du->cursor_age = du->cursor_surface->snooper.age;
++		vmw_cursor_update_image(dev_priv, surface->snooper.image,
++					64, 64, du->hotspot_x, du->hotspot_y);
++	} else if (dmabuf) {
++		struct ttm_bo_kmap_obj map;
++		unsigned long kmap_offset;
++		unsigned long kmap_num;
++		void *virtual;
++		bool dummy;
++
++		/* vmw_user_dmabuf_lookup takes one reference */
++		du->cursor_dmabuf = dmabuf;
++
++		kmap_offset = 0;
++		kmap_num = (64*64*4) >> PAGE_SHIFT;
++
++		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
++		if (unlikely(ret != 0)) {
++			DRM_ERROR("reserve failed\n");
++			return -EINVAL;
++		}
++
++		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
++		if (unlikely(ret != 0))
++			goto err_unreserve;
++
++		virtual = ttm_kmap_obj_virtual(&map, &dummy);
++		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
++					du->hotspot_x, du->hotspot_y);
++
++		ttm_bo_kunmap(&map);
++err_unreserve:
++		ttm_bo_unreserve(&dmabuf->base);
++
++	} else {
++		vmw_cursor_update_position(dev_priv, false, 0, 0);
++		return 0;
++	}
++
++	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
++
++	return 0;
++}
++
++int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
++{
++	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
++	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
++	bool shown = du->cursor_surface || du->cursor_dmabuf;
++
++	du->cursor_x = x + crtc->x;
++	du->cursor_y = y + crtc->y;
++
++	vmw_cursor_update_position(dev_priv, shown,
++				   du->cursor_x, du->cursor_y);
++
++	return 0;
++}
++
++void vmw_kms_cursor_snoop(struct vmw_surface *srf,
++			  struct ttm_object_file *tfile,
++			  struct ttm_buffer_object *bo,
++			  SVGA3dCmdHeader *header)
++{
++	struct ttm_bo_kmap_obj map;
++	unsigned long kmap_offset;
++	unsigned long kmap_num;
++	SVGA3dCopyBox *box;
++	unsigned box_count;
++	void *virtual;
++	bool dummy;
++	struct vmw_dma_cmd {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdSurfaceDMA dma;
++	} *cmd;
++	int ret;
++
++	cmd = container_of(header, struct vmw_dma_cmd, header);
++
++	/* No snooper installed */
++	if (!srf->snooper.image)
++		return;
++
++	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
++		DRM_ERROR("face and mipmap for cursors should never != 0\n");
++		return;
++	}
++
++	if (cmd->header.size < 64) {
++		DRM_ERROR("at least one full copy box must be given\n");
++		return;
++	}
++
++	box = (SVGA3dCopyBox *)&cmd[1];
++	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
++			sizeof(SVGA3dCopyBox);
++
++	if (cmd->dma.guest.pitch != (64 * 4) ||
++	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
++	    box->x != 0    || box->y != 0    || box->z != 0    ||
++	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
++	    box->w != 64   || box->h != 64   || box->d != 1    ||
++	    box_count != 1) {
++		/* TODO handle non-page-aligned offsets */
++		/* TODO handle partial uploads and pitch != 256 */
++		/* TODO handle more than one copy box (size != 64) */
++		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
++		return;
++	}
++
++	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
++	kmap_num = (64*64*4) >> PAGE_SHIFT;
++
++	ret = ttm_bo_reserve(bo, true, false, false, 0);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("reserve failed\n");
++		return;
++	}
++
++	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
++	if (unlikely(ret != 0))
++		goto err_unreserve;
++
++	virtual = ttm_kmap_obj_virtual(&map, &dummy);
++
++	memcpy(srf->snooper.image, virtual, 64*64*4);
++	srf->snooper.age++;
++
++	/* we can't call the cursor update function from here, since
++	 * execbuf has already reserved fifo space.
++	 *
++	 * if (srf->snooper.crtc)
++	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
++	 *					 srf->snooper.image, 64, 64,
++	 *					 du->hotspot_x, du->hotspot_y);
++	 */
++
++	ttm_bo_kunmap(&map);
++err_unreserve:
++	ttm_bo_unreserve(bo);
++}
++
++void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
++{
++	struct drm_device *dev = dev_priv->dev;
++	struct vmw_display_unit *du;
++	struct drm_crtc *crtc;
++
++	mutex_lock(&dev->mode_config.mutex);
++
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		du = vmw_crtc_to_du(crtc);
++		if (!du->cursor_surface ||
++		    du->cursor_age == du->cursor_surface->snooper.age)
++			continue;
++
++		du->cursor_age = du->cursor_surface->snooper.age;
++		vmw_cursor_update_image(dev_priv,
++					du->cursor_surface->snooper.image,
++					64, 64, du->hotspot_x, du->hotspot_y);
++	}
++
++	mutex_unlock(&dev->mode_config.mutex);
++}
++
++/*
++ * Generic framebuffer code
++ */
++
++int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
++				  struct drm_file *file_priv,
++				  unsigned int *handle)
++{
++	if (handle)
++		*handle = 0;
++
++	return 0;
++}
++
++/*
++ * Surface framebuffer code
++ */
++
++#define vmw_framebuffer_to_vfbs(x) \
++	container_of(x, struct vmw_framebuffer_surface, base.base)
++
++struct vmw_framebuffer_surface {
++	struct vmw_framebuffer base;
++	struct vmw_surface *surface;
++	struct delayed_work d_work;
++	struct mutex work_lock;
++	bool present_fs;
++};
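++
++/*
++ * When the device lacks SVGA_FIFO_CAP_SCREEN_OBJECT, dirty updates fall
++ * back to full-screen presents, rate limited to VMWGFX_PRESENT_RATE by
++ * the delayed work below; present_fs marks a pending present and
++ * work_lock guards it.
++ */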
++
++void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
++{
++	struct vmw_framebuffer_surface *vfb =
++		vmw_framebuffer_to_vfbs(framebuffer);
++
++	cancel_delayed_work_sync(&vfb->d_work);
++	drm_framebuffer_cleanup(framebuffer);
++	vmw_surface_unreference(&vfb->surface);
++
++	kfree(framebuffer);
++}
++
++static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
++{
++	struct delayed_work *d_work =
++		container_of(work, struct delayed_work, work);
++	struct vmw_framebuffer_surface *vfbs =
++		container_of(d_work, struct vmw_framebuffer_surface, d_work);
++	struct vmw_surface *surf = vfbs->surface;
++	struct drm_framebuffer *framebuffer = &vfbs->base.base;
++	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
++
++	struct {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdPresent body;
++		SVGA3dCopyRect cr;
++	} *cmd;
++
++	mutex_lock(&vfbs->work_lock);
++	if (!vfbs->present_fs)
++		goto out_unlock;
++
++	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
++	if (unlikely(cmd == NULL))
++		goto out_resched;
++
++	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
++	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
++	cmd->body.sid = cpu_to_le32(surf->res.id);
++	cmd->cr.x = cpu_to_le32(0);
++	cmd->cr.y = cpu_to_le32(0);
++	cmd->cr.srcx = cmd->cr.x;
++	cmd->cr.srcy = cmd->cr.y;
++	cmd->cr.w = cpu_to_le32(framebuffer->width);
++	cmd->cr.h = cpu_to_le32(framebuffer->height);
++	vfbs->present_fs = false;
++	vmw_fifo_commit(dev_priv, sizeof(*cmd));
++out_resched:
++	/**
++	 * Will not re-add if already pending.
++	 */
++	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
++out_unlock:
++	mutex_unlock(&vfbs->work_lock);
++}
++
++
++int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
++				  unsigned flags, unsigned color,
++				  struct drm_clip_rect *clips,
++				  unsigned num_clips)
++{
++	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
++	struct vmw_framebuffer_surface *vfbs =
++		vmw_framebuffer_to_vfbs(framebuffer);
++	struct vmw_surface *surf = vfbs->surface;
++	struct drm_clip_rect norect;
++	SVGA3dCopyRect *cr;
++	int i, inc = 1;
++
++	struct {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdPresent body;
++		SVGA3dCopyRect cr;
++	} *cmd;
++
++	if (!num_clips ||
++	    !(dev_priv->fifo.capabilities &
++	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
++		int ret;
++
++		mutex_lock(&vfbs->work_lock);
++		vfbs->present_fs = true;
++		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
++		mutex_unlock(&vfbs->work_lock);
++		if (ret) {
++			/*
++			 * No work was pending; force an immediate present.
++			 */
++			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
++		}
++		return 0;
++	}
++
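++	/*
++	 * Note that the early return above already handles num_clips == 0,
++	 * so this fallback appears unreachable as written.
++	 */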
++	if (!num_clips) {
++		num_clips = 1;
++		clips = &norect;
++		norect.x1 = norect.y1 = 0;
++		norect.x2 = framebuffer->width;
++		norect.y2 = framebuffer->height;
++	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
++		num_clips /= 2;
++		inc = 2; /* skip source rects */
++	}
++
++	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Fifo reserve failed.\n");
++		return -ENOMEM;
++	}
++
++	memset(cmd, 0, sizeof(*cmd));
++
++	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
++	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
++	cmd->body.sid = cpu_to_le32(surf->res.id);
++
++	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
++		cr->x = cpu_to_le16(clips->x1);
++		cr->y = cpu_to_le16(clips->y1);
++		cr->srcx = cr->x;
++		cr->srcy = cr->y;
++		cr->w = cpu_to_le16(clips->x2 - clips->x1);
++		cr->h = cpu_to_le16(clips->y2 - clips->y1);
++	}
++
++	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
++
++	return 0;
++}
++
++static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
++	.destroy = vmw_framebuffer_surface_destroy,
++	.dirty = vmw_framebuffer_surface_dirty,
++	.create_handle = vmw_framebuffer_create_handle,
++};
++
++int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
++				    struct vmw_surface *surface,
++				    struct vmw_framebuffer **out,
++				    unsigned width, unsigned height)
++
++{
++	struct drm_device *dev = dev_priv->dev;
++	struct vmw_framebuffer_surface *vfbs;
++	int ret;
++
++	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
++	if (!vfbs) {
++		ret = -ENOMEM;
++		goto out_err1;
++	}
++
++	ret = drm_framebuffer_init(dev, &vfbs->base.base,
++				   &vmw_framebuffer_surface_funcs);
++	if (ret)
++		goto out_err2;
++
++	if (!vmw_surface_reference(surface)) {
++		DRM_ERROR("failed to reference surface %p\n", surface);
++		ret = -EINVAL;
++		goto out_err3;
++	}
++
++	/* XXX get the first 3 from the surface info */
++	vfbs->base.base.bits_per_pixel = 32;
++	vfbs->base.base.pitch = width * 32 / 8; /* bytes per line at 32 bpp */
++	vfbs->base.base.depth = 24;
++	vfbs->base.base.width = width;
++	vfbs->base.base.height = height;
++	vfbs->base.pin = NULL;
++	vfbs->base.unpin = NULL;
++	vfbs->surface = surface;
++	mutex_init(&vfbs->work_lock);
++	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
++	*out = &vfbs->base;
++
++	return 0;
++
++out_err3:
++	drm_framebuffer_cleanup(&vfbs->base.base);
++out_err2:
++	kfree(vfbs);
++out_err1:
++	return ret;
++}
++
++/*
++ * Dmabuf framebuffer code
++ */
++
++#define vmw_framebuffer_to_vfbd(x) \
++	container_of(x, struct vmw_framebuffer_dmabuf, base.base)
++
++struct vmw_framebuffer_dmabuf {
++	struct vmw_framebuffer base;
++	struct vmw_dma_buffer *buffer;
++};
++
++void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
++{
++	struct vmw_framebuffer_dmabuf *vfbd =
++		vmw_framebuffer_to_vfbd(framebuffer);
++
++	drm_framebuffer_cleanup(framebuffer);
++	vmw_dmabuf_unreference(&vfbd->buffer);
++
++	kfree(vfbd);
++}
++
++int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
++				 unsigned flags, unsigned color,
++				 struct drm_clip_rect *clips,
++				 unsigned num_clips)
++{
++	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
++	struct drm_clip_rect norect;
++	struct {
++		uint32_t header;
++		SVGAFifoCmdUpdate body;
++	} *cmd;
++	int i, increment = 1;
++
++	if (!num_clips) {
++		num_clips = 1;
++		clips = &norect;
++		norect.x1 = norect.y1 = 0;
++		norect.x2 = framebuffer->width;
++		norect.y2 = framebuffer->height;
++	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
++		num_clips /= 2;
++		increment = 2;
++	}
++
++	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Fifo reserve failed.\n");
++		return -ENOMEM;
++	}
++
++	for (i = 0; i < num_clips; i++, clips += increment) {
++		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
++		cmd[i].body.x = cpu_to_le32(clips->x1);
++		cmd[i].body.y = cpu_to_le32(clips->y1);
++		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
++		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
++	}
++
++	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
++
++	return 0;
++}
++
++static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
++	.destroy = vmw_framebuffer_dmabuf_destroy,
++	.dirty = vmw_framebuffer_dmabuf_dirty,
++	.create_handle = vmw_framebuffer_create_handle,
++};
++
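++/*
++ * Pinning the dmabuf-backed framebuffer moves it to the start of vram
++ * and, on multimon-capable hardware, reprograms the legacy SVGA mode
++ * registers to match it; overlays are paused across the move, presumably
++ * because they occupy vram as well.
++ */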
++static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
++{
++	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
++	struct vmw_framebuffer_dmabuf *vfbd =
++		vmw_framebuffer_to_vfbd(&vfb->base);
++	int ret;
++
++	vmw_overlay_pause_all(dev_priv);
++
++	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
++
++	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
++		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
++
++		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
++		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
++		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
++		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
++		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
++		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
++		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
++		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
++	} else
++		WARN_ON(true);
++
++	vmw_overlay_resume_all(dev_priv);
++
++	return ret;
++}
++
++static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
++{
++	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
++	struct vmw_framebuffer_dmabuf *vfbd =
++		vmw_framebuffer_to_vfbd(&vfb->base);
++
++	if (WARN_ON(!vfbd->buffer))
++		return 0;
++
++	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
++}
++
++int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
++				   struct vmw_dma_buffer *dmabuf,
++				   struct vmw_framebuffer **out,
++				   unsigned width, unsigned height)
++
++{
++	struct drm_device *dev = dev_priv->dev;
++	struct vmw_framebuffer_dmabuf *vfbd;
++	int ret;
++
++	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
++	if (!vfbd) {
++		ret = -ENOMEM;
++		goto out_err1;
++	}
++
++	ret = drm_framebuffer_init(dev, &vfbd->base.base,
++				   &vmw_framebuffer_dmabuf_funcs);
++	if (ret)
++		goto out_err2;
++
++	if (!vmw_dmabuf_reference(dmabuf)) {
++		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
++		ret = -EINVAL;
++		goto out_err3;
++	}
++
++	/* XXX get the first 3 from the surface info */
++	vfbd->base.base.bits_per_pixel = 32;
++	vfbd->base.base.pitch = width * 32 / 8; /* bytes per line at 32 bpp */
++	vfbd->base.base.depth = 24;
++	vfbd->base.base.width = width;
++	vfbd->base.base.height = height;
++	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
++	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
++	vfbd->buffer = dmabuf;
++	*out = &vfbd->base;
++
++	return 0;
++
++out_err3:
++	drm_framebuffer_cleanup(&vfbd->base.base);
++out_err2:
++	kfree(vfbd);
++out_err1:
++	return ret;
++}
++
++/*
++ * Generic Kernel modesetting functions
++ */
++
++static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
++						 struct drm_file *file_priv,
++						 struct drm_mode_fb_cmd *mode_cmd)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	struct vmw_framebuffer *vfb = NULL;
++	struct vmw_surface *surface = NULL;
++	struct vmw_dma_buffer *bo = NULL;
++	int ret;
++
++	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
++					     mode_cmd->handle, &surface);
++	if (ret)
++		goto try_dmabuf;
++
++	if (!surface->scanout)
++		goto err_not_scanout;
++
++	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
++					      mode_cmd->width, mode_cmd->height);
++
++	/* vmw_user_surface_lookup takes one ref, and so does new_fb */
++	vmw_surface_unreference(&surface);
++
++	if (ret) {
++		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
++		return NULL;
++	}
++	return &vfb->base;
++
++try_dmabuf:
++	DRM_INFO("%s: trying buffer\n", __func__);
++
++	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
++	if (ret) {
++		DRM_ERROR("failed to find buffer: %i\n", ret);
++		return NULL;
++	}
++
++	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
++					     mode_cmd->width, mode_cmd->height);
++
++	/* vmw_user_dmabuf_lookup takes one ref, and so does new_fb */
++	vmw_dmabuf_unreference(&bo);
++
++	if (ret) {
++		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
++		return NULL;
++	}
++
++	return &vfb->base;
++
++err_not_scanout:
++	DRM_ERROR("surface not marked as scanout\n");
++	/* vmw_user_surface_lookup takes one ref */
++	vmw_surface_unreference(&surface);
++
++	return NULL;
++}
++
++static int vmw_kms_fb_changed(struct drm_device *dev)
++{
++	return 0;
++}
++
++static struct drm_mode_config_funcs vmw_kms_funcs = {
++	.fb_create = vmw_kms_fb_create,
++	.fb_changed = vmw_kms_fb_changed,
++};
++
++int vmw_kms_init(struct vmw_private *dev_priv)
++{
++	struct drm_device *dev = dev_priv->dev;
++	int ret;
++
++	drm_mode_config_init(dev);
++	dev->mode_config.funcs = &vmw_kms_funcs;
++	dev->mode_config.min_width = 1;
++	dev->mode_config.min_height = 1;
++	dev->mode_config.max_width = dev_priv->fb_max_width;
++	dev->mode_config.max_height = dev_priv->fb_max_height;
++
++	ret = vmw_kms_init_legacy_display_system(dev_priv);
++
++	return ret;
++}
++
++int vmw_kms_close(struct vmw_private *dev_priv)
++{
++	/*
++	 * Docs say we should take the lock before calling this function,
++	 * but since it destroys encoders, and our destructor calls
++	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
++	 */
++	drm_mode_config_cleanup(dev_priv->dev);
++	vmw_kms_close_legacy_display_system(dev_priv);
++	return 0;
++}
++
++int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv)
++{
++	struct drm_vmw_cursor_bypass_arg *arg = data;
++	struct vmw_display_unit *du;
++	struct drm_mode_object *obj;
++	struct drm_crtc *crtc;
++	int ret = 0;
++
++
++	mutex_lock(&dev->mode_config.mutex);
++	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
++
++		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++			du = vmw_crtc_to_du(crtc);
++			du->hotspot_x = arg->xhot;
++			du->hotspot_y = arg->yhot;
++		}
++
++		mutex_unlock(&dev->mode_config.mutex);
++		return 0;
++	}
++
++	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
++	if (!obj) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	crtc = obj_to_crtc(obj);
++	du = vmw_crtc_to_du(crtc);
++
++	du->hotspot_x = arg->xhot;
++	du->hotspot_y = arg->yhot;
++
++out:
++	mutex_unlock(&dev->mode_config.mutex);
++
++	return ret;
++}
++
++int vmw_kms_save_vga(struct vmw_private *vmw_priv)
++{
++	/*
++	 * Set up a single multimon monitor with a size of
++	 * 0x0; this stops the UI from resizing when we
++	 * change the framebuffer size.
++	 */
++	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
++		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
++		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
++	}
++
++	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
++	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
++	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
++	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
++	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
++	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
++	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
++	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
++
++	return 0;
++}
++
++int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
++{
++	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
++	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
++	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
++	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
++	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
++	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
++	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
++	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
++
++	/* TODO check for multimon */
++	vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+new file mode 100644
+index 0000000..8b95249
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+@@ -0,0 +1,102 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#ifndef VMWGFX_KMS_H_
++#define VMWGFX_KMS_H_
++
++#include "drmP.h"
++#include "vmwgfx_drv.h"
++
++
++#define vmw_framebuffer_to_vfb(x) \
++	container_of(x, struct vmw_framebuffer, base)
++
++/**
++ * Base class for framebuffers
++ *
++ * @pin is called whenever a crtc uses this framebuffer
++ * @unpin is called when the last crtc stops using it
++ */
++struct vmw_framebuffer {
++	struct drm_framebuffer base;
++	int (*pin)(struct vmw_framebuffer *fb);
++	int (*unpin)(struct vmw_framebuffer *fb);
++};
++
++
++#define vmw_crtc_to_du(x) \
++	container_of(x, struct vmw_display_unit, crtc)
++
++/*
++ * Basic cursor manipulation
++ */
++int vmw_cursor_update_image(struct vmw_private *dev_priv,
++			    u32 *image, u32 width, u32 height,
++			    u32 hotspotX, u32 hotspotY);
++void vmw_cursor_update_position(struct vmw_private *dev_priv,
++				bool show, int x, int y);
++
++/**
++ * Base class display unit.
++ *
++ * The SVGA hw doesn't have a concept of a crtc, encoder or connector,
++ * so the display unit is all of them at the same time. This is true for
++ * both legacy multimon and screen objects.
++ */
++struct vmw_display_unit {
++	struct drm_crtc crtc;
++	struct drm_encoder encoder;
++	struct drm_connector connector;
++
++	struct vmw_surface *cursor_surface;
++	struct vmw_dma_buffer *cursor_dmabuf;
++	size_t cursor_age;
++
++	int cursor_x;
++	int cursor_y;
++
++	int hotspot_x;
++	int hotspot_y;
++
++	unsigned unit;
++};
++
++/*
++ * Shared display unit functions - vmwgfx_kms.c
++ */
++void vmw_display_unit_cleanup(struct vmw_display_unit *du);
++int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
++			   uint32_t handle, uint32_t width, uint32_t height);
++int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
++
++/*
++ * Legacy display unit functions - vmwgfx_ldu.c
++ */
++int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
++int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
++
++#endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+new file mode 100644
+index 0000000..9089159
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+@@ -0,0 +1,516 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_kms.h"
++
++#define vmw_crtc_to_ldu(x) \
++	container_of(x, struct vmw_legacy_display_unit, base.crtc)
++#define vmw_encoder_to_ldu(x) \
++	container_of(x, struct vmw_legacy_display_unit, base.encoder)
++#define vmw_connector_to_ldu(x) \
++	container_of(x, struct vmw_legacy_display_unit, base.connector)
++
++struct vmw_legacy_display {
++	struct list_head active;
++
++	unsigned num_active;
++
++	struct vmw_framebuffer *fb;
++};
++
++/**
++ * Display unit using the legacy register interface.
++ */
++struct vmw_legacy_display_unit {
++	struct vmw_display_unit base;
++
++	struct list_head active;
++
++	unsigned unit;
++};
++
++static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
++{
++	list_del_init(&ldu->active);
++	vmw_display_unit_cleanup(&ldu->base);
++	kfree(ldu);
++}
++
++
++/*
++ * Legacy Display Unit CRTC functions
++ */
++
++static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
++{
++}
++
++static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
++{
++}
++
++static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
++				   u16 *r, u16 *g, u16 *b,
++				   uint32_t size)
++{
++}
++
++static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
++{
++	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
++}
++
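++/*
++ * Commit the active list to the device in two passes: every display is
++ * first cleared to 0x0 (keeping the host UI from resizing), then the
++ * active units are programmed in order with their crtc geometry.
++ */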
++static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
++{
++	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
++	struct vmw_legacy_display_unit *entry;
++	struct drm_crtc *crtc;
++	int i = 0;
++
++	/* disable all displays first, to stop the screen changing size on resize */
++	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
++	for (i = 0; i < lds->num_active; i++) {
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
++	}
++
++	/* Now set the mode */
++	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
++	i = 0;
++	list_for_each_entry(entry, &lds->active, active) {
++		crtc = &entry->base.crtc;
++
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
++		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
++
++		i++;
++	}
++
++	return 0;
++}
++
++static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
++			      struct vmw_legacy_display_unit *ldu)
++{
++	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
++	if (list_empty(&ldu->active))
++		return 0;
++
++	list_del_init(&ldu->active);
++	if (--(ld->num_active) == 0) {
++		BUG_ON(!ld->fb);
++		if (ld->fb->unpin)
++			ld->fb->unpin(ld->fb);
++		ld->fb = NULL;
++	}
++
++	return 0;
++}
++
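++/*
++ * Insert the unit into the active list, kept sorted by unit number. The
++ * first unit to become active pins the framebuffer; the last one to
++ * leave (vmw_ldu_del_active() above) unpins it.
++ */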
++static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
++			      struct vmw_legacy_display_unit *ldu,
++			      struct vmw_framebuffer *vfb)
++{
++	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
++	struct vmw_legacy_display_unit *entry;
++	struct list_head *at;
++
++	if (!list_empty(&ldu->active))
++		return 0;
++
++	at = &ld->active;
++	list_for_each_entry(entry, &ld->active, active) {
++		if (entry->unit > ldu->unit)
++			break;
++
++		at = &entry->active;
++	}
++
++	list_add(&ldu->active, at);
++	if (ld->num_active++ == 0) {
++		BUG_ON(ld->fb);
++		if (vfb->pin)
++			vfb->pin(vfb);
++		ld->fb = vfb;
++	}
++
++	return 0;
++}
++
++static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
++{
++	struct vmw_private *dev_priv;
++	struct vmw_legacy_display_unit *ldu;
++	struct drm_connector *connector;
++	struct drm_display_mode *mode;
++	struct drm_encoder *encoder;
++	struct vmw_framebuffer *vfb;
++	struct drm_framebuffer *fb;
++	struct drm_crtc *crtc;
++
++	if (!set)
++		return -EINVAL;
++
++	if (!set->crtc)
++		return -EINVAL;
++
++	/* get the ldu */
++	crtc = set->crtc;
++	ldu = vmw_crtc_to_ldu(crtc);
++	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
++	dev_priv = vmw_priv(crtc->dev);
++
++	if (set->num_connectors > 1) {
++		DRM_ERROR("to many connectors\n");
++		return -EINVAL;
++	}
++
++	if (set->num_connectors == 1 &&
++	    set->connectors[0] != &ldu->base.connector) {
++		DRM_ERROR("connector doesn't match %p %p\n",
++			set->connectors[0], &ldu->base.connector);
++		return -EINVAL;
++	}
++
++	/* ldu only supports one fb active at a time */
++	if (dev_priv->ldu_priv->fb && vfb &&
++	    dev_priv->ldu_priv->fb != vfb) {
++		DRM_ERROR("Multiple framebuffers not supported\n");
++		return -EINVAL;
++	}
++
++	/* since they always map one to one these are safe */
++	connector = &ldu->base.connector;
++	encoder = &ldu->base.encoder;
++
++	/* should we turn the crtc off? */
++	if (set->num_connectors == 0 || !set->mode || !set->fb) {
++
++		connector->encoder = NULL;
++		encoder->crtc = NULL;
++		crtc->fb = NULL;
++
++		vmw_ldu_del_active(dev_priv, ldu);
++
++		vmw_ldu_commit_list(dev_priv);
++
++		return 0;
++	}
++
++
++	/* we now know we want to set a mode */
++	mode = set->mode;
++	fb = set->fb;
++
++	if (set->x + mode->hdisplay > fb->width ||
++	    set->y + mode->vdisplay > fb->height) {
++		DRM_ERROR("set outside of framebuffer\n");
++		return -EINVAL;
++	}
++
++	vmw_fb_off(dev_priv);
++
++	crtc->fb = fb;
++	encoder->crtc = crtc;
++	connector->encoder = encoder;
++	crtc->x = set->x;
++	crtc->y = set->y;
++	crtc->mode = *mode;
++
++	vmw_ldu_add_active(dev_priv, ldu, vfb);
++
++	vmw_ldu_commit_list(dev_priv);
++
++	return 0;
++}
++
++static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
++	.save = vmw_ldu_crtc_save,
++	.restore = vmw_ldu_crtc_restore,
++	.cursor_set = vmw_du_crtc_cursor_set,
++	.cursor_move = vmw_du_crtc_cursor_move,
++	.gamma_set = vmw_ldu_crtc_gamma_set,
++	.destroy = vmw_ldu_crtc_destroy,
++	.set_config = vmw_ldu_crtc_set_config,
++};
++
++/*
++ * Legacy Display Unit encoder functions
++ */
++
++static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
++{
++	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
++}
++
++static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
++	.destroy = vmw_ldu_encoder_destroy,
++};
++
++/*
++ * Legacy Display Unit connector functions
++ */
++
++static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
++{
++}
++
++static void vmw_ldu_connector_save(struct drm_connector *connector)
++{
++}
++
++static void vmw_ldu_connector_restore(struct drm_connector *connector)
++{
++}
++
++static enum drm_connector_status
++	vmw_ldu_connector_detect(struct drm_connector *connector)
++{
++	/* XXX vmwctrl should control connection status */
++	if (vmw_connector_to_ldu(connector)->base.unit == 0)
++		return connector_status_connected;
++	return connector_status_disconnected;
++}
++
++static struct drm_display_mode vmw_ldu_connector_builtin[] = {
++	/* 640x480@60Hz */
++	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
++		   752, 800, 0, 480, 489, 492, 525, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 800x600@60Hz */
++	{ DRM_MODE("800x600",
++		   DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
++		   40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
++		   0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1024x768@60Hz */
++	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
++		   1184, 1344, 0, 768, 771, 777, 806, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1152x864@75Hz */
++	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
++		   1344, 1600, 0, 864, 865, 868, 900, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x768@60Hz */
++	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
++		   1472, 1664, 0, 768, 771, 778, 798, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x800@60Hz */
++	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
++		   1480, 1680, 0, 800, 803, 809, 831, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
++	/* 1280x960@60Hz */
++	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
++		   1488, 1800, 0, 960, 961, 964, 1000, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1280x1024@60Hz */
++	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
++		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1360x768@60Hz */
++	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
++		   1536, 1792, 0, 768, 771, 777, 795, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1400x1050@60Hz */
++	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
++		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1440x900@60Hz */
++	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
++		   1672, 1904, 0, 900, 903, 909, 934, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1600x1200@60Hz */
++	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
++		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
++		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1680x1050@60Hz */
++	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
++		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1792x1344@60Hz */
++	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
++		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1856x1392@60Hz */
++	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
++		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1200@60Hz */
++	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
++		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 1920x1440@60Hz */
++	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
++		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* 2560x1600@60Hz */
++	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
++		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
++		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
++	/* Terminate */
++	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
++};
++
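++/*
++ * Report the built-in modes that fit within the maximum framebuffer
++ * size; the zeroed sentinel entry terminates the table above.
++ */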
++static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
++					uint32_t max_width, uint32_t max_height)
++{
++	struct drm_device *dev = connector->dev;
++	struct drm_display_mode *mode = NULL;
++	int i;
++
++	for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
++		if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
++		    vmw_ldu_connector_builtin[i].vdisplay > max_height)
++			continue;
++
++		mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
++		if (!mode)
++			return 0;
++		mode->vrefresh = drm_mode_vrefresh(mode);
++
++		drm_mode_probed_add(connector, mode);
++	}
++
++	drm_mode_connector_list_update(connector);
++
++	return 1;
++}
++
++static int vmw_ldu_connector_set_property(struct drm_connector *connector,
++					  struct drm_property *property,
++					  uint64_t val)
++{
++	return 0;
++}
++
++static void vmw_ldu_connector_destroy(struct drm_connector *connector)
++{
++	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
++}
++
++static struct drm_connector_funcs vmw_legacy_connector_funcs = {
++	.dpms = vmw_ldu_connector_dpms,
++	.save = vmw_ldu_connector_save,
++	.restore = vmw_ldu_connector_restore,
++	.detect = vmw_ldu_connector_detect,
++	.fill_modes = vmw_ldu_connector_fill_modes,
++	.set_property = vmw_ldu_connector_set_property,
++	.destroy = vmw_ldu_connector_destroy,
++};
++
++static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
++{
++	struct vmw_legacy_display_unit *ldu;
++	struct drm_device *dev = dev_priv->dev;
++	struct drm_connector *connector;
++	struct drm_encoder *encoder;
++	struct drm_crtc *crtc;
++
++	ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
++	if (!ldu)
++		return -ENOMEM;
++
++	ldu->unit = unit;
++	crtc = &ldu->base.crtc;
++	encoder = &ldu->base.encoder;
++	connector = &ldu->base.connector;
++
++	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
++			   DRM_MODE_CONNECTOR_LVDS);
++	/* Initial status */
++	if (unit == 0)
++		connector->status = connector_status_connected;
++	else
++		connector->status = connector_status_disconnected;
++
++	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
++			 DRM_MODE_ENCODER_LVDS);
++	drm_mode_connector_attach_encoder(connector, encoder);
++	encoder->possible_crtcs = (1 << unit);
++	encoder->possible_clones = 0;
++
++	INIT_LIST_HEAD(&ldu->active);
++
++	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
++
++	drm_connector_attach_property(connector,
++				      dev->mode_config.dirty_info_property,
++				      1);
++
++	return 0;
++}
++
++int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
++{
++	if (dev_priv->ldu_priv) {
++		DRM_INFO("ldu system already on\n");
++		return -EINVAL;
++	}
++
++	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
++
++	if (!dev_priv->ldu_priv)
++		return -ENOMEM;
++
++	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
++	dev_priv->ldu_priv->num_active = 0;
++	dev_priv->ldu_priv->fb = NULL;
++
++	drm_mode_create_dirty_info_property(dev_priv->dev);
++
++	vmw_ldu_init(dev_priv, 0);
++	vmw_ldu_init(dev_priv, 1);
++	vmw_ldu_init(dev_priv, 2);
++	vmw_ldu_init(dev_priv, 3);
++	vmw_ldu_init(dev_priv, 4);
++	vmw_ldu_init(dev_priv, 5);
++	vmw_ldu_init(dev_priv, 6);
++	vmw_ldu_init(dev_priv, 7);
++
++	return 0;
++}
++
++int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
++{
++	if (!dev_priv->ldu_priv)
++		return -ENOSYS;
++
++	BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
++
++	kfree(dev_priv->ldu_priv);
++
++	return 0;
++}
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+new file mode 100644
+index 0000000..5b6eabe
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -0,0 +1,625 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++
++#include "drmP.h"
++#include "vmwgfx_drv.h"
++
++#include "ttm/ttm_placement.h"
++
++#include "svga_overlay.h"
++#include "svga_escape.h"
++
++#define VMW_MAX_NUM_STREAMS 1
++
++struct vmw_stream {
++	struct vmw_dma_buffer *buf;
++	bool claimed;
++	bool paused;
++	struct drm_vmw_control_stream_arg saved;
++};
++
++/**
++ * Overlay control
++ */
++struct vmw_overlay {
++	/*
++	 * Each stream is a single overlay. In Xv these are called ports.
++	 */
++	struct mutex mutex;
++	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
++};
++
++static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	return dev_priv ? dev_priv->overlay_priv : NULL;
++}
++
++struct vmw_escape_header {
++	uint32_t cmd;
++	SVGAFifoCmdEscape body;
++};
++
++struct vmw_escape_video_flush {
++	struct vmw_escape_header escape;
++	SVGAEscapeVideoFlush flush;
++};
++
++static inline void fill_escape(struct vmw_escape_header *header,
++			       uint32_t size)
++{
++	header->cmd = SVGA_CMD_ESCAPE;
++	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
++	header->body.size = size;
++}
++
++static inline void fill_flush(struct vmw_escape_video_flush *cmd,
++			      uint32_t stream_id)
++{
++	fill_escape(&cmd->escape, sizeof(cmd->flush));
++	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
++	cmd->flush.streamId = stream_id;
++}
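++
++/*
++ * Overlay state is driven through VMware escape commands: an
++ * SVGA_CMD_ESCAPE header carrying SVGA_ESCAPE_NSID_VMWARE, a register
++ * payload such as SET_REGS, and a VIDEO_FLUSH escape for the same
++ * stream.
++ */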
++
++/**
++ * Pin or unpin a buffer in vram.
++ *
++ * @dev_priv:  Driver private.
++ * @buf:  DMA buffer to pin or unpin.
++ * @pin:  Pin buffer in vram if true.
++ * @interruptible:  Use interruptible wait.
++ *
++ * Takes the current master's ttm lock in read mode.
++ *
++ * Returns
++ * -ERESTARTSYS if interrupted by a signal.
++ */
++static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
++				  struct vmw_dma_buffer *buf,
++				  bool pin, bool interruptible)
++{
++	struct ttm_buffer_object *bo = &buf->base;
++	struct ttm_placement *overlay_placement = &vmw_vram_placement;
++	int ret;
++
++	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
++	if (unlikely(ret != 0))
++		return ret;
++
++	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
++	if (unlikely(ret != 0))
++		goto err;
++
++	if (pin)
++		overlay_placement = &vmw_vram_ne_placement;
++
++	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
++
++	ttm_bo_unreserve(bo);
++
++err:
++	ttm_read_unlock(&dev_priv->active_master->lock);
++
++	return ret;
++}
++
++/**
++ * Send put command to hw.
++ *
++ * Returns
++ * -ERESTARTSYS if interrupted by a signal.
++ */
++static int vmw_overlay_send_put(struct vmw_private *dev_priv,
++				struct vmw_dma_buffer *buf,
++				struct drm_vmw_control_stream_arg *arg,
++				bool interruptible)
++{
++	struct {
++		struct vmw_escape_header escape;
++		struct {
++			struct {
++				uint32_t cmdType;
++				uint32_t streamId;
++			} header;
++			struct {
++				uint32_t registerId;
++				uint32_t value;
++			} items[SVGA_VIDEO_PITCH_3 + 1];
++		} body;
++		struct vmw_escape_video_flush flush;
++	} *cmds;
++	uint32_t offset;
++	int i, ret;
++
++	for (;;) {
++		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
++		if (cmds)
++			break;
++
++		ret = vmw_fallback_wait(dev_priv, false, true, 0,
++					interruptible, 3*HZ);
++		if (interruptible && ret == -ERESTARTSYS)
++			return ret;
++		else
++			BUG_ON(ret != 0);
++	}
++
++	fill_escape(&cmds->escape, sizeof(cmds->body));
++	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
++	cmds->body.header.streamId = arg->stream_id;
++
++	for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
++		cmds->body.items[i].registerId = i;
++
++	offset = buf->base.offset + arg->offset;
++
++	cmds->body.items[SVGA_VIDEO_ENABLED].value     = true;
++	cmds->body.items[SVGA_VIDEO_FLAGS].value       = arg->flags;
++	cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
++	cmds->body.items[SVGA_VIDEO_FORMAT].value      = arg->format;
++	cmds->body.items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
++	cmds->body.items[SVGA_VIDEO_SIZE].value        = arg->size;
++	cmds->body.items[SVGA_VIDEO_WIDTH].value       = arg->width;
++	cmds->body.items[SVGA_VIDEO_HEIGHT].value      = arg->height;
++	cmds->body.items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
++	cmds->body.items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
++	cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
++	cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
++	cmds->body.items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
++	cmds->body.items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
++	cmds->body.items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
++	cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
++	cmds->body.items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
++	cmds->body.items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
++	cmds->body.items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
++
++	fill_flush(&cmds->flush, arg->stream_id);
++
++	vmw_fifo_commit(dev_priv, sizeof(*cmds));
++
++	return 0;
++}
++
++/**
++ * Send stop command to hw.
++ *
++ * Returns
++ * -ERESTARTSYS if interrupted by a signal.
++ */
++static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
++				 uint32_t stream_id,
++				 bool interruptible)
++{
++	struct {
++		struct vmw_escape_header escape;
++		SVGAEscapeVideoSetRegs body;
++		struct vmw_escape_video_flush flush;
++	} *cmds;
++	int ret;
++
++	for (;;) {
++		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
++		if (cmds)
++			break;
++
++		ret = vmw_fallback_wait(dev_priv, false, true, 0,
++					interruptible, 3*HZ);
++		if (interruptible && ret == -ERESTARTSYS)
++			return ret;
++		else
++			BUG_ON(ret != 0);
++	}
++
++	fill_escape(&cmds->escape, sizeof(cmds->body));
++	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
++	cmds->body.header.streamId = stream_id;
++	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
++	cmds->body.items[0].value = false;
++	fill_flush(&cmds->flush, stream_id);
++
++	vmw_fifo_commit(dev_priv, sizeof(*cmds));
++
++	return 0;
++}
++
++/**
++ * Stop or pause a stream.
++ *
++ * If the stream is paused, the no-evict flag is removed from the buffer,
++ * but the buffer is left in vram. This allows, for instance, mode_set to
++ * evict it should it need to.
++ *
++ * The caller must hold the overlay lock.
++ *
++ * @stream_id which stream to stop/pause.
++ * @pause true to pause, false to stop completely.
++ */
++static int vmw_overlay_stop(struct vmw_private *dev_priv,
++			    uint32_t stream_id, bool pause,
++			    bool interruptible)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	struct vmw_stream *stream = &overlay->stream[stream_id];
++	int ret;
++
++	/* no buffer attached, so the stream is already completely stopped */
++	if (!stream->buf)
++		return 0;
++
++	/* If the stream is paused this is already done */
++	if (!stream->paused) {
++		ret = vmw_overlay_send_stop(dev_priv, stream_id,
++					    interruptible);
++		if (ret)
++			return ret;
++
++		/* We just remove the NO_EVICT flag so no -ENOMEM */
++		ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
++					     interruptible);
++		if (interruptible && ret == -ERESTARTSYS)
++			return ret;
++		else
++			BUG_ON(ret != 0);
++	}
++
++	if (!pause) {
++		vmw_dmabuf_unreference(&stream->buf);
++		stream->paused = false;
++	} else {
++		stream->paused = true;
++	}
++
++	return 0;
++}
++
++/**
++ * Update a stream and send any put or stop fifo commands needed.
++ *
++ * The caller must hold the overlay lock.
++ *
++ * Returns
++ * -ENOMEM if the buffer doesn't fit in vram.
++ * -ERESTARTSYS if interrupted.
++ */
++static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
++				     struct vmw_dma_buffer *buf,
++				     struct drm_vmw_control_stream_arg *arg,
++				     bool interruptible)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
++	int ret = 0;
++
++	if (!buf)
++		return -EINVAL;
++
++	DRM_DEBUG("   %s: old %p, new %p, %spaused\n", __func__,
++		  stream->buf, buf, stream->paused ? "" : "not ");
++
++	if (stream->buf != buf) {
++		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
++				       false, interruptible);
++		if (ret)
++			return ret;
++	} else if (!stream->paused) {
++		/* If the buffers match and the stream is not paused,
++		 * just send the put command; no need to do anything else.
++		 */
++		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
++		if (ret == 0)
++			stream->saved = *arg;
++		else
++			BUG_ON(!interruptible);
++
++		return ret;
++	}
++
++	/* We don't start the old stream if we are interrupted.
++	 * Might return -ENOMEM if it can't fit the buffer in vram.
++	 */
++	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
++	if (ret)
++		return ret;
++
++	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
++	if (ret) {
++		/* This one needs to happen no matter what. We only remove
++		 * the NO_EVICT flag so this is safe from -ENOMEM.
++		 */
++		BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
++		return ret;
++	}
++
++	if (stream->buf != buf)
++		stream->buf = vmw_dmabuf_reference(buf);
++	stream->saved = *arg;
++
++	return 0;
++}
++
++/**
++ * Stop all streams.
++ *
++ * Used by the fb code when starting.
++ *
++ * Takes the overlay lock.
++ */
++int vmw_overlay_stop_all(struct vmw_private *dev_priv)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	int i, ret;
++
++	if (!overlay)
++		return 0;
++
++	mutex_lock(&overlay->mutex);
++
++	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
++		struct vmw_stream *stream = &overlay->stream[i];
++		if (!stream->buf)
++			continue;
++
++		ret = vmw_overlay_stop(dev_priv, i, false, false);
++		WARN_ON(ret != 0);
++	}
++
++	mutex_unlock(&overlay->mutex);
++
++	return 0;
++}
++
++/**
++ * Try to resume all paused streams.
++ *
++ * Used by the kms code after moving a new scanout buffer to vram.
++ *
++ * Takes the overlay lock.
++ */
++int vmw_overlay_resume_all(struct vmw_private *dev_priv)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	int i, ret;
++
++	if (!overlay)
++		return 0;
++
++	mutex_lock(&overlay->mutex);
++
++	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
++		struct vmw_stream *stream = &overlay->stream[i];
++		if (!stream->paused)
++			continue;
++
++		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
++						&stream->saved, false);
++		if (ret != 0)
++			DRM_INFO("%s: *warning* failed to resume stream %i\n",
++				 __func__, i);
++	}
++
++	mutex_unlock(&overlay->mutex);
++
++	return 0;
++}
++
++/**
++ * Pauses all active streams.
++ *
++ * Used by the kms code when moving a new scanout buffer to vram.
++ *
++ * Takes the overlay lock.
++ */
++int vmw_overlay_pause_all(struct vmw_private *dev_priv)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	int i, ret;
++
++	if (!overlay)
++		return 0;
++
++	mutex_lock(&overlay->mutex);
++
++	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
++		if (overlay->stream[i].paused)
++			DRM_INFO("%s: *warning* stream %i already paused\n",
++				 __func__, i);
++		ret = vmw_overlay_stop(dev_priv, i, true, false);
++		WARN_ON(ret != 0);
++	}
++
++	mutex_unlock(&overlay->mutex);
++
++	return 0;
++}
++
++int vmw_overlay_ioctl(struct drm_device *dev, void *data,
++		      struct drm_file *file_priv)
++{
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	struct drm_vmw_control_stream_arg *arg =
++	    (struct drm_vmw_control_stream_arg *)data;
++	struct vmw_dma_buffer *buf;
++	struct vmw_resource *res;
++	int ret;
++
++	if (!overlay)
++		return -ENOSYS;
++
++	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
++	if (ret)
++		return ret;
++
++	mutex_lock(&overlay->mutex);
++
++	if (!arg->enabled) {
++		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
++		goto out_unlock;
++	}
++
++	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
++	if (ret)
++		goto out_unlock;
++
++	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
++
++	vmw_dmabuf_unreference(&buf);
++
++out_unlock:
++	mutex_unlock(&overlay->mutex);
++	vmw_resource_unreference(&res);
++
++	return ret;
++}
++
++int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
++{
++	if (!dev_priv->overlay_priv)
++		return 0;
++
++	return VMW_MAX_NUM_STREAMS;
++}
++
++int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	int i, k;
++
++	if (!overlay)
++		return 0;
++
++	mutex_lock(&overlay->mutex);
++
++	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
++		if (!overlay->stream[i].claimed)
++			k++;
++
++	mutex_unlock(&overlay->mutex);
++
++	return k;
++}
++
++int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	int i;
++
++	if (!overlay)
++		return -ENOSYS;
++
++	mutex_lock(&overlay->mutex);
++
++	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
++
++		if (overlay->stream[i].claimed)
++			continue;
++
++		overlay->stream[i].claimed = true;
++		*out = i;
++		mutex_unlock(&overlay->mutex);
++		return 0;
++	}
++
++	mutex_unlock(&overlay->mutex);
++	return -ESRCH;
++}
++
++int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++
++	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
++
++	if (!overlay)
++		return -ENOSYS;
++
++	mutex_lock(&overlay->mutex);
++
++	WARN_ON(!overlay->stream[stream_id].claimed);
++	vmw_overlay_stop(dev_priv, stream_id, false, false);
++	overlay->stream[stream_id].claimed = false;
++
++	mutex_unlock(&overlay->mutex);
++	return 0;
++}
++
++int vmw_overlay_init(struct vmw_private *dev_priv)
++{
++	struct vmw_overlay *overlay;
++	int i;
++
++	if (dev_priv->overlay_priv)
++		return -EINVAL;
++
++	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
++	     (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
++		DRM_INFO("hardware doesn't support overlays\n");
++		return -ENOSYS;
++	}
++
++	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
++	if (!overlay)
++		return -ENOMEM;
++
++	memset(overlay, 0, sizeof(*overlay));
++	mutex_init(&overlay->mutex);
++	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
++		overlay->stream[i].buf = NULL;
++		overlay->stream[i].paused = false;
++		overlay->stream[i].claimed = false;
++	}
++
++	dev_priv->overlay_priv = overlay;
++
++	return 0;
++}
++
++int vmw_overlay_close(struct vmw_private *dev_priv)
++{
++	struct vmw_overlay *overlay = dev_priv->overlay_priv;
++	bool forgotten_buffer = false;
++	int i;
++
++	if (!overlay)
++		return -ENOSYS;
++
++	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
++		if (overlay->stream[i].buf) {
++			forgotten_buffer = true;
++			vmw_overlay_stop(dev_priv, i, false, false);
++		}
++	}
++
++	WARN_ON(forgotten_buffer);
++
++	dev_priv->overlay_priv = NULL;
++	kfree(overlay);
++
++	return 0;
++}
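
For orientation, here is a minimal kernel-side sketch of the lifecycle the
functions above implement: claim a stream id, push a buffer to it under the
overlay lock, and release the id when done. The buf and arg values are
assumed to come from the usual ioctl lookups, as in vmw_overlay_ioctl():

	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	uint32_t stream_id;
	int ret;

	ret = vmw_overlay_claim(dev_priv, &stream_id);
	if (ret)
		return ret;		/* -ESRCH when all streams are claimed */

	mutex_lock(&overlay->mutex);	/* update_stream requires it */
	arg->stream_id = stream_id;
	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
	mutex_unlock(&overlay->mutex);

	/* ... later, stop the stream and release the id: */
	vmw_overlay_unref(dev_priv, stream_id);
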
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+new file mode 100644
+index 0000000..9d0dd3a
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+@@ -0,0 +1,57 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++/**
++ * This file contains virtual hardware defines for kernel space.
++ */
++
++#ifndef _VMWGFX_REG_H_
++#define _VMWGFX_REG_H_
++
++#include <linux/types.h>
++
++#define VMWGFX_INDEX_PORT     0x0
++#define VMWGFX_VALUE_PORT     0x1
++#define VMWGFX_IRQSTATUS_PORT 0x8
++
++struct svga_guest_mem_descriptor {
++	__le32 ppn;
++	__le32 num_pages;
++};
++
++struct svga_fifo_cmd_fence {
++	__le32 fence;
++};
++
++#define SVGA_SYNC_GENERIC         1
++#define SVGA_SYNC_FIFOFULL        2
++
++#include "svga_types.h"
++
++#include "svga3d_reg.h"
++
++#endif
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+new file mode 100644
+index 0000000..f8fbbc6
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -0,0 +1,1187 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "vmwgfx_drv.h"
++#include "vmwgfx_drm.h"
++#include "ttm/ttm_object.h"
++#include "ttm/ttm_placement.h"
++#include "drmP.h"
++
++#define VMW_RES_CONTEXT ttm_driver_type0
++#define VMW_RES_SURFACE ttm_driver_type1
++#define VMW_RES_STREAM ttm_driver_type2
++
++struct vmw_user_context {
++	struct ttm_base_object base;
++	struct vmw_resource res;
++};
++
++struct vmw_user_surface {
++	struct ttm_base_object base;
++	struct vmw_surface srf;
++};
++
++struct vmw_user_dma_buffer {
++	struct ttm_base_object base;
++	struct vmw_dma_buffer dma;
++};
++
++struct vmw_bo_user_rep {
++	uint32_t handle;
++	uint64_t map_handle;
++};
++
++struct vmw_stream {
++	struct vmw_resource res;
++	uint32_t stream_id;
++};
++
++struct vmw_user_stream {
++	struct ttm_base_object base;
++	struct vmw_stream stream;
++};
++
++static inline struct vmw_dma_buffer *
++vmw_dma_buffer(struct ttm_buffer_object *bo)
++{
++	return container_of(bo, struct vmw_dma_buffer, base);
++}
++
++static inline struct vmw_user_dma_buffer *
++vmw_user_dma_buffer(struct ttm_buffer_object *bo)
++{
++	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
++	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
++}
++
++struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
++{
++	kref_get(&res->kref);
++	return res;
++}
++
++static void vmw_resource_release(struct kref *kref)
++{
++	struct vmw_resource *res =
++	    container_of(kref, struct vmw_resource, kref);
++	struct vmw_private *dev_priv = res->dev_priv;
++
++	idr_remove(res->idr, res->id);
++	write_unlock(&dev_priv->resource_lock);
++
++	if (likely(res->hw_destroy != NULL))
++		res->hw_destroy(res);
++
++	if (res->res_free != NULL)
++		res->res_free(res);
++	else
++		kfree(res);
++
++	write_lock(&dev_priv->resource_lock);
++}
++
++void vmw_resource_unreference(struct vmw_resource **p_res)
++{
++	struct vmw_resource *res = *p_res;
++	struct vmw_private *dev_priv = res->dev_priv;
++
++	*p_res = NULL;
++	write_lock(&dev_priv->resource_lock);
++	kref_put(&res->kref, vmw_resource_release);
++	write_unlock(&dev_priv->resource_lock);
++}
++
++static int vmw_resource_init(struct vmw_private *dev_priv,
++			     struct vmw_resource *res,
++			     struct idr *idr,
++			     enum ttm_object_type obj_type,
++			     void (*res_free) (struct vmw_resource *res))
++{
++	int ret;
++
++	kref_init(&res->kref);
++	res->hw_destroy = NULL;
++	res->res_free = res_free;
++	res->res_type = obj_type;
++	res->idr = idr;
++	res->avail = false;
++	res->dev_priv = dev_priv;
++
++	do {
++		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
++			return -ENOMEM;
++
++		write_lock(&dev_priv->resource_lock);
++		ret = idr_get_new_above(idr, res, 1, &res->id);
++		write_unlock(&dev_priv->resource_lock);
++
++	} while (ret == -EAGAIN);
++
++	return ret;
++}
++
++/**
++ * vmw_resource_activate
++ *
++ * @res:        Pointer to the newly created resource
++ * @hw_destroy: Destroy function. NULL if none.
++ *
++ * Activate a resource after the hardware has been made aware of it.
++ * Set the destroy function to @hw_destroy. Typically this frees the
++ * resource and destroys the hardware resources associated with it.
++ * Activation means that the function vmw_resource_lookup will
++ * find the resource.
++ */
++
++static void vmw_resource_activate(struct vmw_resource *res,
++				  void (*hw_destroy) (struct vmw_resource *))
++{
++	struct vmw_private *dev_priv = res->dev_priv;
++
++	write_lock(&dev_priv->resource_lock);
++	res->avail = true;
++	res->hw_destroy = hw_destroy;
++	write_unlock(&dev_priv->resource_lock);
++}
++
++struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
++					 struct idr *idr, int id)
++{
++	struct vmw_resource *res;
++
++	read_lock(&dev_priv->resource_lock);
++	res = idr_find(idr, id);
++	if (res && res->avail)
++		kref_get(&res->kref);
++	else
++		res = NULL;
++	read_unlock(&dev_priv->resource_lock);
++
++	if (unlikely(res == NULL))
++		return NULL;
++
++	return res;
++}
++
++/**
++ * Context management:
++ */
++
++static void vmw_hw_context_destroy(struct vmw_resource *res)
++{
++
++	struct vmw_private *dev_priv = res->dev_priv;
++	struct {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdDestroyContext body;
++	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
++
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Failed reserving FIFO space for context "
++			  "destruction.\n");
++		return;
++	}
++
++	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
++	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
++	cmd->body.cid = cpu_to_le32(res->id);
++
++	vmw_fifo_commit(dev_priv, sizeof(*cmd));
++}
++
++static int vmw_context_init(struct vmw_private *dev_priv,
++			    struct vmw_resource *res,
++			    void (*res_free) (struct vmw_resource *res))
++{
++	int ret;
++
++	struct {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdDefineContext body;
++	} *cmd;
++
++	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
++				VMW_RES_CONTEXT, res_free);
++
++	if (unlikely(ret != 0)) {
++		if (res_free == NULL)
++			kfree(res);
++		else
++			res_free(res);
++		return ret;
++	}
++
++	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Fifo reserve failed.\n");
++		vmw_resource_unreference(&res);
++		return -ENOMEM;
++	}
++
++	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
++	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
++	cmd->body.cid = cpu_to_le32(res->id);
++
++	vmw_fifo_commit(dev_priv, sizeof(*cmd));
++	vmw_resource_activate(res, vmw_hw_context_destroy);
++	return 0;
++}
++
++struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
++{
++	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
++	int ret;
++
++	if (unlikely(res == NULL))
++		return NULL;
++
++	ret = vmw_context_init(dev_priv, res, NULL);
++	return (ret == 0) ? res : NULL;
++}
++
++/**
++ * User-space context management:
++ */
++
++static void vmw_user_context_free(struct vmw_resource *res)
++{
++	struct vmw_user_context *ctx =
++	    container_of(res, struct vmw_user_context, res);
++
++	kfree(ctx);
++}
++
++/**
++ * This function is called when user space has no more references on the
++ * base object. It releases the base-object's reference on the resource object.
++ */
++
++static void vmw_user_context_base_release(struct ttm_base_object **p_base)
++{
++	struct ttm_base_object *base = *p_base;
++	struct vmw_user_context *ctx =
++	    container_of(base, struct vmw_user_context, base);
++	struct vmw_resource *res = &ctx->res;
++
++	*p_base = NULL;
++	vmw_resource_unreference(&res);
++}
++
++int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_resource *res;
++	struct vmw_user_context *ctx;
++	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	int ret = 0;
++
++	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
++	if (unlikely(res == NULL))
++		return -EINVAL;
++
++	if (res->res_free != &vmw_user_context_free) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	ctx = container_of(res, struct vmw_user_context, res);
++	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
++		ret = -EPERM;
++		goto out;
++	}
++
++	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
++out:
++	vmw_resource_unreference(&res);
++	return ret;
++}
++
++int vmw_context_define_ioctl(struct drm_device *dev, void *data,
++			     struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
++	struct vmw_resource *res;
++	struct vmw_resource *tmp;
++	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	int ret;
++
++	if (unlikely(ctx == NULL))
++		return -ENOMEM;
++
++	res = &ctx->res;
++	ctx->base.shareable = false;
++	ctx->base.tfile = NULL;
++
++	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
++	if (unlikely(ret != 0))
++		return ret;
++
++	tmp = vmw_resource_reference(&ctx->res);
++	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
++				   &vmw_user_context_base_release, NULL);
++
++	if (unlikely(ret != 0)) {
++		vmw_resource_unreference(&tmp);
++		goto out_err;
++	}
++
++	arg->cid = res->id;
++out_err:
++	vmw_resource_unreference(&res);
++	return ret;
++
++}
++
++int vmw_context_check(struct vmw_private *dev_priv,
++		      struct ttm_object_file *tfile,
++		      int id)
++{
++	struct vmw_resource *res;
++	int ret = 0;
++
++	read_lock(&dev_priv->resource_lock);
++	res = idr_find(&dev_priv->context_idr, id);
++	if (res && res->avail) {
++		struct vmw_user_context *ctx =
++			container_of(res, struct vmw_user_context, res);
++		if (ctx->base.tfile != tfile && !ctx->base.shareable)
++			ret = -EPERM;
++	} else
++		ret = -EINVAL;
++	read_unlock(&dev_priv->resource_lock);
++
++	return ret;
++}
++
++
++/**
++ * Surface management.
++ */
++
++static void vmw_hw_surface_destroy(struct vmw_resource *res)
++{
++
++	struct vmw_private *dev_priv = res->dev_priv;
++	struct {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdDestroySurface body;
++	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
++
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Failed reserving FIFO space for surface "
++			  "destruction.\n");
++		return;
++	}
++
++	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
++	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
++	cmd->body.sid = cpu_to_le32(res->id);
++
++	vmw_fifo_commit(dev_priv, sizeof(*cmd));
++}
++
++void vmw_surface_res_free(struct vmw_resource *res)
++{
++	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
++
++	kfree(srf->sizes);
++	kfree(srf->snooper.image);
++	kfree(srf);
++}
++
++int vmw_surface_init(struct vmw_private *dev_priv,
++		     struct vmw_surface *srf,
++		     void (*res_free) (struct vmw_resource *res))
++{
++	int ret;
++	struct {
++		SVGA3dCmdHeader header;
++		SVGA3dCmdDefineSurface body;
++	} *cmd;
++	SVGA3dSize *cmd_size;
++	struct vmw_resource *res = &srf->res;
++	struct drm_vmw_size *src_size;
++	size_t submit_size;
++	uint32_t cmd_len;
++	int i;
++
++	BUG_ON(res_free == NULL);
++	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
++				VMW_RES_SURFACE, res_free);
++
++	if (unlikely(ret != 0)) {
++		res_free(res);
++		return ret;
++	}
++
++	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
++	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
++
++	cmd = vmw_fifo_reserve(dev_priv, submit_size);
++	if (unlikely(cmd == NULL)) {
++		DRM_ERROR("Fifo reserve failed for surface creation.\n");
++		vmw_resource_unreference(&res);
++		return -ENOMEM;
++	}
++
++	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
++	cmd->header.size = cpu_to_le32(cmd_len);
++	cmd->body.sid = cpu_to_le32(res->id);
++	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
++	cmd->body.format = cpu_to_le32(srf->format);
++	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
++		cmd->body.face[i].numMipLevels =
++		    cpu_to_le32(srf->mip_levels[i]);
++	}
++
++	cmd += 1;
++	cmd_size = (SVGA3dSize *) cmd;
++	src_size = srf->sizes;
++
++	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
++		cmd_size->width = cpu_to_le32(src_size->width);
++		cmd_size->height = cpu_to_le32(src_size->height);
++		cmd_size->depth = cpu_to_le32(src_size->depth);
++	}
++
++	vmw_fifo_commit(dev_priv, submit_size);
++	vmw_resource_activate(res, vmw_hw_surface_destroy);
++	return 0;
++}
++
++static void vmw_user_surface_free(struct vmw_resource *res)
++{
++	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
++	struct vmw_user_surface *user_srf =
++	    container_of(srf, struct vmw_user_surface, srf);
++
++	kfree(srf->sizes);
++	kfree(srf->snooper.image);
++	kfree(user_srf);
++}
++
++int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
++				   struct ttm_object_file *tfile,
++				   uint32_t handle, struct vmw_surface **out)
++{
++	struct vmw_resource *res;
++	struct vmw_surface *srf;
++	struct vmw_user_surface *user_srf;
++	struct ttm_base_object *base;
++	int ret = -EINVAL;
++
++	base = ttm_base_object_lookup(tfile, handle);
++	if (unlikely(base == NULL))
++		return -EINVAL;
++
++	if (unlikely(base->object_type != VMW_RES_SURFACE))
++		goto out_bad_resource;
++
++	user_srf = container_of(base, struct vmw_user_surface, base);
++	srf = &user_srf->srf;
++	res = &srf->res;
++
++	read_lock(&dev_priv->resource_lock);
++
++	if (!res->avail || res->res_free != &vmw_user_surface_free) {
++		read_unlock(&dev_priv->resource_lock);
++		goto out_bad_resource;
++	}
++
++	kref_get(&res->kref);
++	read_unlock(&dev_priv->resource_lock);
++
++	*out = srf;
++	ret = 0;
++
++out_bad_resource:
++	ttm_base_object_unref(&base);
++
++	return ret;
++}
++
++static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
++{
++	struct ttm_base_object *base = *p_base;
++	struct vmw_user_surface *user_srf =
++	    container_of(base, struct vmw_user_surface, base);
++	struct vmw_resource *res = &user_srf->srf.res;
++
++	*p_base = NULL;
++	vmw_resource_unreference(&res);
++}
++
++int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
++			      struct drm_file *file_priv)
++{
++	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++
++	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
++}
++
++int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
++			     struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_user_surface *user_srf =
++	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
++	struct vmw_surface *srf;
++	struct vmw_resource *res;
++	struct vmw_resource *tmp;
++	union drm_vmw_surface_create_arg *arg =
++	    (union drm_vmw_surface_create_arg *)data;
++	struct drm_vmw_surface_create_req *req = &arg->req;
++	struct drm_vmw_surface_arg *rep = &arg->rep;
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	struct drm_vmw_size __user *user_sizes;
++	int ret;
++	int i;
++
++	if (unlikely(user_srf == NULL))
++		return -ENOMEM;
++
++	srf = &user_srf->srf;
++	res = &srf->res;
++
++	srf->flags = req->flags;
++	srf->format = req->format;
++	srf->scanout = req->scanout;
++	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
++	srf->num_sizes = 0;
++	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
++		srf->num_sizes += srf->mip_levels[i];
++
++	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
++	    DRM_VMW_MAX_MIP_LEVELS) {
++		ret = -EINVAL;
++		goto out_err0;
++	}
++
++	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
++	if (unlikely(srf->sizes == NULL)) {
++		ret = -ENOMEM;
++		goto out_err0;
++	}
++
++	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
++	    req->size_addr;
++
++	ret = copy_from_user(srf->sizes, user_sizes,
++			     srf->num_sizes * sizeof(*srf->sizes));
++	if (unlikely(ret != 0))
++		goto out_err1;
++
++	if (srf->scanout &&
++	    srf->num_sizes == 1 &&
++	    srf->sizes[0].width == 64 &&
++	    srf->sizes[0].height == 64 &&
++	    srf->format == SVGA3D_A8R8G8B8) {
++
++		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
++		/* clear the image */
++		if (srf->snooper.image) {
++			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
++		} else {
++			DRM_ERROR("Failed to allocate cursor_image\n");
++			ret = -ENOMEM;
++			goto out_err1;
++		}
++	} else {
++		srf->snooper.image = NULL;
++	}
++	srf->snooper.crtc = NULL;
++
++	user_srf->base.shareable = false;
++	user_srf->base.tfile = NULL;
++
++	/**
++	 * From this point, the generic resource management functions
++	 * destroy the object on failure.
++	 */
++
++	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
++	if (unlikely(ret != 0))
++		return ret;
++
++	tmp = vmw_resource_reference(&srf->res);
++	ret = ttm_base_object_init(tfile, &user_srf->base,
++				   req->shareable, VMW_RES_SURFACE,
++				   &vmw_user_surface_base_release, NULL);
++
++	if (unlikely(ret != 0)) {
++		vmw_resource_unreference(&tmp);
++		vmw_resource_unreference(&res);
++		return ret;
++	}
++
++	rep->sid = user_srf->base.hash.key;
++	if (rep->sid == SVGA3D_INVALID_ID)
++		DRM_ERROR("Created bad Surface ID.\n");
++
++	vmw_resource_unreference(&res);
++	return 0;
++out_err1:
++	kfree(srf->sizes);
++out_err0:
++	kfree(user_srf);
++	return ret;
++}
++
++int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
++				struct drm_file *file_priv)
++{
++	union drm_vmw_surface_reference_arg *arg =
++	    (union drm_vmw_surface_reference_arg *)data;
++	struct drm_vmw_surface_arg *req = &arg->req;
++	struct drm_vmw_surface_create_req *rep = &arg->rep;
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	struct vmw_surface *srf;
++	struct vmw_user_surface *user_srf;
++	struct drm_vmw_size __user *user_sizes;
++	struct ttm_base_object *base;
++	int ret = -EINVAL;
++
++	base = ttm_base_object_lookup(tfile, req->sid);
++	if (unlikely(base == NULL)) {
++		DRM_ERROR("Could not find surface to reference.\n");
++		return -EINVAL;
++	}
++
++	if (unlikely(base->object_type != VMW_RES_SURFACE))
++		goto out_bad_resource;
++
++	user_srf = container_of(base, struct vmw_user_surface, base);
++	srf = &user_srf->srf;
++
++	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Could not add a reference to a surface.\n");
++		goto out_no_reference;
++	}
++
++	rep->flags = srf->flags;
++	rep->format = srf->format;
++	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
++	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
++	    rep->size_addr;
++
++	if (user_sizes)
++		ret = copy_to_user(user_sizes, srf->sizes,
++				   srf->num_sizes * sizeof(*srf->sizes));
++	if (unlikely(ret != 0))
++		DRM_ERROR("copy_to_user failed %p %u\n",
++			  user_sizes, srf->num_sizes);
++out_bad_resource:
++out_no_reference:
++	ttm_base_object_unref(&base);
++
++	return ret;
++}
++
++int vmw_surface_check(struct vmw_private *dev_priv,
++		      struct ttm_object_file *tfile,
++		      uint32_t handle, int *id)
++{
++	struct ttm_base_object *base;
++	struct vmw_user_surface *user_srf;
++
++	int ret = -EPERM;
++
++	base = ttm_base_object_lookup(tfile, handle);
++	if (unlikely(base == NULL))
++		return -EINVAL;
++
++	if (unlikely(base->object_type != VMW_RES_SURFACE))
++		goto out_bad_surface;
++
++	user_srf = container_of(base, struct vmw_user_surface, base);
++	*id = user_srf->srf.res.id;
++	ret = 0;
++
++out_bad_surface:
++	/**
++	 * FIXME: May deadlock here when called from the
++	 * command parsing code.
++	 */
++
++	ttm_base_object_unref(&base);
++	return ret;
++}
++
++/**
++ * Buffer management.
++ */
++
++static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
++				  unsigned long num_pages)
++{
++	static size_t bo_user_size = ~0;
++
++	size_t page_array_size =
++	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
++
++	if (unlikely(bo_user_size == ~0)) {
++		bo_user_size = glob->ttm_bo_extra_size +
++		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
++	}
++
++	return bo_user_size + page_array_size;
++}
++
++void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
++{
++	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
++	struct ttm_bo_global *glob = bo->glob;
++	struct vmw_private *dev_priv =
++		container_of(bo->bdev, struct vmw_private, bdev);
++
++	if (vmw_bo->gmr_bound) {
++		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
++		spin_lock(&glob->lru_lock);
++		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
++		spin_unlock(&glob->lru_lock);
++		vmw_bo->gmr_bound = false;
++	}
++}
++
++void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
++{
++	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
++	struct ttm_bo_global *glob = bo->glob;
++
++	vmw_dmabuf_gmr_unbind(bo);
++	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
++	kfree(vmw_bo);
++}
++
++int vmw_dmabuf_init(struct vmw_private *dev_priv,
++		    struct vmw_dma_buffer *vmw_bo,
++		    size_t size, struct ttm_placement *placement,
++		    bool interruptible,
++		    void (*bo_free) (struct ttm_buffer_object *bo))
++{
++	struct ttm_bo_device *bdev = &dev_priv->bdev;
++	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
++	size_t acc_size;
++	int ret;
++
++	BUG_ON(!bo_free);
++
++	acc_size =
++	    vmw_dmabuf_acc_size(bdev->glob,
++				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);
++
++	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
++	if (unlikely(ret != 0)) {
++		/* We must free the bo here, mirroring what
++		 * ttm_bo_init() does on its own failure paths. */
++		bo_free(&vmw_bo->base);
++		return ret;
++	}
++
++	memset(vmw_bo, 0, sizeof(*vmw_bo));
++
++	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
++	INIT_LIST_HEAD(&vmw_bo->validate_list);
++	vmw_bo->gmr_id = 0;
++	vmw_bo->gmr_bound = false;
++
++	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
++			  ttm_bo_type_device, placement,
++			  0, 0, interruptible,
++			  NULL, acc_size, bo_free);
++	return ret;
++}
++
++static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
++{
++	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
++	struct ttm_bo_global *glob = bo->glob;
++
++	vmw_dmabuf_gmr_unbind(bo);
++	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
++	kfree(vmw_user_bo);
++}
++
++static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
++{
++	struct vmw_user_dma_buffer *vmw_user_bo;
++	struct ttm_base_object *base = *p_base;
++	struct ttm_buffer_object *bo;
++
++	*p_base = NULL;
++
++	if (unlikely(base == NULL))
++		return;
++
++	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
++	bo = &vmw_user_bo->dma.base;
++	ttm_bo_unref(&bo);
++}
++
++int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	union drm_vmw_alloc_dmabuf_arg *arg =
++	    (union drm_vmw_alloc_dmabuf_arg *)data;
++	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
++	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
++	struct vmw_user_dma_buffer *vmw_user_bo;
++	struct ttm_buffer_object *tmp;
++	struct vmw_master *vmaster = vmw_master(file_priv->master);
++	int ret;
++
++	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
++	if (unlikely(vmw_user_bo == NULL))
++		return -ENOMEM;
++
++	ret = ttm_read_lock(&vmaster->lock, true);
++	if (unlikely(ret != 0)) {
++		kfree(vmw_user_bo);
++		return ret;
++	}
++
++	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
++			      &vmw_vram_sys_placement, true,
++			      &vmw_user_dmabuf_destroy);
++	if (unlikely(ret != 0)) {
++		ttm_read_unlock(&vmaster->lock);
++		return ret;
++	}
++
++	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
++	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
++				   &vmw_user_bo->base,
++				   false,
++				   ttm_buffer_type,
++				   &vmw_user_dmabuf_release, NULL);
++	if (unlikely(ret != 0)) {
++		struct ttm_buffer_object *bo = &vmw_user_bo->dma.base;
++
++		/* Drop both the temporary reference and the initial
++		 * one; unreferencing tmp twice would hit a NULL
++		 * pointer, and the base object's release will never
++		 * run to drop the initial reference for us. */
++		ttm_bo_unref(&tmp);
++		ttm_bo_unref(&bo);
++		ttm_read_unlock(&vmaster->lock);
++		return ret;
++	}
++
++	rep->handle = vmw_user_bo->base.hash.key;
++	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
++	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
++	rep->cur_gmr_offset = 0;
++
++	ttm_bo_unref(&tmp);
++
++	ttm_read_unlock(&vmaster->lock);
++
++	return 0;
++}
++
++int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv)
++{
++	struct drm_vmw_unref_dmabuf_arg *arg =
++	    (struct drm_vmw_unref_dmabuf_arg *)data;
++
++	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
++					 arg->handle,
++					 TTM_REF_USAGE);
++}
++
++uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
++				  uint32_t cur_validate_node)
++{
++	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
++
++	if (likely(vmw_bo->on_validate_list))
++		return vmw_bo->cur_validate_node;
++
++	vmw_bo->cur_validate_node = cur_validate_node;
++	vmw_bo->on_validate_list = true;
++
++	return cur_validate_node;
++}
++
++void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
++{
++	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
++
++	vmw_bo->on_validate_list = false;
++}
++
++uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
++{
++	struct vmw_dma_buffer *vmw_bo;
++
++	if (bo->mem.mem_type == TTM_PL_VRAM)
++		return SVGA_GMR_FRAMEBUFFER;
++
++	vmw_bo = vmw_dma_buffer(bo);
++
++	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
++}
++
++void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
++{
++	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
++	vmw_bo->gmr_bound = true;
++	vmw_bo->gmr_id = id;
++}
++
++int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
++			   uint32_t handle, struct vmw_dma_buffer **out)
++{
++	struct vmw_user_dma_buffer *vmw_user_bo;
++	struct ttm_base_object *base;
++
++	base = ttm_base_object_lookup(tfile, handle);
++	if (unlikely(base == NULL)) {
++		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++		       (unsigned long)handle);
++		return -ESRCH;
++	}
++
++	if (unlikely(base->object_type != ttm_buffer_type)) {
++		ttm_base_object_unref(&base);
++		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
++		       (unsigned long)handle);
++		return -EINVAL;
++	}
++
++	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
++	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
++	ttm_base_object_unref(&base);
++	*out = &vmw_user_bo->dma;
++
++	return 0;
++}
++
++/**
++ * TODO: Implement a gmr id eviction mechanism. Currently we just fail
++ * when we're out of ids, causing buffers that would have used GMR
++ * space to be allocated out of VRAM instead.
++ */
++
++int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
++{
++	struct ttm_bo_global *glob = dev_priv->bdev.glob;
++	int id;
++	int ret;
++
++	do {
++		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
++			return -ENOMEM;
++
++		spin_lock(&glob->lru_lock);
++		ret = ida_get_new(&dev_priv->gmr_ida, &id);
++		spin_unlock(&glob->lru_lock);
++	} while (ret == -EAGAIN);
++
++	if (unlikely(ret != 0))
++		return ret;
++
++	if (unlikely(id >= dev_priv->max_gmr_ids)) {
++		spin_lock(&glob->lru_lock);
++		ida_remove(&dev_priv->gmr_ida, id);
++		spin_unlock(&glob->lru_lock);
++		return -EBUSY;
++	}
++
++	*p_id = (uint32_t) id;
++	return 0;
++}
++
++/*
++ * Stream management
++ */
++
++static void vmw_stream_destroy(struct vmw_resource *res)
++{
++	struct vmw_private *dev_priv = res->dev_priv;
++	struct vmw_stream *stream;
++	int ret;
++
++	DRM_INFO("%s: unref\n", __func__);
++	stream = container_of(res, struct vmw_stream, res);
++
++	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
++	WARN_ON(ret != 0);
++}
++
++static int vmw_stream_init(struct vmw_private *dev_priv,
++			   struct vmw_stream *stream,
++			   void (*res_free) (struct vmw_resource *res))
++{
++	struct vmw_resource *res = &stream->res;
++	int ret;
++
++	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
++				VMW_RES_STREAM, res_free);
++
++	if (unlikely(ret != 0)) {
++		if (res_free == NULL)
++			kfree(stream);
++		else
++			res_free(&stream->res);
++		return ret;
++	}
++
++	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
++	if (ret) {
++		vmw_resource_unreference(&res);
++		return ret;
++	}
++
++	DRM_INFO("%s: claimed\n", __func__);
++
++	vmw_resource_activate(&stream->res, vmw_stream_destroy);
++	return 0;
++}
++
++/**
++ * User-space stream management:
++ */
++
++static void vmw_user_stream_free(struct vmw_resource *res)
++{
++	struct vmw_user_stream *stream =
++	    container_of(res, struct vmw_user_stream, stream.res);
++
++	kfree(stream);
++}
++
++/**
++ * This function is called when user space has no more references on the
++ * base object. It releases the base-object's reference on the resource object.
++ */
++
++static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
++{
++	struct ttm_base_object *base = *p_base;
++	struct vmw_user_stream *stream =
++	    container_of(base, struct vmw_user_stream, base);
++	struct vmw_resource *res = &stream->stream.res;
++
++	*p_base = NULL;
++	vmw_resource_unreference(&res);
++}
++
++int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_resource *res;
++	struct vmw_user_stream *stream;
++	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	int ret = 0;
++
++	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
++				  arg->stream_id);
++	if (unlikely(res == NULL))
++		return -EINVAL;
++
++	if (res->res_free != &vmw_user_stream_free) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	stream = container_of(res, struct vmw_user_stream, stream.res);
++	if (stream->base.tfile != tfile) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
++out:
++	vmw_resource_unreference(&res);
++	return ret;
++}
++
++int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
++			   struct drm_file *file_priv)
++{
++	struct vmw_private *dev_priv = vmw_priv(dev);
++	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
++	struct vmw_resource *res;
++	struct vmw_resource *tmp;
++	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
++	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	int ret;
++
++	if (unlikely(stream == NULL))
++		return -ENOMEM;
++
++	res = &stream->stream.res;
++	stream->base.shareable = false;
++	stream->base.tfile = NULL;
++
++	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
++	if (unlikely(ret != 0))
++		return ret;
++
++	tmp = vmw_resource_reference(res);
++	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
++				   &vmw_user_stream_base_release, NULL);
++
++	if (unlikely(ret != 0)) {
++		vmw_resource_unreference(&tmp);
++		goto out_err;
++	}
++
++	arg->stream_id = res->id;
++out_err:
++	vmw_resource_unreference(&res);
++	return ret;
++}
++
++int vmw_user_stream_lookup(struct vmw_private *dev_priv,
++			   struct ttm_object_file *tfile,
++			   uint32_t *inout_id, struct vmw_resource **out)
++{
++	struct vmw_user_stream *stream;
++	struct vmw_resource *res;
++	int ret;
++
++	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
++	if (unlikely(res == NULL))
++		return -EINVAL;
++
++	if (res->res_free != &vmw_user_stream_free) {
++		ret = -EINVAL;
++		goto err_ref;
++	}
++
++	stream = container_of(res, struct vmw_user_stream, stream.res);
++	if (stream->base.tfile != tfile) {
++		ret = -EPERM;
++		goto err_ref;
++	}
++
++	*inout_id = stream->stream.stream_id;
++	*out = res;
++	return 0;
++err_ref:
++	vmw_resource_unreference(&res);
++	return ret;
++}
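
The resource helpers above are meant to be used with a strict
lookup/use/unreference discipline; a short sketch, with a hypothetical
context id cid:

	struct vmw_resource *res;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
	if (unlikely(res == NULL))
		return -EINVAL;	/* absent, or not yet activated */

	/* ... use the resource ... */

	vmw_resource_unreference(&res);	/* drops the ref and clears res */

Note that vmw_resource_lookup only takes a reference once the resource has
been activated, so half-constructed objects are never handed out.
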
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+new file mode 100644
+index 0000000..e3df4ad
+--- /dev/null
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+@@ -0,0 +1,99 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#include "drmP.h"
++#include "vmwgfx_drv.h"
++
++int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	struct drm_file *file_priv;
++	struct vmw_private *dev_priv;
++
++	if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
++		if (vmw_fifo_mmap(filp, vma) == 0)
++			return 0;
++		return drm_mmap(filp, vma);
++	}
++
++	file_priv = (struct drm_file *)filp->private_data;
++	dev_priv = vmw_priv(file_priv->minor->dev);
++	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
++}
++
++static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
++{
++	DRM_INFO("global init.\n");
++	return ttm_mem_global_init(ref->object);
++}
++
++static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
++{
++	ttm_mem_global_release(ref->object);
++}
++
++int vmw_ttm_global_init(struct vmw_private *dev_priv)
++{
++	struct ttm_global_reference *global_ref;
++	int ret;
++
++	global_ref = &dev_priv->mem_global_ref;
++	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
++	global_ref->size = sizeof(struct ttm_mem_global);
++	global_ref->init = &vmw_ttm_mem_global_init;
++	global_ref->release = &vmw_ttm_mem_global_release;
++
++	ret = ttm_global_item_ref(global_ref);
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed setting up TTM memory accounting.\n");
++		return ret;
++	}
++
++	dev_priv->bo_global_ref.mem_glob =
++		dev_priv->mem_global_ref.object;
++	global_ref = &dev_priv->bo_global_ref.ref;
++	global_ref->global_type = TTM_GLOBAL_TTM_BO;
++	global_ref->size = sizeof(struct ttm_bo_global);
++	global_ref->init = &ttm_bo_global_init;
++	global_ref->release = &ttm_bo_global_release;
++	ret = ttm_global_item_ref(global_ref);
++
++	if (unlikely(ret != 0)) {
++		DRM_ERROR("Failed setting up TTM buffer objects.\n");
++		goto out_no_bo;
++	}
++
++	return 0;
++out_no_bo:
++	ttm_global_item_unref(&dev_priv->mem_global_ref);
++	return ret;
++}
++
++void vmw_ttm_global_release(struct vmw_private *dev_priv)
++{
++	ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
++	ttm_global_item_unref(&dev_priv->mem_global_ref);
++}
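
The two helpers above are intended to bracket the driver's lifetime; a
sketch of the pairing (the surrounding load/unload code is hypothetical):

	/* at driver load */
	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		return ret;
	/* ... ttm_bo_device_init() and the rest of device setup ... */

	/* at driver unload, after the bo device is torn down */
	vmw_ttm_global_release(dev_priv);
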
+diff --git a/include/drm/Kbuild b/include/drm/Kbuild
+index b940fdf..bd3a1c2 100644
+--- a/include/drm/Kbuild
++++ b/include/drm/Kbuild
+@@ -7,4 +7,6 @@ unifdef-y += r128_drm.h
+ unifdef-y += radeon_drm.h
+ unifdef-y += sis_drm.h
+ unifdef-y += savage_drm.h
++unifdef-y += vmwgfx_drm.h
+ unifdef-y += via_drm.h
++unifdef-y += nouveau_drm.h
+diff --git a/include/drm/drm.h b/include/drm/drm.h
+index 7cb50bd..e3f46e0 100644
+--- a/include/drm/drm.h
++++ b/include/drm/drm.h
+@@ -36,17 +36,27 @@
+ #ifndef _DRM_H_
+ #define _DRM_H_
+ 
++#if defined(__linux__)
++
+ #include <linux/types.h>
+-#include <asm/ioctl.h>		/* For _IO* macros */
+-#define DRM_IOCTL_NR(n)		_IOC_NR(n)
+-#define DRM_IOC_VOID		_IOC_NONE
+-#define DRM_IOC_READ		_IOC_READ
+-#define DRM_IOC_WRITE		_IOC_WRITE
+-#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
+-#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
++#include <asm/ioctl.h>
++typedef unsigned int drm_handle_t;
+ 
+-#define DRM_MAJOR       226
+-#define DRM_MAX_MINOR   15
++#else /* One of the BSDs */
++
++#include <sys/ioccom.h>
++#include <sys/types.h>
++typedef int8_t   __s8;
++typedef uint8_t  __u8;
++typedef int16_t  __s16;
++typedef uint16_t __u16;
++typedef int32_t  __s32;
++typedef uint32_t __u32;
++typedef int64_t  __s64;
++typedef uint64_t __u64;
++typedef unsigned long drm_handle_t;
++
++#endif
+ 
+ #define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
+ #define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
+@@ -59,7 +69,6 @@
+ #define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
+ #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
+ 
+-typedef unsigned int drm_handle_t;
+ typedef unsigned int drm_context_t;
+ typedef unsigned int drm_drawable_t;
+ typedef unsigned int drm_magic_t;
+@@ -454,6 +463,7 @@ struct drm_irq_busid {
+ enum drm_vblank_seq_type {
+ 	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
+ 	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
++	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
+ 	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
+ 	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
+ 	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
+@@ -461,8 +471,8 @@ enum drm_vblank_seq_type {
+ };
+ 
+ #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
+-#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
+-				_DRM_VBLANK_NEXTONMISS)
++#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
++				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
+ 
+ struct drm_wait_vblank_request {
+ 	enum drm_vblank_seq_type type;
+@@ -686,6 +696,8 @@ struct drm_gem_open {
+ #define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
+ #define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+ #define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
++#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
++#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+ 
+ /**
+  * Device specific ioctls should only be in their respective headers
+@@ -698,6 +710,35 @@ struct drm_gem_open {
+ #define DRM_COMMAND_BASE                0x40
+ #define DRM_COMMAND_END			0xA0
+ 
++/**
++ * Header for events written back to userspace on the drm fd.  The
++ * type defines the type of event, the length specifies the total
++ * length of the event (including the header), and user_data is
++ * typically a 64 bit value passed with the ioctl that triggered the
++ * event.  A read on the drm fd will only ever return complete
++ * events; for example, if the read buffer is 100 bytes and there
++ * are two 64-byte events pending, only one will be returned.
++ *
++ * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
++ * up are chipset specific.
++ */
++struct drm_event {
++	__u32 type;
++	__u32 length;
++};
++
++#define DRM_EVENT_VBLANK 0x01
++#define DRM_EVENT_FLIP_COMPLETE 0x02
++
++struct drm_event_vblank {
++	struct drm_event base;
++	__u64 user_data;
++	__u32 tv_sec;
++	__u32 tv_usec;
++	__u32 sequence;
++	__u32 reserved;
++};
++
+ /* typedef area */
+ #ifndef __KERNEL__
+ typedef struct drm_clip_rect drm_clip_rect_t;
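
The header above implies a simple userspace read loop; a hedged sketch,
assuming fd is an open drm file descriptor and ignoring error handling:

	char buffer[1024];
	int len, i = 0;

	len = read(fd, buffer, sizeof(buffer));	/* whole events only */
	while (i < len) {
		struct drm_event *e = (struct drm_event *) &buffer[i];

		if (e->type == DRM_EVENT_VBLANK) {
			struct drm_event_vblank *vb =
				(struct drm_event_vblank *) e;
			/* vb->sequence, vb->tv_sec/tv_usec, vb->user_data */
		}
		i += e->length;	/* length includes the header */
	}
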
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 7ad3faa..ffac157 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -245,16 +245,6 @@ extern void drm_ut_debug_printk(unsigned int request_level,
+ 
+ #endif
+ 
+-#define DRM_PROC_LIMIT (PAGE_SIZE-80)
+-
+-#define DRM_PROC_PRINT(fmt, arg...)					\
+-   len += sprintf(&buf[len], fmt , ##arg);				\
+-   if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
+-
+-#define DRM_PROC_PRINT_RET(ret, fmt, arg...)				\
+-   len += sprintf(&buf[len], fmt , ##arg);				\
+-   if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
+-
+ /*@}*/
+ 
+ /***********************************************************************/
+@@ -265,19 +255,8 @@ extern void drm_ut_debug_printk(unsigned int request_level,
+ 
+ #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
+ #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
+-#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
+ 
+ #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+-/**
+- * Get the private SAREA mapping.
+- *
+- * \param _dev DRM device.
+- * \param _ctx context number.
+- * \param _map output mapping.
+- */
+-#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do {	\
+-	(_map) = (_dev)->context_sareas[_ctx];		\
+-} while(0)
+ 
+ /**
+  * Test that the hardware lock is held by the caller, returning otherwise.
+@@ -297,18 +276,6 @@ do {										\
+ } while (0)
+ 
+ /**
+- * Copy and IOCTL return string to user space
+- */
+-#define DRM_COPY( name, value )						\
+-	len = strlen( value );						\
+-	if ( len > name##_len ) len = name##_len;			\
+-	name##_len = strlen( value );					\
+-	if ( len && name ) {						\
+-		if ( copy_to_user( name, value, len ) )			\
+-			return -EFAULT;					\
+-	}
+-
+-/**
+  * Ioctl function type.
+  *
+  * \param inode device inode.
+@@ -322,10 +289,14 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ 			       unsigned long arg);
+ 
++#define DRM_IOCTL_NR(n)                _IOC_NR(n)
++#define DRM_MAJOR       226
++
+ #define DRM_AUTH	0x1
+ #define	DRM_MASTER	0x2
+ #define DRM_ROOT_ONLY	0x4
+ #define DRM_CONTROL_ALLOW 0x8
++#define DRM_UNLOCKED	0x10
+ 
+ struct drm_ioctl_desc {
+ 	unsigned int cmd;
+@@ -426,6 +397,14 @@ struct drm_buf_entry {
+ 	struct drm_freelist freelist;
+ };
+ 
++/* Event queued up for userspace to read */
++struct drm_pending_event {
++	struct drm_event *event;
++	struct list_head link;
++	struct drm_file *file_priv;
++	void (*destroy)(struct drm_pending_event *event);
++};
++
+ /** File private data */
+ struct drm_file {
+ 	int authenticated;
+@@ -449,6 +428,10 @@ struct drm_file {
+ 	struct drm_master *master; /* master this node is currently associated with
+ 				      N.B. not always minor->master */
+ 	struct list_head fbs;
++
++	wait_queue_head_t event_wait;
++	struct list_head event_list;
++	int event_space;
+ };
+ 
+ /** Wait queue */
+@@ -795,6 +778,15 @@ struct drm_driver {
+ 	/* Master routines */
+ 	int (*master_create)(struct drm_device *dev, struct drm_master *master);
+ 	void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
++	/**
++	 * master_set is called whenever the minor master is set.
++	 * master_drop is called whenever the minor master is dropped.
++	 */
++
++	int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
++			  bool from_open);
++	void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
++			    bool from_release);
+ 
+ 	int (*proc_init)(struct drm_minor *minor);
+ 	void (*proc_cleanup)(struct drm_minor *minor);
+@@ -900,6 +892,12 @@ struct drm_minor {
+ 	struct drm_mode_group mode_group;
+ };
+ 
++struct drm_pending_vblank_event {
++	struct drm_pending_event base;
++	int pipe;
++	struct drm_event_vblank event;
++};
++
+ /**
+  * DRM device structure. This structure represent a complete card that
+  * may contain multiple heads.
+@@ -999,6 +997,12 @@ struct drm_device {
+ 
+ 	u32 max_vblank_count;           /**< size of vblank counter register */
+ 
++	/**
++	 * List of events
++	 */
++	struct list_head vblank_event_list;
++	spinlock_t event_lock;
++
+ 	/*@} */
+ 	cycles_t ctx_start;
+ 	cycles_t lck_start;
+@@ -1125,8 +1129,8 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
+ 				/* Driver support (drm_drv.h) */
+ extern int drm_init(struct drm_driver *driver);
+ extern void drm_exit(struct drm_driver *driver);
+-extern int drm_ioctl(struct inode *inode, struct file *filp,
+-		     unsigned int cmd, unsigned long arg);
++extern long drm_ioctl(struct file *filp,
++		      unsigned int cmd, unsigned long arg);
+ extern long drm_compat_ioctl(struct file *filp,
+ 			     unsigned int cmd, unsigned long arg);
+ extern int drm_lastclose(struct drm_device *dev);
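
Since drm_ioctl() now has the unlocked_ioctl signature, drivers hook it
(and the new drm_read() for the event queue) into their file_operations
roughly as follows; example_fops is illustrative, the helpers are the ones
declared here:

	static const struct file_operations example_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,	/* previously .ioctl */
		.mmap		= drm_mmap,
		.poll		= drm_poll,
		.read		= drm_read,
		.fasync		= drm_fasync,
	};
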
+@@ -1135,6 +1139,8 @@ extern int drm_lastclose(struct drm_device *dev);
+ extern int drm_open(struct inode *inode, struct file *filp);
+ extern int drm_stub_open(struct inode *inode, struct file *filp);
+ extern int drm_fasync(int fd, struct file *filp, int on);
++extern ssize_t drm_read(struct file *filp, char __user *buffer,
++			size_t count, loff_t *offset);
+ extern int drm_release(struct inode *inode, struct file *filp);
+ 
+ 				/* Mapping support (drm_vm.h) */
+@@ -1520,14 +1526,27 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
+ 
+ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+ {
++	if (size != 0 && nmemb > ULONG_MAX / size)
++		return NULL;
++
+ 	if (size * nmemb <= PAGE_SIZE)
+ 	    return kcalloc(nmemb, size, GFP_KERNEL);
+ 
++	return __vmalloc(size * nmemb,
++			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
++}
++
++/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
++static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
++{
+ 	if (size != 0 && nmemb > ULONG_MAX / size)
+ 		return NULL;
+ 
++	if (size * nmemb <= PAGE_SIZE)
++	    return kmalloc(nmemb * size, GFP_KERNEL);
++
+ 	return __vmalloc(size * nmemb,
+-			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
++			 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+ }
+ 
+ static __inline void drm_free_large(void *ptr)
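As a usage sketch (not part of the patch), the overflow check above lets callers size arrays from untrusted counts; num_clips here is an assumed variable and the element type is only illustrative:

    /* Allocate an array of num_clips rects without risking nmemb * size
     * overflow; drm_malloc_ab skips the zeroing drm_calloc_large does. */
    struct drm_clip_rect *rects;

    rects = drm_malloc_ab(num_clips, sizeof(*rects));
    if (!rects)
            return -ENOMEM;
    /* ... fill and use rects ... */
    drm_free_large(rects);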
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index b69347b..fdf43ab 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -123,7 +123,7 @@ struct drm_display_mode {
+ 	int type;
+ 
+ 	/* Proposed mode values */
+-	int clock;
++	int clock;		/* in kHz */
+ 	int hdisplay;
+ 	int hsync_start;
+ 	int hsync_end;
+@@ -164,8 +164,8 @@ struct drm_display_mode {
+ 	int *private;
+ 	int private_flags;
+ 
+-	int vrefresh;
+-	float hsync;
++	int vrefresh;		/* in Hz */
++	int hsync;		/* in kHz */
+ };
+ 
+ enum drm_connector_status {
+@@ -242,6 +242,21 @@ struct drm_framebuffer_funcs {
+ 	int (*create_handle)(struct drm_framebuffer *fb,
+ 			     struct drm_file *file_priv,
+ 			     unsigned int *handle);
++	/**
++	 * Optional callback for the dirty fb ioctl.
++	 *
++	 * Userspace can notify the driver via this callback
++	 * that an area of the framebuffer has changed and should
++	 * be flushed to the display hardware.
++	 *
++	 * See the documentation of struct drm_mode_fb_dirty_cmd
++	 * in drm_mode.h for more information, as the semantics
++	 * and arguments map one-to-one onto this function.
++	 */
++	int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
++		     unsigned color, struct drm_clip_rect *clips,
++		     unsigned num_clips);
+ };
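A sketch of how a driver might wire the new callback; foo_fb_dirty and foo_fb_funcs are hypothetical names, and the flush itself is entirely driver-specific:

    /* Flush the listed regions out to the display hardware. */
    static int foo_fb_dirty(struct drm_framebuffer *fb, unsigned flags,
                            unsigned color, struct drm_clip_rect *clips,
                            unsigned num_clips)
    {
            /* ... program the hardware from clips[0..num_clips-1] ... */
            return 0;
    }

    static struct drm_framebuffer_funcs foo_fb_funcs = {
            .dirty = foo_fb_dirty,
            /* .destroy and .create_handle as usual */
    };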
+ 
+ struct drm_framebuffer {
+@@ -256,7 +271,7 @@ struct drm_framebuffer {
+ 	unsigned int depth;
+ 	int bits_per_pixel;
+ 	int flags;
+-	void *fbdev;
++	struct fb_info *fbdev;
+ 	u32 pseudo_palette[17];
+ 	struct list_head filp_head;
+ 	/* if you are using the helper */
+@@ -290,6 +305,7 @@ struct drm_property {
+ struct drm_crtc;
+ struct drm_connector;
+ struct drm_encoder;
++struct drm_pending_vblank_event;
+ 
+ /**
+  * drm_crtc_funcs - control CRTCs for a given device
+@@ -333,6 +349,19 @@ struct drm_crtc_funcs {
+ 	void (*destroy)(struct drm_crtc *crtc);
+ 
+ 	int (*set_config)(struct drm_mode_set *set);
++
++	/*
++	 * Flip to the given framebuffer.  This implements the page
++	 * flip ioctl described in drm_mode.h: the implementation
++	 * must return immediately and block all rendering to the
++	 * current fb until the flip has completed.
++	 * If userspace set the event flag in the ioctl, the event
++	 * argument will point to an event to send back when the flip
++	 * completes; otherwise it will be NULL.
++	 */
++	int (*page_flip)(struct drm_crtc *crtc,
++			 struct drm_framebuffer *fb,
++			 struct drm_pending_vblank_event *event);
+ };
+ 
+ /**
+@@ -596,6 +625,7 @@ struct drm_mode_config {
+ 	/* Optional properties */
+ 	struct drm_property *scaling_mode_property;
+ 	struct drm_property *dithering_mode_property;
++	struct drm_property *dirty_info_property;
+ };
+ 
+ #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
+@@ -667,6 +697,7 @@ extern void drm_mode_validate_size(struct drm_device *dev,
+ extern void drm_mode_prune_invalid(struct drm_device *dev,
+ 				   struct list_head *mode_list, bool verbose);
+ extern void drm_mode_sort(struct list_head *mode_list);
++extern int drm_mode_hsync(struct drm_display_mode *mode);
+ extern int drm_mode_vrefresh(struct drm_display_mode *mode);
+ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
+ 				  int adjust_flags);
+@@ -703,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
+ 				     char *formats[]);
+ extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+ extern int drm_mode_create_dithering_property(struct drm_device *dev);
++extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
+ extern char *drm_get_encoder_name(struct drm_encoder *encoder);
+ 
+ extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+@@ -711,7 +743,8 @@ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
+ 					   struct drm_encoder *encoder);
+ extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
+ 					 int gamma_size);
+-extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type);
++extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
++		uint32_t id, uint32_t type);
+ /* IOCTLs */
+ extern int drm_mode_getresources(struct drm_device *dev,
+ 				 void *data, struct drm_file *file_priv);
+@@ -730,6 +763,8 @@ extern int drm_mode_rmfb(struct drm_device *dev,
+ 			 void *data, struct drm_file *file_priv);
+ extern int drm_mode_getfb(struct drm_device *dev,
+ 			  void *data, struct drm_file *file_priv);
++extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
++				  void *data, struct drm_file *file_priv);
+ extern int drm_mode_addmode_ioctl(struct drm_device *dev,
+ 				  void *data, struct drm_file *file_priv);
+ extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
+@@ -756,6 +791,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
+ extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
+ 				    void *data, struct drm_file *file_priv);
+ extern bool drm_detect_hdmi_monitor(struct edid *edid);
++extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
++				    void *data, struct drm_file *file_priv);
+ extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
+ 				int hdisplay, int vdisplay, int vrefresh,
+ 				bool reduced, bool interlaced, bool margins);
+diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+new file mode 100644
+index 0000000..a49e791
+--- /dev/null
++++ b/include/drm/drm_dp_helper.h
+@@ -0,0 +1,180 @@
++/*
++ * Copyright © 2008 Keith Packard
++ *
++ * Permission to use, copy, modify, distribute, and sell this software and its
++ * documentation for any purpose is hereby granted without fee, provided that
++ * the above copyright notice appear in all copies and that both that copyright
++ * notice and this permission notice appear in supporting documentation, and
++ * that the name of the copyright holders not be used in advertising or
++ * publicity pertaining to distribution of the software without specific,
++ * written prior permission.  The copyright holders make no representations
++ * about the suitability of this software for any purpose.  It is provided "as
++ * is" without express or implied warranty.
++ *
++ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
++ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
++ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
++ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
++ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
++ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
++ * OF THIS SOFTWARE.
++ */
++
++#ifndef _DRM_DP_HELPER_H_
++#define _DRM_DP_HELPER_H_
++
++/* From the VESA DisplayPort spec */
++
++#define AUX_NATIVE_WRITE	0x8
++#define AUX_NATIVE_READ		0x9
++#define AUX_I2C_WRITE		0x0
++#define AUX_I2C_READ		0x1
++#define AUX_I2C_STATUS		0x2
++#define AUX_I2C_MOT		0x4
++
++#define AUX_NATIVE_REPLY_ACK	(0x0 << 4)
++#define AUX_NATIVE_REPLY_NACK	(0x1 << 4)
++#define AUX_NATIVE_REPLY_DEFER	(0x2 << 4)
++#define AUX_NATIVE_REPLY_MASK	(0x3 << 4)
++
++#define AUX_I2C_REPLY_ACK	(0x0 << 6)
++#define AUX_I2C_REPLY_NACK	(0x1 << 6)
++#define AUX_I2C_REPLY_DEFER	(0x2 << 6)
++#define AUX_I2C_REPLY_MASK	(0x3 << 6)
++
++/* AUX CH addresses */
++/* DPCD */
++#define DP_DPCD_REV                         0x000
++
++#define DP_MAX_LINK_RATE                    0x001
++
++#define DP_MAX_LANE_COUNT                   0x002
++# define DP_MAX_LANE_COUNT_MASK		    0x1f
++# define DP_ENHANCED_FRAME_CAP		    (1 << 7)
++
++#define DP_MAX_DOWNSPREAD                   0x003
++# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)
++
++#define DP_NORP                             0x004
++
++#define DP_DOWNSTREAMPORT_PRESENT           0x005
++# define DP_DWN_STRM_PORT_PRESENT           (1 << 0)
++# define DP_DWN_STRM_PORT_TYPE_MASK         0x06
++/* 00b = DisplayPort */
++/* 01b = Analog */
++/* 10b = TMDS or HDMI */
++/* 11b = Other */
++# define DP_FORMAT_CONVERSION               (1 << 3)
++
++#define DP_MAIN_LINK_CHANNEL_CODING         0x006
++
++/* link configuration */
++#define	DP_LINK_BW_SET		            0x100
++# define DP_LINK_BW_1_62		    0x06
++# define DP_LINK_BW_2_7			    0x0a
++
++#define DP_LANE_COUNT_SET	            0x101
++# define DP_LANE_COUNT_MASK		    0x0f
++# define DP_LANE_COUNT_ENHANCED_FRAME_EN    (1 << 7)
++
++#define DP_TRAINING_PATTERN_SET	            0x102
++# define DP_TRAINING_PATTERN_DISABLE	    0
++# define DP_TRAINING_PATTERN_1		    1
++# define DP_TRAINING_PATTERN_2		    2
++# define DP_TRAINING_PATTERN_MASK	    0x3
++
++# define DP_LINK_QUAL_PATTERN_DISABLE	    (0 << 2)
++# define DP_LINK_QUAL_PATTERN_D10_2	    (1 << 2)
++# define DP_LINK_QUAL_PATTERN_ERROR_RATE    (2 << 2)
++# define DP_LINK_QUAL_PATTERN_PRBS7	    (3 << 2)
++# define DP_LINK_QUAL_PATTERN_MASK	    (3 << 2)
++
++# define DP_RECOVERED_CLOCK_OUT_EN	    (1 << 4)
++# define DP_LINK_SCRAMBLING_DISABLE	    (1 << 5)
++
++# define DP_SYMBOL_ERROR_COUNT_BOTH	    (0 << 6)
++# define DP_SYMBOL_ERROR_COUNT_DISPARITY    (1 << 6)
++# define DP_SYMBOL_ERROR_COUNT_SYMBOL	    (2 << 6)
++# define DP_SYMBOL_ERROR_COUNT_MASK	    (3 << 6)
++
++#define DP_TRAINING_LANE0_SET		    0x103
++#define DP_TRAINING_LANE1_SET		    0x104
++#define DP_TRAINING_LANE2_SET		    0x105
++#define DP_TRAINING_LANE3_SET		    0x106
++
++# define DP_TRAIN_VOLTAGE_SWING_MASK	    0x3
++# define DP_TRAIN_VOLTAGE_SWING_SHIFT	    0
++# define DP_TRAIN_MAX_SWING_REACHED	    (1 << 2)
++# define DP_TRAIN_VOLTAGE_SWING_400	    (0 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_600	    (1 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_800	    (2 << 0)
++# define DP_TRAIN_VOLTAGE_SWING_1200	    (3 << 0)
++
++# define DP_TRAIN_PRE_EMPHASIS_MASK	    (3 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_0	    (0 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_3_5	    (1 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_6	    (2 << 3)
++# define DP_TRAIN_PRE_EMPHASIS_9_5	    (3 << 3)
++
++# define DP_TRAIN_PRE_EMPHASIS_SHIFT	    3
++# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED  (1 << 5)
++
++#define DP_DOWNSPREAD_CTRL		    0x107
++# define DP_SPREAD_AMP_0_5		    (1 << 4)
++
++#define DP_MAIN_LINK_CHANNEL_CODING_SET	    0x108
++# define DP_SET_ANSI_8B10B		    (1 << 0)
++
++#define DP_LANE0_1_STATUS		    0x202
++#define DP_LANE2_3_STATUS		    0x203
++# define DP_LANE_CR_DONE		    (1 << 0)
++# define DP_LANE_CHANNEL_EQ_DONE	    (1 << 1)
++# define DP_LANE_SYMBOL_LOCKED		    (1 << 2)
++
++#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE |		\
++			    DP_LANE_CHANNEL_EQ_DONE |	\
++			    DP_LANE_SYMBOL_LOCKED)
++
++#define DP_LANE_ALIGN_STATUS_UPDATED	    0x204
++
++#define DP_INTERLANE_ALIGN_DONE		    (1 << 0)
++#define DP_DOWNSTREAM_PORT_STATUS_CHANGED   (1 << 6)
++#define DP_LINK_STATUS_UPDATED		    (1 << 7)
++
++#define DP_SINK_STATUS			    0x205
++
++#define DP_RECEIVE_PORT_0_STATUS	    (1 << 0)
++#define DP_RECEIVE_PORT_1_STATUS	    (1 << 1)
++
++#define DP_ADJUST_REQUEST_LANE0_1	    0x206
++#define DP_ADJUST_REQUEST_LANE2_3	    0x207
++# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK  0x03
++# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
++# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK   0x0c
++# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT  2
++# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK  0x30
++# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
++# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK   0xc0
++# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT  6
++
++#define DP_SET_POWER                        0x600
++# define DP_SET_POWER_D0                    0x1
++# define DP_SET_POWER_D3                    0x2
++
++#define MODE_I2C_START	1
++#define MODE_I2C_WRITE	2
++#define MODE_I2C_READ	4
++#define MODE_I2C_STOP	8
++
++struct i2c_algo_dp_aux_data {
++	bool running;
++	u16 address;
++	int (*aux_ch) (struct i2c_adapter *adapter,
++		       int mode, uint8_t write_byte,
++		       uint8_t *read_byte);
++};
++
++int
++i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
++
++#endif /* _DRM_DP_HELPER_H_ */
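A sketch of how a driver might plug its AUX-channel transfer into this helper; foo_aux_ch and the adapter wiring are hypothetical, but the algo_data convention matches how the helper locates the callback:

    /* One AUX transaction; mode is a combination of the MODE_I2C_* flags. */
    static int foo_aux_ch(struct i2c_adapter *adapter, int mode,
                          uint8_t write_byte, uint8_t *read_byte)
    {
            /* ... drive the hardware's AUX channel ... */
            return 0;
    }

    static struct i2c_algo_dp_aux_data foo_aux_data = {
            .address = 0,
            .aux_ch = foo_aux_ch,
    };

    /* adapter->algo_data = &foo_aux_data; then i2c_dp_aux_add_bus(adapter); */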
+diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
+index 7d6c9a2..d33c3e0 100644
+--- a/include/drm/drm_edid.h
++++ b/include/drm/drm_edid.h
+@@ -106,6 +106,10 @@ struct detailed_data_color_point {
+ 	u8 wpindex2[3];
+ } __attribute__((packed));
+ 
++struct cvt_timing {
++	u8 code[3];
++} __attribute__((packed));
++
+ struct detailed_non_pixel {
+ 	u8 pad1;
+ 	u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
+@@ -117,9 +121,13 @@ struct detailed_non_pixel {
+ 		struct detailed_data_monitor_range range;
+ 		struct detailed_data_wpindex color;
+ 		struct std_timing timings[5];
++		struct cvt_timing cvt[4];
+ 	} data;
+ } __attribute__((packed));
+ 
++#define EDID_DETAIL_EST_TIMINGS 0xf7
++#define EDID_DETAIL_CVT_3BYTE 0xf8
++#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
+ #define EDID_DETAIL_STD_MODES 0xfa
+ #define EDID_DETAIL_MONITOR_CPDATA 0xfb
+ #define EDID_DETAIL_MONITOR_NAME 0xfc
+diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
+index 62329f9..4c10be3 100644
+--- a/include/drm/drm_mm.h
++++ b/include/drm/drm_mm.h
+@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+ 						    unsigned long size,
+ 						    unsigned alignment,
+ 						    int atomic);
++extern struct drm_mm_node *drm_mm_get_block_range_generic(
++						struct drm_mm_node *node,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int atomic);
+ static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ 						   unsigned long size,
+ 						   unsigned alignment)
+@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
+ {
+ 	return drm_mm_get_block_generic(parent, size, alignment, 1);
+ }
++static inline struct drm_mm_node *drm_mm_get_block_range(
++						struct drm_mm_node *parent,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end)
++{
++	return drm_mm_get_block_range_generic(parent, size, alignment,
++						start, end, 0);
++}
++static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
++						struct drm_mm_node *parent,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end)
++{
++	return drm_mm_get_block_range_generic(parent, size, alignment,
++						start, end, 1);
++}
+ extern void drm_mm_put_block(struct drm_mm_node *cur);
+ extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ 					      unsigned long size,
+ 					      unsigned alignment,
+ 					      int best_match);
++extern struct drm_mm_node *drm_mm_search_free_in_range(
++						const struct drm_mm *mm,
++						unsigned long size,
++						unsigned alignment,
++						unsigned long start,
++						unsigned long end,
++						int best_match);
+ extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+ 		       unsigned long size);
+ extern void drm_mm_takedown(struct drm_mm *mm);
+@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+ 	return block->mm;
+ }
+ 
++extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+ #ifdef CONFIG_DEBUG_FS
+ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
+ #endif
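A sketch of the intended pairing of the new range-restricted calls; mm, size, alignment, start and end are assumed to be in scope:

    /* Find a hole inside [start, end] first, then claim it. */
    struct drm_mm_node *node;

    node = drm_mm_search_free_in_range(&mm, size, alignment,
                                       start, end, 0);
    if (node)
            node = drm_mm_get_block_range(node, size, alignment,
                                          start, end);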
+diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
+index 1f90841..c5ba163 100644
+--- a/include/drm/drm_mode.h
++++ b/include/drm/drm_mode.h
+@@ -27,9 +27,6 @@
+ #ifndef _DRM_MODE_H
+ #define _DRM_MODE_H
+ 
+-#include <linux/kernel.h>
+-#include <linux/types.h>
+-
+ #define DRM_DISPLAY_INFO_LEN	32
+ #define DRM_CONNECTOR_NAME_LEN	32
+ #define DRM_DISPLAY_MODE_LEN	32
+@@ -78,12 +75,17 @@
+ #define DRM_MODE_DITHERING_OFF	0
+ #define DRM_MODE_DITHERING_ON	1
+ 
++/* Dirty info options */
++#define DRM_MODE_DIRTY_OFF      0
++#define DRM_MODE_DIRTY_ON       1
++#define DRM_MODE_DIRTY_ANNOTATE 2
++
+ struct drm_mode_modeinfo {
+ 	__u32 clock;
+ 	__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
+ 	__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
+ 
+-	__u32 vrefresh; /* vertical refresh * 1000 */
++	__u32 vrefresh;
+ 
+ 	__u32 flags;
+ 	__u32 type;
+@@ -158,6 +160,7 @@ struct drm_mode_get_encoder {
+ #define DRM_MODE_CONNECTOR_HDMIA	11
+ #define DRM_MODE_CONNECTOR_HDMIB	12
+ #define DRM_MODE_CONNECTOR_TV		13
++#define DRM_MODE_CONNECTOR_eDP		14
+ 
+ struct drm_mode_get_connector {
+ 
+@@ -225,6 +228,45 @@ struct drm_mode_fb_cmd {
+ 	__u32 handle;
+ };
+ 
++#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
++#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
++#define DRM_MODE_FB_DIRTY_FLAGS         0x03
++
++/*
++ * Mark a region of a framebuffer as dirty.
++ *
++ * Some hardware does not automatically update display contents
++ * as hardware or software draws to a framebuffer. This ioctl
++ * allows userspace to tell the kernel and the hardware what
++ * regions of the framebuffer have changed.
++ *
++ * The kernel or hardware is free to update more than just the
++ * region specified by the clip rects. The kernel or hardware
++ * may also delay and/or coalesce several calls to dirty into a
++ * single update.
++ *
++ * Userspace may annotate the updates; the annotations are a
++ * promise made by the caller that the change is either a copy
++ * of pixels or a fill of a single color in the region specified.
++ *
++ * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given, then
++ * the number of updated regions is half of the num_clips given,
++ * where the clip rects are paired as src and dst. The width and
++ * height of each pair must match.
++ *
++ * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
++ * promises that the region specified by the clip rects is filled
++ * completely with the single color given in the color argument.
++ */
++
++struct drm_mode_fb_dirty_cmd {
++	__u32 fb_id;
++	__u32 flags;
++	__u32 color;
++	__u32 num_clips;
++	__u64 clips_ptr;
++};
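A user-space sketch of a fill annotation, assuming an open DRM fd and a valid fb_id; the ioctl number DRM_IOCTL_MODE_DIRTYFB and drmIoctl() are assumed to come from drm.h and libdrm respectively:

    /* Tell the kernel the top-left 64x64 block was filled with red. */
    struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
    struct drm_mode_fb_dirty_cmd cmd = {
            .fb_id = fb_id,
            .flags = DRM_MODE_FB_DIRTY_ANNOTATE_FILL,
            .color = 0xff0000,
            .num_clips = 1,
            .clips_ptr = (__u64)(unsigned long)&clip,
    };
    drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);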
++
+ struct drm_mode_mode_cmd {
+ 	__u32 connector_id;
+ 	struct drm_mode_modeinfo mode;
+@@ -268,4 +310,37 @@ struct drm_mode_crtc_lut {
+ 	__u64 blue;
+ };
+ 
++#define DRM_MODE_PAGE_FLIP_EVENT 0x01
++#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
++
++/*
++ * Request a page flip on the specified crtc.
++ *
++ * This ioctl will ask KMS to schedule a page flip for the specified
++ * crtc.  Once any pending rendering targeting the specified fb (as of
++ * ioctl time) has completed, the crtc will be reprogrammed to display
++ * that fb after the next vertical refresh.  The ioctl returns
++ * immediately, but subsequent rendering to the current fb will block
++ * in the execbuffer ioctl until the page flip happens.  If a page
++ * flip is already pending as the ioctl is called, EBUSY will be
++ * returned.
++ *
++ * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
++ * request that drm send back a vblank event (see drm.h: struct
++ * drm_event_vblank) when the page flip is done.  The user_data field
++ * passed in with this ioctl will be returned as the user_data field
++ * in the vblank event struct.
++ *
++ * The reserved field must be zero until we figure out something
++ * clever to use it for.
++ */
++
++struct drm_mode_crtc_page_flip {
++	__u32 crtc_id;
++	__u32 fb_id;
++	__u32 flags;
++	__u32 reserved;
++	__u64 user_data;
++};
++
+ #endif
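A user-space sketch of the flow this describes, assuming an open DRM fd plus valid crtc_id and fb_id; drmIoctl() is libdrm's ioctl wrapper:

    struct drm_mode_crtc_page_flip flip = {
            .crtc_id = crtc_id,
            .fb_id = fb_id,
            .flags = DRM_MODE_PAGE_FLIP_EVENT,
            .user_data = 0,
    };
    if (drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == 0) {
            char buf[1024];
            /* completion shows up on the DRM fd as a struct
             * drm_event_vblank, delivered through drm_read() */
            read(fd, buf, sizeof(buf));
    }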
+diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
+new file mode 100644
+index 0000000..8390b43
+--- /dev/null
++++ b/include/drm/i2c/ch7006.h
+@@ -0,0 +1,86 @@
++/*
++ * Copyright (C) 2009 Francisco Jerez.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining
++ * a copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sublicense, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial
++ * portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
++ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
++ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
++ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ */
++
++#ifndef __DRM_I2C_CH7006_H__
++#define __DRM_I2C_CH7006_H__
++
++/**
++ * struct ch7006_encoder_params
++ *
++ * Describes how the ch7006 is wired up with the GPU. It should be
++ * used as the @params parameter of its @set_config method.
++ *
++ * See "http://www.chrontel.com/pdf/7006.pdf" for their precise
++ * meaning.
++ */
++struct ch7006_encoder_params {
++	enum {
++		CH7006_FORMAT_RGB16 = 0,
++		CH7006_FORMAT_YCrCb24m16,
++		CH7006_FORMAT_RGB24m16,
++		CH7006_FORMAT_RGB15,
++		CH7006_FORMAT_RGB24m12C,
++		CH7006_FORMAT_RGB24m12I,
++		CH7006_FORMAT_RGB24m8,
++		CH7006_FORMAT_RGB16m8,
++		CH7006_FORMAT_RGB15m8,
++		CH7006_FORMAT_YCrCb24m8,
++	} input_format;
++
++	enum {
++		CH7006_CLOCK_SLAVE = 0,
++		CH7006_CLOCK_MASTER,
++	} clock_mode;
++
++	enum {
++		CH7006_CLOCK_EDGE_NEG = 0,
++		CH7006_CLOCK_EDGE_POS,
++	} clock_edge;
++
++	int xcm, pcm;
++
++	enum {
++		CH7006_SYNC_SLAVE = 0,
++		CH7006_SYNC_MASTER,
++	} sync_direction;
++
++	enum {
++		CH7006_SYNC_SEPARATED = 0,
++		CH7006_SYNC_EMBEDDED,
++	} sync_encoding;
++
++	enum {
++		CH7006_POUT_1_8V = 0,
++		CH7006_POUT_3_3V,
++	} pout_level;
++
++	enum {
++		CH7006_ACTIVE_HSYNC = 0,
++		CH7006_ACTIVE_DSTART,
++	} active_detect;
++};
++
++#endif
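For illustration, a parameter block for one plausible board wiring; the values are made up and would have to come from the actual schematics:

    static struct ch7006_encoder_params foo_tv_params = {
            .input_format = CH7006_FORMAT_RGB16,
            .clock_mode = CH7006_CLOCK_MASTER,
            .clock_edge = CH7006_CLOCK_EDGE_NEG,
            .sync_direction = CH7006_SYNC_SLAVE,
            .sync_encoding = CH7006_SYNC_SEPARATED,
            .pout_level = CH7006_POUT_3_3V,
            .active_detect = CH7006_ACTIVE_HSYNC,
    };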
+diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
+index 7e0cb1d..b64a8d7 100644
+--- a/include/drm/i915_drm.h
++++ b/include/drm/i915_drm.h
+@@ -27,11 +27,11 @@
+ #ifndef _I915_DRM_H_
+ #define _I915_DRM_H_
+ 
++#include "drm.h"
++
+ /* Please note that modifications to all structs defined here are
+  * subject to backwards-compatibility constraints.
+  */
+-#include <linux/types.h>
+-#include "drm.h"
+ 
+ /* Each region is a minimum of 16k, and there are at most 255 of them.
+  */
+@@ -186,6 +186,9 @@ typedef struct _drm_i915_sarea {
+ #define DRM_I915_GEM_MMAP_GTT	0x24
+ #define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
+ #define DRM_I915_GEM_MADVISE	0x26
++#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
++#define DRM_I915_OVERLAY_ATTRS	0x28
++#define DRM_I915_GEM_EXECBUFFER2	0x29
+ 
+ #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
+ #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
+@@ -205,6 +208,7 @@ typedef struct _drm_i915_sarea {
+ #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+ #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
+ #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
++#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+ #define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
+ #define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
+ #define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+@@ -221,8 +225,10 @@ typedef struct _drm_i915_sarea {
+ #define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+ #define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+ #define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+-#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id)
++#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
+ #define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
++#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
++#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
+ 
+ /* Allow drivers to submit batchbuffers directly to hardware, relying
+  * on the security mechanisms provided by hardware.
+@@ -266,6 +272,9 @@ typedef struct drm_i915_irq_wait {
+ #define I915_PARAM_CHIPSET_ID            4
+ #define I915_PARAM_HAS_GEM               5
+ #define I915_PARAM_NUM_FENCES_AVAIL      6
++#define I915_PARAM_HAS_OVERLAY           7
++#define I915_PARAM_HAS_PAGEFLIPPING	 8
++#define I915_PARAM_HAS_EXECBUF2          9
+ 
+ typedef struct drm_i915_getparam {
+ 	int param;
+@@ -561,6 +570,57 @@ struct drm_i915_gem_execbuffer {
+ 	__u64 cliprects_ptr;
+ };
+ 
++struct drm_i915_gem_exec_object2 {
++	/**
++	 * User's handle for a buffer to be bound into the GTT for this
++	 * operation.
++	 */
++	__u32 handle;
++
++	/** Number of relocations to be performed on this buffer */
++	__u32 relocation_count;
++	/**
++	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
++	 * the relocations to be performed in this buffer.
++	 */
++	__u64 relocs_ptr;
++
++	/** Required alignment in graphics aperture */
++	__u64 alignment;
++
++	/**
++	 * Returned value of the updated offset of the object, for future
++	 * presumed_offset writes.
++	 */
++	__u64 offset;
++
++#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
++	__u64 flags;
++	__u64 rsvd1;
++	__u64 rsvd2;
++};
++
++struct drm_i915_gem_execbuffer2 {
++	/**
++	 * List of gem_exec_object2 structs
++	 */
++	__u64 buffers_ptr;
++	__u32 buffer_count;
++
++	/** Offset in the batchbuffer to start execution from. */
++	__u32 batch_start_offset;
++	/** Bytes used in batchbuffer from batch_start_offset */
++	__u32 batch_len;
++	__u32 DR1;
++	__u32 DR4;
++	__u32 num_cliprects;
++	/** This is a struct drm_clip_rect *cliprects */
++	__u64 cliprects_ptr;
++	__u64 flags; /* currently unused */
++	__u64 rsvd1;
++	__u64 rsvd2;
++};
++
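A user-space sketch of a minimal EXECBUFFER2 submission, assuming an open DRM fd, a batch buffer handle and its length:

    struct drm_i915_gem_exec_object2 obj = {
            .handle = batch_handle,
            .relocation_count = 0,
    };
    struct drm_i915_gem_execbuffer2 execbuf = {
            .buffers_ptr = (__u64)(unsigned long)&obj,
            .buffer_count = 1,
            .batch_start_offset = 0,
            .batch_len = batch_len,
    };
    drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);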
+ struct drm_i915_gem_pin {
+ 	/** Handle of the buffer to be pinned. */
+ 	__u32 handle;
+@@ -686,4 +746,70 @@ struct drm_i915_gem_madvise {
+ 	__u32 retained;
+ };
+ 
++/* flags */
++#define I915_OVERLAY_TYPE_MASK 		0xff
++#define I915_OVERLAY_YUV_PLANAR 	0x01
++#define I915_OVERLAY_YUV_PACKED 	0x02
++#define I915_OVERLAY_RGB		0x03
++
++#define I915_OVERLAY_DEPTH_MASK		0xff00
++#define I915_OVERLAY_RGB24		0x1000
++#define I915_OVERLAY_RGB16		0x2000
++#define I915_OVERLAY_RGB15		0x3000
++#define I915_OVERLAY_YUV422		0x0100
++#define I915_OVERLAY_YUV411		0x0200
++#define I915_OVERLAY_YUV420		0x0300
++#define I915_OVERLAY_YUV410		0x0400
++
++#define I915_OVERLAY_SWAP_MASK		0xff0000
++#define I915_OVERLAY_NO_SWAP		0x000000
++#define I915_OVERLAY_UV_SWAP		0x010000
++#define I915_OVERLAY_Y_SWAP		0x020000
++#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
++
++#define I915_OVERLAY_FLAGS_MASK		0xff000000
++#define I915_OVERLAY_ENABLE		0x01000000
++
++struct drm_intel_overlay_put_image {
++	/* various flags and src format description */
++	__u32 flags;
++	/* source picture description */
++	__u32 bo_handle;
++	/* stride values and offsets are in bytes, buffer relative */
++	__u16 stride_Y; /* stride for packed formats */
++	__u16 stride_UV;
++	__u32 offset_Y; /* offset for packed formats */
++	__u32 offset_U;
++	__u32 offset_V;
++	/* in pixels */
++	__u16 src_width;
++	__u16 src_height;
++	/* to compensate the scaling factors for partially covered surfaces */
++	__u16 src_scan_width;
++	__u16 src_scan_height;
++	/* output crtc description */
++	__u32 crtc_id;
++	__u16 dst_x;
++	__u16 dst_y;
++	__u16 dst_width;
++	__u16 dst_height;
++};
++
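A sketch of how the flag groups above combine for one update, assuming an open DRM fd, a source buffer handle and a CRTC id; the sizes are illustrative:

    /* Show a packed YUV 4:2:2 720x480 source, unscaled. */
    struct drm_intel_overlay_put_image img = {
            .flags = I915_OVERLAY_YUV_PACKED | I915_OVERLAY_YUV422 |
                     I915_OVERLAY_ENABLE,
            .bo_handle = bo_handle,
            .stride_Y = 720 * 2,    /* bytes per line, packed */
            .src_width = 720,
            .src_height = 480,
            .src_scan_width = 720,
            .src_scan_height = 480,
            .crtc_id = crtc_id,
            .dst_width = 720,
            .dst_height = 480,
    };
    drmIoctl(fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &img);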
++/* flags */
++#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
++#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
++struct drm_intel_overlay_attrs {
++	__u32 flags;
++	__u32 color_key;
++	__s32 brightness;
++	__u32 contrast;
++	__u32 saturation;
++	__u32 gamma0;
++	__u32 gamma1;
++	__u32 gamma2;
++	__u32 gamma3;
++	__u32 gamma4;
++	__u32 gamma5;
++};
++
+ #endif				/* _I915_DRM_H_ */
+diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
+index 325fd6f..3ffbc47 100644
+--- a/include/drm/mga_drm.h
++++ b/include/drm/mga_drm.h
+@@ -35,7 +35,7 @@
+ #ifndef __MGA_DRM_H__
+ #define __MGA_DRM_H__
+ 
+-#include <linux/types.h>
++#include "drm.h"
+ 
+ /* WARNING: If you change any of these defines, make sure to change the
+  * defines in the Xserver file (mga_sarea.h)
+diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
+new file mode 100644
+index 0000000..f745948
+--- /dev/null
++++ b/include/drm/nouveau_drm.h
+@@ -0,0 +1,221 @@
++/*
++ * Copyright 2005 Stephane Marchesin.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __NOUVEAU_DRM_H__
++#define __NOUVEAU_DRM_H__
++
++#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15
++
++struct drm_nouveau_channel_alloc {
++	uint32_t     fb_ctxdma_handle;
++	uint32_t     tt_ctxdma_handle;
++
++	int          channel;
++
++	/* Notifier memory */
++	uint32_t     notifier_handle;
++
++	/* DRM-enforced subchannel assignments */
++	struct {
++		uint32_t handle;
++		uint32_t grclass;
++	} subchan[8];
++	uint32_t nr_subchan;
++};
++
++struct drm_nouveau_channel_free {
++	int channel;
++};
++
++struct drm_nouveau_grobj_alloc {
++	int      channel;
++	uint32_t handle;
++	int      class;
++};
++
++struct drm_nouveau_notifierobj_alloc {
++	uint32_t channel;
++	uint32_t handle;
++	uint32_t size;
++	uint32_t offset;
++};
++
++struct drm_nouveau_gpuobj_free {
++	int      channel;
++	uint32_t handle;
++};
++
++/* FIXME : maybe unify {GET,SET}PARAMs */
++#define NOUVEAU_GETPARAM_PCI_VENDOR      3
++#define NOUVEAU_GETPARAM_PCI_DEVICE      4
++#define NOUVEAU_GETPARAM_BUS_TYPE        5
++#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
++#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
++#define NOUVEAU_GETPARAM_FB_SIZE         8
++#define NOUVEAU_GETPARAM_AGP_SIZE        9
++#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
++#define NOUVEAU_GETPARAM_CHIPSET_ID      11
++#define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
++#define NOUVEAU_GETPARAM_GRAPH_UNITS     13
++struct drm_nouveau_getparam {
++	uint64_t param;
++	uint64_t value;
++};
++
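A user-space sketch of the GETPARAM round trip, assuming an open DRM fd and libdrm's drmCommandWriteRead(); the command index DRM_NOUVEAU_GETPARAM is defined near the end of this header:

    struct drm_nouveau_getparam gp = {
            .param = NOUVEAU_GETPARAM_CHIPSET_ID,
    };
    if (drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM,
                            &gp, sizeof(gp)) == 0)
            printf("chipset id: 0x%llx\n",
                   (unsigned long long)gp.value);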
++struct drm_nouveau_setparam {
++	uint64_t param;
++	uint64_t value;
++};
++
++#define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
++#define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
++#define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
++#define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
++
++struct drm_nouveau_gem_info {
++	uint32_t handle;
++	uint32_t domain;
++	uint64_t size;
++	uint64_t offset;
++	uint64_t map_handle;
++	uint32_t tile_mode;
++	uint32_t tile_flags;
++};
++
++struct drm_nouveau_gem_new {
++	struct drm_nouveau_gem_info info;
++	uint32_t channel_hint;
++	uint32_t align;
++};
++
++struct drm_nouveau_gem_pushbuf_bo {
++	uint64_t user_priv;
++	uint32_t handle;
++	uint32_t read_domains;
++	uint32_t write_domains;
++	uint32_t valid_domains;
++	uint32_t presumed_ok;
++	uint32_t presumed_domain;
++	uint64_t presumed_offset;
++};
++
++#define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
++#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
++#define NOUVEAU_GEM_RELOC_OR   (1 << 2)
++struct drm_nouveau_gem_pushbuf_reloc {
++	uint32_t bo_index;
++	uint32_t reloc_index;
++	uint32_t flags;
++	uint32_t data;
++	uint32_t vor;
++	uint32_t tor;
++};
++
++#define NOUVEAU_GEM_MAX_BUFFERS 1024
++#define NOUVEAU_GEM_MAX_RELOCS 1024
++
++struct drm_nouveau_gem_pushbuf {
++	uint32_t channel;
++	uint32_t nr_dwords;
++	uint32_t nr_buffers;
++	uint32_t nr_relocs;
++	uint64_t dwords;
++	uint64_t buffers;
++	uint64_t relocs;
++};
++
++struct drm_nouveau_gem_pushbuf_call {
++	uint32_t channel;
++	uint32_t handle;
++	uint32_t offset;
++	uint32_t nr_buffers;
++	uint32_t nr_relocs;
++	uint32_t nr_dwords;
++	uint64_t buffers;
++	uint64_t relocs;
++	uint32_t suffix0;
++	uint32_t suffix1;
++	/* below only accessed for CALL2 */
++	uint64_t vram_available;
++	uint64_t gart_available;
++};
++
++struct drm_nouveau_gem_pin {
++	uint32_t handle;
++	uint32_t domain;
++	uint64_t offset;
++};
++
++struct drm_nouveau_gem_unpin {
++	uint32_t handle;
++};
++
++#define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
++#define NOUVEAU_GEM_CPU_PREP_NOBLOCK                                 0x00000002
++#define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
++struct drm_nouveau_gem_cpu_prep {
++	uint32_t handle;
++	uint32_t flags;
++};
++
++struct drm_nouveau_gem_cpu_fini {
++	uint32_t handle;
++};
++
++struct drm_nouveau_gem_tile {
++	uint32_t handle;
++	uint32_t offset;
++	uint32_t size;
++	uint32_t tile_mode;
++	uint32_t tile_flags;
++};
++
++enum nouveau_bus_type {
++	NV_AGP     = 0,
++	NV_PCI     = 1,
++	NV_PCIE    = 2,
++};
++
++struct drm_nouveau_sarea {
++};
++
++#define DRM_NOUVEAU_CARD_INIT          0x00
++#define DRM_NOUVEAU_GETPARAM           0x01
++#define DRM_NOUVEAU_SETPARAM           0x02
++#define DRM_NOUVEAU_CHANNEL_ALLOC      0x03
++#define DRM_NOUVEAU_CHANNEL_FREE       0x04
++#define DRM_NOUVEAU_GROBJ_ALLOC        0x05
++#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x06
++#define DRM_NOUVEAU_GPUOBJ_FREE        0x07
++#define DRM_NOUVEAU_GEM_NEW            0x40
++#define DRM_NOUVEAU_GEM_PUSHBUF        0x41
++#define DRM_NOUVEAU_GEM_PUSHBUF_CALL   0x42
++#define DRM_NOUVEAU_GEM_PIN            0x43 /* !KMS only */
++#define DRM_NOUVEAU_GEM_UNPIN          0x44 /* !KMS only */
++#define DRM_NOUVEAU_GEM_CPU_PREP       0x45
++#define DRM_NOUVEAU_GEM_CPU_FINI       0x46
++#define DRM_NOUVEAU_GEM_INFO           0x47
++#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2  0x48
++
++#endif /* __NOUVEAU_DRM_H__ */
+diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
+index 3b9932a..39537f3 100644
+--- a/include/drm/radeon_drm.h
++++ b/include/drm/radeon_drm.h
+@@ -33,7 +33,7 @@
+ #ifndef __RADEON_DRM_H__
+ #define __RADEON_DRM_H__
+ 
+-#include <linux/types.h>
++#include "drm.h"
+ 
+ /* WARNING: If you change any of these defines, make sure to change the
+  * defines in the X server file (radeon_sarea.h)
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index 4911461..81eb9f4 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -44,6 +44,29 @@ struct ttm_bo_device;
+ 
+ struct drm_mm_node;
+ 
++
++/**
++ * struct ttm_placement
++ *
++ * @fpfn:		first valid page frame number to put the object
++ * @lpfn:		last valid page frame number to put the object
++ * @num_placement:	number of preferred placements
++ * @placement:		preferred placements
++ * @num_busy_placement:	number of preferred placements when the buffer must be evicted
++ * @busy_placement:	preferred placements when the buffer must be evicted
++ *
++ * Structure indicating the placement you request for an object.
++ */
++struct ttm_placement {
++	unsigned	fpfn;
++	unsigned	lpfn;
++	unsigned	num_placement;
++	const uint32_t	*placement;
++	unsigned	num_busy_placement;
++	const uint32_t	*busy_placement;
++};
++
++
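A sketch of a placement request using flags from ttm/ttm_placement.h: prefer VRAM, fall back to system memory when eviction is needed; real drivers usually OR in caching flags as well:

    static const uint32_t vram_only[] = { TTM_PL_FLAG_VRAM };
    static const uint32_t sys_fallback[] = { TTM_PL_FLAG_SYSTEM };

    struct ttm_placement placement = {
            .fpfn = 0,                      /* no page-range restriction */
            .lpfn = 0,
            .num_placement = 1,
            .placement = vram_only,
            .num_busy_placement = 1,
            .busy_placement = sys_fallback,
    };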
+ /**
+  * struct ttm_mem_reg
+  *
+@@ -109,10 +132,6 @@ struct ttm_tt;
+  * the object is destroyed.
+  * @event_queue: Queue for processes waiting on buffer object status change.
+  * @lock: spinlock protecting mostly synchronization members.
+- * @proposed_placement: Proposed placement for the buffer. Changed only by the
+- * creator prior to validation as opposed to bo->mem.proposed_flags which is
+- * changed by the implementation prior to a buffer move if it wants to outsmart
+- * the buffer creator / user. This latter happens, for example, at eviction.
+  * @mem: structure describing current placement.
+  * @persistant_swap_storage: Usually the swap storage is deleted for buffers
+  * pinned in physical memory. If this behaviour is not desired, this member
+@@ -177,7 +196,6 @@ struct ttm_buffer_object {
+ 	 * Members protected by the bo::reserved lock.
+ 	 */
+ 
+-	uint32_t proposed_placement;
+ 	struct ttm_mem_reg mem;
+ 	struct file *persistant_swap_storage;
+ 	struct ttm_tt *ttm;
+@@ -285,29 +303,30 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
+  * Note: It might be necessary to block validations before the
+  * wait by reserving the buffer.
+  * Returns -EBUSY if no_wait is true and the buffer is busy.
+- * Returns -ERESTART if interrupted by a signal.
++ * Returns -ERESTARTSYS if interrupted by a signal.
+  */
+ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+ 		       bool interruptible, bool no_wait);
+ /**
+- * ttm_buffer_object_validate
++ * ttm_bo_validate
+  *
+  * @bo: The buffer object.
+- * @proposed_placement: Proposed_placement for the buffer object.
++ * @placement: Proposed placement for the buffer object.
+  * @interruptible: Sleep interruptible if sleeping.
+  * @no_wait: Return immediately if the buffer is busy.
+  *
+  * Changes placement and caching policy of the buffer object
+- * according to bo::proposed_flags.
++ * according to the proposed placement.
+  * Returns
+- * -EINVAL on invalid proposed_flags.
++ * -EINVAL on invalid proposed placement.
+  * -ENOMEM on out-of-memory condition.
+  * -EBUSY if no_wait is true and buffer busy.
+- * -ERESTART if interrupted by a signal.
++ * -ERESTARTSYS if interrupted by a signal.
+  */
+-extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+-				      uint32_t proposed_placement,
+-				      bool interruptible, bool no_wait);
++extern int ttm_bo_validate(struct ttm_buffer_object *bo,
++				struct ttm_placement *placement,
++				bool interruptible, bool no_wait);
++
+ /**
+  * ttm_bo_unref
+  *
+@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+  * waiting for buffer idle. This lock is recursive.
+  * Returns
+  * -EBUSY if the buffer is busy and no_wait is true.
+- * -ERESTART if interrupted by a signal.
++ * -ERESTARTSYS if interrupted by a signal.
+  */
+ 
+ extern int
+@@ -343,7 +362,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
+ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
+ 
+ /**
+- * ttm_buffer_object_init
++ * ttm_bo_init
+  *
+  * @bdev: Pointer to a ttm_bo_device struct.
+  * @bo: Pointer to a ttm_buffer_object to be initialized.
+@@ -371,20 +390,20 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
+  * Returns
+  * -ENOMEM: Out of memory.
+  * -EINVAL: Invalid placement flags.
+- * -ERESTART: Interrupted by signal while sleeping waiting for resources.
++ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+  */
+ 
+-extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+-				  struct ttm_buffer_object *bo,
+-				  unsigned long size,
+-				  enum ttm_bo_type type,
+-				  uint32_t flags,
+-				  uint32_t page_alignment,
+-				  unsigned long buffer_start,
+-				  bool interrubtible,
+-				  struct file *persistant_swap_storage,
+-				  size_t acc_size,
+-				  void (*destroy) (struct ttm_buffer_object *));
++extern int ttm_bo_init(struct ttm_bo_device *bdev,
++			struct ttm_buffer_object *bo,
++			unsigned long size,
++			enum ttm_bo_type type,
++			struct ttm_placement *placement,
++			uint32_t page_alignment,
++			unsigned long buffer_start,
++			bool interruptible,
++			struct file *persistant_swap_storage,
++			size_t acc_size,
++			void (*destroy) (struct ttm_buffer_object *));
+ /**
+  * ttm_bo_synccpu_object_init
+  *
+@@ -405,47 +424,43 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
+  * GEM user interface.
+  * @p_bo: On successful completion *p_bo points to the created object.
+  *
+- * This function allocates a ttm_buffer_object, and then calls
+- * ttm_buffer_object_init on that object.
+- * The destroy function is set to kfree().
++ * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
++ * on that object. The destroy function is set to kfree().
+  * Returns
+  * -ENOMEM: Out of memory.
+  * -EINVAL: Invalid placement flags.
+- * -ERESTART: Interrupted by signal while waiting for resources.
++ * -ERESTARTSYS: Interrupted by signal while waiting for resources.
+  */
+ 
+-extern int ttm_buffer_object_create(struct ttm_bo_device *bdev,
+-				    unsigned long size,
+-				    enum ttm_bo_type type,
+-				    uint32_t flags,
+-				    uint32_t page_alignment,
+-				    unsigned long buffer_start,
+-				    bool interruptible,
+-				    struct file *persistant_swap_storage,
+-				    struct ttm_buffer_object **p_bo);
++extern int ttm_bo_create(struct ttm_bo_device *bdev,
++				unsigned long size,
++				enum ttm_bo_type type,
++				struct ttm_placement *placement,
++				uint32_t page_alignment,
++				unsigned long buffer_start,
++				bool interruptible,
++				struct file *persistant_swap_storage,
++				struct ttm_buffer_object **p_bo);
+ 
+ /**
+  * ttm_bo_check_placement
+  *
+- * @bo: the buffer object.
+- * @set_flags: placement flags to set.
+- * @clr_flags: placement flags to clear.
++ * @bo:		the buffer object.
++ * @placement:	placements
+  *
+  * Performs minimal validity checking on an intended change of
+  * placement flags.
+  * Returns
+  * -EINVAL: Intended change is invalid or not allowed.
+  */
+-
+ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+-				  uint32_t set_flags, uint32_t clr_flags);
++					struct ttm_placement *placement);
+ 
+ /**
+  * ttm_bo_init_mm
+  *
+  * @bdev: Pointer to a ttm_bo_device struct.
+  * @mem_type: The memory type.
+- * @p_offset: offset for managed area in pages.
+  * @p_size: size managed area in pages.
+  *
+  * Initialize a manager for a given memory type.
+@@ -458,7 +473,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+  */
+ 
+ extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
+-			  unsigned long p_offset, unsigned long p_size);
++				unsigned long p_size);
+ /**
+  * ttm_bo_clean_mm
+  *
+@@ -503,7 +518,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+  *
+  * Returns:
+  * -EINVAL: Invalid or uninitialized memory type.
+- * -ERESTART: The call was interrupted by a signal while waiting to
++ * -ERESTARTSYS: The call was interrupted by a signal while waiting to
+  * evict a buffer.
+  */
+ 
+@@ -606,7 +621,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
+  * be called from the fops::read and fops::write method.
+  * Returns:
+  * See man (2) write, man(2) read. In particular,
+- * the function may return -EINTR if
++ * the function may return -ERESTARTSYS if
+  * interrupted by a signal.
+  */
+ 
+diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
+index e8cd6d2..4c4e0f8 100644
+--- a/include/drm/ttm/ttm_bo_driver.h
++++ b/include/drm/ttm/ttm_bo_driver.h
+@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
+ /**
+  * struct ttm_bo_driver
+  *
+- * @mem_type_prio: Priority array of memory types to place a buffer object in
+- * if it fits without evicting buffers from any of these memory types.
+- * @mem_busy_prio: Priority array of memory types to place a buffer object in
+- * if it needs to evict buffers to make room.
+- * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
+- * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
+  * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
+  * @invalidate_caches: Callback to invalidate read caches when a buffer object
+  * has been evicted.
+@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
+  */
+ 
+ struct ttm_bo_driver {
+-	const uint32_t *mem_type_prio;
+-	const uint32_t *mem_busy_prio;
+-	uint32_t num_mem_type_prio;
+-	uint32_t num_mem_busy_prio;
+-
+ 	/**
+ 	 * struct ttm_bo_driver member create_ttm_backend_entry
+ 	 *
+@@ -306,7 +295,8 @@ struct ttm_bo_driver {
+ 	 * finished, they'll end up in bo->mem.flags
+ 	 */
+ 
+-	 uint32_t(*evict_flags) (struct ttm_buffer_object *bo);
++	 void(*evict_flags) (struct ttm_buffer_object *bo,
++				struct ttm_placement *placement);
+ 	/**
+ 	 * struct ttm_bo_driver member move:
+ 	 *
+@@ -363,6 +353,11 @@ struct ttm_bo_driver {
+ 	/* notify the driver we are taking a fault on this BO
+ 	 * and have reserved it */
+ 	void (*fault_reserve_notify)(struct ttm_buffer_object *bo);
++
++	/**
++	 * notify the driver that we're about to swap out this bo
++	 */
++	void (*swap_notify) (struct ttm_buffer_object *bo);
+ };
+ 
+ /**
+@@ -545,6 +540,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
+ extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+ 
+ /**
++ * ttm_tt_populate:
++ *
++ * @ttm: The struct ttm_tt to contain the backing pages.
++ *
++ * Add backing pages to all of @ttm
++ */
++extern int ttm_tt_populate(struct ttm_tt *ttm);
++
++/**
+  * ttm_ttm_destroy:
+  *
+  * @ttm: The struct ttm_tt.
+@@ -639,12 +643,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
+  * -EBUSY: No space available (only if no_wait == 1).
+  * -ENOMEM: Could not allocate memory for the buffer object, either due to
+  * fragmentation or concurrent allocators.
+- * -ERESTART: An interruptible sleep was interrupted by a signal.
++ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+  */
+ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+-			    uint32_t proposed_placement,
+-			    struct ttm_mem_reg *mem,
+-			    bool interruptible, bool no_wait);
++				struct ttm_placement *placement,
++				struct ttm_mem_reg *mem,
++				bool interruptible, bool no_wait);
+ /**
+  * ttm_bo_wait_for_cpu
+  *
+@@ -654,7 +658,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+  * Wait until a buffer object is no longer sync'ed for CPU access.
+  * Returns:
+  * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
+- * -ERESTART: An interruptible sleep was interrupted by a signal.
++ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
+  */
+ 
+ extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
+@@ -758,7 +762,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+  * -EAGAIN: The reservation may cause a deadlock.
+  * Release all buffer reservations, wait for @bo to become unreserved and
+  * try again. (only if use_sequence == 1).
+- * -ERESTART: A wait for the buffer to become unreserved was interrupted by
++ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+  * a signal. Release all buffer reservations and return to user-space.
+  */
+ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
+@@ -799,7 +803,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+  *
+  * Returns:
+  * -EBUSY: If no_wait == 1 and the buffer is already reserved.
+- * -ERESTART: If interruptible == 1 and the process received a signal
++ * -ERESTARTSYS: If interruptible == 1 and the process received a signal
+  * while sleeping.
+  */
+ extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
+diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
+new file mode 100644
+index 0000000..cd2c475
+--- /dev/null
++++ b/include/drm/ttm/ttm_execbuf_util.h
+@@ -0,0 +1,107 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
++ */
++
++#ifndef _TTM_EXECBUF_UTIL_H_
++#define _TTM_EXECBUF_UTIL_H_
++
++#include "ttm/ttm_bo_api.h"
++#include <linux/list.h>
++
++/**
++ * struct ttm_validate_buffer
++ *
++ * @head:           list head for thread-private list.
++ * @bo:             refcounted buffer object pointer.
++ * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
++ * adding a new sync object.
++ * @reserved:       Indicates whether @bo has been reserved for validation.
++ */
++
++struct ttm_validate_buffer {
++	struct list_head head;
++	struct ttm_buffer_object *bo;
++	void *new_sync_obj_arg;
++	bool reserved;
++};
++
++/**
++ * function ttm_eu_backoff_reservation
++ *
++ * @list:     thread private list of ttm_validate_buffer structs.
++ *
++ * Undoes all buffer validation reservations for bos pointed to by
++ * the list entries.
++ */
++
++extern void ttm_eu_backoff_reservation(struct list_head *list);
++
++/**
++ * function ttm_eu_reserve_buffers
++ *
++ * @list:    thread private list of ttm_validate_buffer structs.
++ * @val_seq: A unique sequence number.
++ *
++ * Tries to reserve bos pointed to by the list entries for validation.
++ * If the function returns 0, all buffers are marked as "unfenced",
++ * taken off the lru lists and are not synced for write CPU usage.
++ *
++ * If the function detects a deadlock due to multiple threads trying to
++ * reserve the same buffers in reverse order, all threads except one will
++ * back off and retry. This function may sleep while waiting for
++ * CPU write reservations to be cleared, and for other threads to
++ * unreserve their buffers.
++ *
++ * This function may return -ERESTART or -EAGAIN if the calling process
++ * receives a signal while waiting. In that case, no buffers on the list
++ * will be reserved upon return.
++ *
++ * Buffers reserved by this function should be unreserved by
++ * a call to either ttm_eu_backoff_reservation() or
++ * ttm_eu_fence_buffer_objects() when command submission is complete or
++ * has failed.
++ */
++
++extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
++
++/**
++ * function ttm_eu_fence_buffer_objects.
++ *
++ * @list:        thread private list of ttm_validate_buffer structs.
++ * @sync_obj:    The new sync object for the buffers.
++ *
++ * This function should be called when command submission is complete, and
++ * it will add a new sync object to bos pointed to by entries on @list.
++ * It also unreserves all buffers, putting them on lru lists.
++ *
++ */
++
++extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
++
++#endif
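A sketch of the reserve/submit/fence sequence the comments above describe; list, val_seq and sync_obj are assumed driver state, and foo_submit_commands is a hypothetical driver step:

    ret = ttm_eu_reserve_buffers(&list, val_seq);
    if (unlikely(ret != 0))
            return ret;                 /* nothing is left reserved */

    ret = foo_submit_commands(&list);
    if (unlikely(ret != 0)) {
            ttm_eu_backoff_reservation(&list);
            return ret;
    }
    ttm_eu_fence_buffer_objects(&list, sync_obj);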
+diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
+new file mode 100644
+index 0000000..81ba0b0
+--- /dev/null
++++ b/include/drm/ttm/ttm_lock.h
+@@ -0,0 +1,247 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
++ */
++
++/** @file ttm_lock.h
++ * This file implements a simple replacement for the buffer manager's use
++ * of the DRM heavyweight hardware lock.
++ * The lock is a read-write lock. Taking it in read mode and write mode
++ * is relatively fast, and intended for in-kernel use only.
++ *
++ * The vt mode is used only when there is a need to block all
++ * user-space processes from validating buffers.
++ * It's allowed to leave kernel space with the vt lock held.
++ * If a user-space process dies while having the vt-lock,
++ * it will be released during the file descriptor release. The vt lock
++ * excludes write lock and read lock.
++ *
++ * The suspend mode is used to lock out all TTM users when preparing for
++ * and executing suspend operations.
++ *
++ */
++
++#ifndef _TTM_LOCK_H_
++#define _TTM_LOCK_H_
++
++#include "ttm/ttm_object.h"
++#include <linux/wait.h>
++#include <asm/atomic.h>
++
++/**
++ * struct ttm_lock
++ *
++ * @base: ttm base object used solely to release the lock if the client
++ * holding the lock dies.
++ * @queue: Queue for processes waiting for lock change-of-status.
++ * @lock: Spinlock protecting some lock members.
++ * @rw: Read-write lock counter. Protected by @lock.
++ * @flags: Lock state. Protected by @lock.
++ * @kill_takers: Boolean whether to kill takers of the lock.
++ * @signal: Signal to send when kill_takers is true.
++ */
++
++struct ttm_lock {
++	struct ttm_base_object base;
++	wait_queue_head_t queue;
++	spinlock_t lock;
++	int32_t rw;
++	uint32_t flags;
++	bool kill_takers;
++	int signal;
++	struct ttm_object_file *vt_holder;
++};
++
++
++/**
++ * ttm_lock_init
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * Initializes the lock.
++ */
++extern void ttm_lock_init(struct ttm_lock *lock);
++
++/**
++ * ttm_read_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a read lock.
++ */
++extern void ttm_read_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_read_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ *
++ * Takes the lock in read mode.
++ * Returns:
++ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
++ */
++extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
++
++/**
++ * ttm_read_trylock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ *
++ * Tries to take the lock in read mode. If the lock is already held
++ * in write mode, the function will return -EBUSY. If the lock is held
++ * in vt or suspend mode, the function will sleep until these modes
++ * are unlocked.
++ *
++ * Returns:
++ * -EBUSY The lock was already held in write mode.
++ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
++ */
++extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
++
++/**
++ * ttm_write_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a write lock.
++ */
++extern void ttm_write_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_write_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ *
++ * Takes the lock in write mode.
++ * Returns:
++ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
++ */
++extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
++
++/**
++ * ttm_lock_downgrade
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Downgrades a write lock to a read lock.
++ */
++extern void ttm_lock_downgrade(struct ttm_lock *lock);
++
++/**
++ * ttm_suspend_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Takes the lock in suspend mode. Excludes read and write mode.
++ */
++extern void ttm_suspend_lock(struct ttm_lock *lock);
++
++/**
++ * ttm_suspend_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a suspend lock.
++ */
++extern void ttm_suspend_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_vt_lock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @interruptible: Interruptible sleeping while waiting for a lock.
++ * @tfile: Pointer to a struct ttm_object_file to register the lock with.
++ *
++ * Takes the lock in vt mode.
++ * Returns:
++ * -ERESTARTSYS If interrupted by a signal and interruptible is true.
++ * -ENOMEM: Out of memory when locking.
++ */
++extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
++		       struct ttm_object_file *tfile);
++
++/**
++ * ttm_vt_unlock
++ *
++ * @lock: Pointer to a struct ttm_lock
++ *
++ * Releases a vt lock.
++ * Returns:
++ * -EINVAL If the lock was not held.
++ */
++extern int ttm_vt_unlock(struct ttm_lock *lock);
++
++/**
++ * ttm_lock_set_kill
++ *
++ * @lock: Pointer to a struct ttm_lock
++ * @val: Boolean whether to kill processes taking the lock.
++ * @signal: Signal to send to the process taking the lock.
++ *
++ * The kill-when-taking-lock functionality is used to kill processes that keep
++ * on using the TTM functionality when its resources have been taken down, for
++ * example when the X server exits. A typical sequence would look like this:
++ * - X server takes lock in write mode.
++ * - ttm_lock_set_kill() is called with @val set to true.
++ * - As part of X server exit, TTM resources are taken down.
++ * - X server releases the lock on file release.
++ * - Another dri client wants to render, takes the lock and is killed.
++ *
++ */
++static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
++				     int signal)
++{
++	lock->kill_takers = val;
++	if (val)
++		lock->signal = signal;
++}
++
++#endif
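
For orientation, here is a minimal sketch of how a driver might take this
lock around a buffer-validating ioctl; the driver structure and function
names are hypothetical, not part of this patch:

	struct my_driver {
		struct ttm_lock ttm_lock;	/* hypothetical embedding */
	};

	static int my_validate_ioctl(struct my_driver *dev_priv)
	{
		int ret;

		/* Sleeps while vt or suspend mode is held; interruptible. */
		ret = ttm_read_lock(&dev_priv->ttm_lock, true);
		if (unlikely(ret != 0))
			return ret;	/* typically -ERESTARTSYS */

		/* ... validate and submit buffers here ... */

		ttm_read_unlock(&dev_priv->ttm_lock);
		return 0;
	}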
+diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
+new file mode 100644
+index 0000000..0d9db09
+--- /dev/null
++++ b/include/drm/ttm/ttm_object.h
+@@ -0,0 +1,271 @@
++/**************************************************************************
++ *
++ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++/*
++ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
++ */
++/** @file ttm_object.h
++ *
++ * Base- and reference object implementation for the various
++ * ttm objects. Implements reference counting, minimal security checks
++ * and release on file close.
++ */
++
++#ifndef _TTM_OBJECT_H_
++#define _TTM_OBJECT_H_
++
++#include <linux/list.h>
++#include "drm_hashtab.h"
++#include <linux/kref.h>
++#include <ttm/ttm_memory.h>
++
++/**
++ * enum ttm_ref_type
++ *
++ * Describes what type of reference a ref object holds.
++ *
++ * TTM_REF_USAGE is a simple refcount on a base object.
++ *
++ * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
++ * buffer object.
++ *
++ * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
++ * buffer object.
++ *
++ */
++
++enum ttm_ref_type {
++	TTM_REF_USAGE,
++	TTM_REF_SYNCCPU_READ,
++	TTM_REF_SYNCCPU_WRITE,
++	TTM_REF_NUM
++};
++
++/**
++ * enum ttm_object_type
++ *
++ * One entry per ttm object type.
++ * Device-specific types should use the
++ * ttm_driver_type<n> types.
++ */
++
++enum ttm_object_type {
++	ttm_fence_type,
++	ttm_buffer_type,
++	ttm_lock_type,
++	ttm_driver_type0 = 256,
++	ttm_driver_type1,
++	ttm_driver_type2,
++	ttm_driver_type3,
++	ttm_driver_type4,
++	ttm_driver_type5
++};
++
++struct ttm_object_file;
++struct ttm_object_device;
++
++/**
++ * struct ttm_base_object
++ *
++ * @hash: hash entry for the per-device object hash.
++ * @object_type: derived type this object is base class for.
++ * @shareable: Other ttm_object_files can access this object.
++ *
++ * @tfile: Pointer to ttm_object_file of the creator.
++ * NULL if the object was not created by a user request.
++ * (kernel object).
++ *
++ * @refcount: Number of references to this object, not
++ * including the hash entry. A reference to a base object can
++ * only be held by a ref object.
++ *
++ * @refcount_release: A function to be called when there are
++ * no more references to this object. This function should
++ * destroy the object (or make sure destruction eventually happens),
++ * and when it is called, the object has
++ * already been taken out of the per-device hash. The function
++ * should set the pointer pointed to by "base" to NULL.
++ *
++ * @ref_obj_release: A function to be called when a reference object
++ * with another ttm_ref_type than TTM_REF_USAGE is deleted.
++ * This function may, for example, release a lock held by a user-space
++ * process.
++ *
++ * This struct is intended to be used as a base struct for objects that
++ * are visible to user-space. It provides a global name, race-safe
++ * access and refcounting, minimal access control and hooks for unref actions.
++ */
++
++struct ttm_base_object {
++	struct drm_hash_item hash;
++	enum ttm_object_type object_type;
++	bool shareable;
++	struct ttm_object_file *tfile;
++	struct kref refcount;
++	void (*refcount_release) (struct ttm_base_object **base);
++	void (*ref_obj_release) (struct ttm_base_object *base,
++				 enum ttm_ref_type ref_type);
++};
++
++/**
++ * ttm_base_object_init
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @base: The struct ttm_base_object to initialize.
++ * @shareable: This object is shareable with other applications.
++ * (different @tfile pointers.)
++ * @type: The object type.
++ * @refcount_release: See the struct ttm_base_object description.
++ * @ref_obj_release: See the struct ttm_base_object description.
++ *
++ * Initializes a struct ttm_base_object.
++ */
++
++extern int ttm_base_object_init(struct ttm_object_file *tfile,
++				struct ttm_base_object *base,
++				bool shareable,
++				enum ttm_object_type type,
++				void (*refcount_release) (struct ttm_base_object
++							  **),
++				void (*ref_obj_release) (struct ttm_base_object
++							 *,
++							 enum ttm_ref_type
++							 ref_type));
++
++/**
++ * ttm_base_object_lookup
++ *
++ * @tfile: Pointer to a struct ttm_object_file.
++ * @key: Hash key
++ *
++ * Looks up a struct ttm_base_object with the key @key.
++ * Also verifies that the object is visible to the application, by
++ * comparing the @tfile argument and checking the object shareable flag.
++ */
++
++extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
++						      *tfile, uint32_t key);
++
++/**
++ * ttm_base_object_unref
++ *
++ * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
++ *
++ * Decrements the base object refcount and clears the pointer pointed to by
++ * p_base.
++ */
++
++extern void ttm_base_object_unref(struct ttm_base_object **p_base);
++
++/**
++ * ttm_ref_object_add.
++ *
++ * @tfile: A struct ttm_object_file representing the application owning the
++ * ref_object.
++ * @base: The base object to reference.
++ * @ref_type: The type of reference.
++ * @existed: Upon completion, indicates that an identical reference object
++ * already existed, and the refcount was upped on that object instead.
++ *
++ * Adding a ref object to a base object is basically like referencing the
++ * base object, but a user-space application holds the reference. When the
++ * file corresponding to @tfile is closed, all its reference objects are
++ * deleted. A reference object can have different types depending on what
++ * it's intended for. It can be refcounting to prevent object destruction.
++ * When user-space takes a lock, it can add a ref object to that lock to
++ * make sure the lock is released if the application dies. A ref object
++ * will hold a single reference on a base object.
++ */
++extern int ttm_ref_object_add(struct ttm_object_file *tfile,
++			      struct ttm_base_object *base,
++			      enum ttm_ref_type ref_type, bool *existed);
++/**
++ * ttm_ref_object_base_unref
++ *
++ * @key: Key representing the base object.
++ * @ref_type: Ref type of the ref object to be dereferenced.
++ *
++ * Unreference a ref object with type @ref_type
++ * on the base object identified by @key. If there are no duplicate
++ * references, the ref object will be destroyed and the base object
++ * will be unreferenced.
++ */
++extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
++				     unsigned long key,
++				     enum ttm_ref_type ref_type);
++
++/**
++ * ttm_object_file_init - initialize a struct ttm_object_file
++ *
++ * @tdev: A struct ttm_object_device this file is initialized on.
++ * @hash_order: Order of the hash table used to hold the reference objects.
++ *
++ * This is typically called by the file_ops::open function.
++ */
++
++extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
++						    *tdev,
++						    unsigned int hash_order);
++
++/**
++ * ttm_object_file_release - release data held by a ttm_object_file
++ *
++ * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
++ * *p_tfile will be set to NULL by this function.
++ *
++ * Releases all data associated with a ttm_object_file.
++ * Typically called from file_ops::release. The caller must
++ * ensure that there are no concurrent users of tfile.
++ */
++
++extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
++
++/**
++ * ttm_object_device_init - initialize a struct ttm_object_device
++ *
++ * @mem_glob: Pointer to the struct ttm_mem_global used for accounting.
++ * @hash_order: Order of hash table used to hash the base objects.
++ *
++ * This function is typically called on device initialization to prepare
++ * data structures needed for ttm base and ref objects.
++ */
++
++extern struct ttm_object_device *ttm_object_device_init
++    (struct ttm_mem_global *mem_glob, unsigned int hash_order);
++
++/**
++ * ttm_object_device_release - release data held by a ttm_object_device
++ *
++ * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
++ * *p_tdev will be set to NULL by this function.
++ *
++ * Releases all data associated with a ttm_object_device.
++ * Typically called from driver::unload before the destruction of the
++ * device private data structure.
++ */
++
++extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
++
++#endif
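
As a rough illustration of the intended call flow for base objects, a
hedged sketch with all driver-side names hypothetical (error handling
abbreviated):

	struct my_object {
		struct ttm_base_object base;
		/* ... driver payload ... */
	};

	static void my_release(struct ttm_base_object **p_base)
	{
		struct my_object *obj =
		    container_of(*p_base, struct my_object, base);

		*p_base = NULL;		/* required by the doc above */
		kfree(obj);
	}

	/* Creation: make the object visible (and shareable) to user-space. */
	static int my_create(struct ttm_object_file *tfile, struct my_object *obj)
	{
		return ttm_base_object_init(tfile, &obj->base, true,
					    ttm_driver_type0, &my_release, NULL);
	}

	/* Lookup in another ioctl; fails if the object isn't visible to tfile. */
	static int my_use(struct ttm_object_file *tfile, uint32_t key)
	{
		struct ttm_base_object *base = ttm_base_object_lookup(tfile, key);

		if (base == NULL)
			return -EINVAL;
		/* ... use the object ... */
		ttm_base_object_unref(&base);	/* also clears the pointer */
		return 0;
	}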
+diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
+index 170786e..fd11a5b 100644
+--- a/include/drm/via_drm.h
++++ b/include/drm/via_drm.h
+@@ -24,7 +24,7 @@
+ #ifndef _VIA_DRM_H_
+ #define _VIA_DRM_H_
+ 
+-#include <linux/types.h>
++#include "drm.h"
+ 
+ /* WARNING: These defines must be the same as what the Xserver uses.
+  * if you change them, you must change the defines in the Xserver.
+diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
+new file mode 100644
+index 0000000..c7645f4
+--- /dev/null
++++ b/include/drm/vmwgfx_drm.h
+@@ -0,0 +1,588 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++#ifndef __VMWGFX_DRM_H__
++#define __VMWGFX_DRM_H__
++
++#define DRM_VMW_MAX_SURFACE_FACES 6
++#define DRM_VMW_MAX_MIP_LEVELS 24
++
++#define DRM_VMW_EXT_NAME_LEN 128
++
++#define DRM_VMW_GET_PARAM            0
++#define DRM_VMW_ALLOC_DMABUF         1
++#define DRM_VMW_UNREF_DMABUF         2
++#define DRM_VMW_CURSOR_BYPASS        3
++/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
++#define DRM_VMW_CONTROL_STREAM       4
++#define DRM_VMW_CLAIM_STREAM         5
++#define DRM_VMW_UNREF_STREAM         6
++/* guarded by DRM_VMW_PARAM_3D == 1 */
++#define DRM_VMW_CREATE_CONTEXT       7
++#define DRM_VMW_UNREF_CONTEXT        8
++#define DRM_VMW_CREATE_SURFACE       9
++#define DRM_VMW_UNREF_SURFACE        10
++#define DRM_VMW_REF_SURFACE          11
++#define DRM_VMW_EXECBUF              12
++#define DRM_VMW_FIFO_DEBUG           13
++#define DRM_VMW_FENCE_WAIT           14
++
++
++/*************************************************************************/
++/**
++ * DRM_VMW_GET_PARAM - get device information.
++ *
++ * DRM_VMW_PARAM_FIFO_OFFSET:
++ * Offset to use to map the first page of the FIFO read-only.
++ * The fifo is mapped using the mmap() system call on the drm device.
++ *
++ * DRM_VMW_PARAM_OVERLAY_IOCTL:
++ * Does the driver support the overlay ioctl.
++ */
++
++#define DRM_VMW_PARAM_NUM_STREAMS      0
++#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
++#define DRM_VMW_PARAM_3D               2
++#define DRM_VMW_PARAM_FIFO_OFFSET      3
++#define DRM_VMW_PARAM_HW_CAPS          4
++#define DRM_VMW_PARAM_FIFO_CAPS        5
++
++/**
++ * struct drm_vmw_getparam_arg
++ *
++ * @value: Returned value. //Out
++ * @param: Parameter to query. //In.
++ *
++ * Argument to the DRM_VMW_GET_PARAM Ioctl.
++ */
++
++struct drm_vmw_getparam_arg {
++	uint64_t value;
++	uint32_t param;
++	uint32_t pad64;
++};
++
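For illustration, a user-space sketch of querying one of these parameters,
assuming libdrm's drmCommandWriteRead() helper (not part of this patch):

	#include <stdint.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	/* Returns the parameter value, or -1 if the ioctl fails. */
	static int64_t vmw_get_param(int fd, uint32_t param)
	{
		struct drm_vmw_getparam_arg arg = { .param = param };

		if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
					&arg, sizeof(arg)) != 0)
			return -1;
		return (int64_t)arg.value;
	}

	/* e.g. vmw_get_param(fd, DRM_VMW_PARAM_3D) tells whether 3D works. */
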
++/*************************************************************************/
++/**
++ * DRM_VMW_EXTENSION - Query device extensions.
++ */
++
++/**
++ * struct drm_vmw_extension_rep
++ *
++ * @exists: The queried extension exists.
++ * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
++ * @driver_sarea_offset: Offset to any space in the DRI SAREA
++ * used by the extension.
++ * @major: Major version number of the extension.
++ * @minor: Minor version number of the extension.
++ * @pl: Patch level version number of the extension.
++ *
++ * Output argument to the DRM_VMW_EXTENSION Ioctl.
++ */
++
++struct drm_vmw_extension_rep {
++	int32_t exists;
++	uint32_t driver_ioctl_offset;
++	uint32_t driver_sarea_offset;
++	uint32_t major;
++	uint32_t minor;
++	uint32_t pl;
++	uint32_t pad64;
++};
++
++/**
++ * union drm_vmw_extension_arg
++ *
++ * @extension - ASCII name of the extension to be queried. //In
++ * @rep - Reply as defined above. //Out
++ *
++ * Argument to the DRM_VMW_EXTENSION Ioctl.
++ */
++
++union drm_vmw_extension_arg {
++	char extension[DRM_VMW_EXT_NAME_LEN];
++	struct drm_vmw_extension_rep rep;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_CREATE_CONTEXT - Create a host context.
++ *
++ * Allocates a device unique context id, and queues a create context command
++ * for the host. Does not wait for host completion.
++ */
++
++/**
++ * struct drm_vmw_context_arg
++ *
++ * @cid: Device unique context ID.
++ *
++ * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
++ * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
++ */
++
++struct drm_vmw_context_arg {
++	int32_t cid;
++	uint32_t pad64;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_UNREF_CONTEXT - Free a host context.
++ *
++ * Frees a global context id, and queues a destroy host command for the host.
++ * Does not wait for host completion. The context ID can be used directly
++ * in the command stream and shows up as the same context ID on the host.
++ */
++
++/*************************************************************************/
++/**
++ * DRM_VMW_CREATE_SURFACE - Create a host surface.
++ *
++ * Allocates a device unique surface id, and queues a create surface command
++ * for the host. Does not wait for host completion. The surface ID can be
++ * used directly in the command stream and shows up as the same surface
++ * ID on the host.
++ */
++
++/**
++ * struct drm_vmw_surface_create_req
++ *
++ * @flags: Surface flags as understood by the host.
++ * @format: Surface format as understood by the host.
++ * @mip_levels: Number of mip levels for each face.
++ * An unused face should have 0 encoded.
++ * @size_addr: Address of a user-space array of struct drm_vmw_size
++ * cast to a uint64_t for 32-64 bit compatibility.
++ * The size of the array should equal the total number of mipmap levels.
++ * @shareable: Boolean whether other clients (as identified by file descriptors)
++ * may reference this surface.
++ * @scanout: Boolean whether the surface is intended to be used as a
++ * scanout.
++ *
++ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
++ * Output data from the DRM_VMW_REF_SURFACE Ioctl.
++ */
++
++struct drm_vmw_surface_create_req {
++	uint32_t flags;
++	uint32_t format;
++	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
++	uint64_t size_addr;
++	int32_t shareable;
++	int32_t scanout;
++};
++
++/**
++ * struct drm_vmw_surface_arg
++ *
++ * @sid: Surface id of created surface or surface to destroy or reference.
++ *
++ * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
++ * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
++ * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
++ */
++
++struct drm_vmw_surface_arg {
++	int32_t sid;
++	uint32_t pad64;
++};
++
++/**
++ * struct drm_vmw_size
++ *
++ * @width - mip level width
++ * @height - mip level height
++ * @depth - mip level depth
++ *
++ * Description of a mip level.
++ * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
++ */
++
++struct drm_vmw_size {
++	uint32_t width;
++	uint32_t height;
++	uint32_t depth;
++	uint32_t pad64;
++};
++
++/**
++ * union drm_vmw_surface_create_arg
++ *
++ * @rep: Output data as described above.
++ * @req: Input data as described above.
++ *
++ * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
++ */
++
++union drm_vmw_surface_create_arg {
++	struct drm_vmw_surface_arg rep;
++	struct drm_vmw_surface_create_req req;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_REF_SURFACE - Reference a host surface.
++ *
++ * Puts a reference on a host surface with a given sid, as previously
++ * returned by the DRM_VMW_CREATE_SURFACE ioctl.
++ * A reference will make sure the surface isn't destroyed while we hold
++ * it and will allow the calling client to use the surface ID in the command
++ * stream.
++ *
++ * On successful return, the Ioctl returns the surface information given
++ * in the DRM_VMW_CREATE_SURFACE ioctl.
++ */
++
++/**
++ * union drm_vmw_surface_reference_arg
++ *
++ * @rep: Output data as described above.
++ * @req: Input data as described above.
++ *
++ * Argument to the DRM_VMW_REF_SURFACE Ioctl.
++ */
++
++union drm_vmw_surface_reference_arg {
++	struct drm_vmw_surface_create_req rep;
++	struct drm_vmw_surface_arg req;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
++ *
++ * Clear a reference previously put on a host surface.
++ * When all references are gone, including the one implicitly placed
++ * on creation, a destroy surface command will be queued for the host.
++ * Does not wait for completion.
++ */
++
++/*************************************************************************/
++/**
++ * DRM_VMW_EXECBUF
++ *
++ * Submit a command buffer for execution on the host, and return a
++ * fence sequence that when signaled, indicates that the command buffer has
++ * executed.
++ */
++
++/**
++ * struct drm_vmw_execbuf_arg
++ *
++ * @commands: User-space address of a command buffer cast to a uint64_t.
++ * @command_size: Size in bytes of the command buffer.
++ * @throttle_us: Sleep until software is less than @throttle_us
++ * microseconds ahead of hardware. The driver may round this value
++ * to the nearest kernel tick.
++ * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to a
++ * uint64_t.
++ * @version: Allows expanding the execbuf ioctl parameters without breaking
++ * backwards compatibility, since user-space will always tell the kernel
++ * which version it uses.
++ * @flags: Execbuf flags. None currently.
++ *
++ * Argument to the DRM_VMW_EXECBUF Ioctl.
++ */
++
++#define DRM_VMW_EXECBUF_VERSION 0
++
++struct drm_vmw_execbuf_arg {
++	uint64_t commands;
++	uint32_t command_size;
++	uint32_t throttle_us;
++	uint64_t fence_rep;
++	uint32_t version;
++	uint32_t flags;
++};
++
++/**
++ * struct drm_vmw_fence_rep
++ *
++ * @fence_seq: Fence sequence associated with a command submission.
++ * @error: This member should've been set to -EFAULT on submission.
++ * The following actions should be taken on completion:
++ * error == -EFAULT: Fence communication failed. The host is synchronized.
++ * Use the last fence id read from the FIFO fence register.
++ * error != 0 && error != -EFAULT:
++ * Fence submission failed. The host is synchronized. Use the fence_seq member.
++ * error == 0: All is OK. The host may not be synchronized.
++ * Use the fence_seq member.
++ *
++ * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
++ */
++
++struct drm_vmw_fence_rep {
++	uint64_t fence_seq;
++	int32_t error;
++	uint32_t pad64;
++};
++
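A hedged user-space sketch of driving this ioctl, again assuming libdrm's
drmCommandWrite() helper; everything outside the header is illustrative:

	#include <stdint.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	static int vmw_execbuf(int fd, void *cmds, uint32_t size,
			       struct drm_vmw_fence_rep *fence)
	{
		struct drm_vmw_execbuf_arg arg = {
			.commands = (uintptr_t)cmds,
			.command_size = size,
			.throttle_us = 0,		/* no throttling */
			.fence_rep = (uintptr_t)fence,
			.version = DRM_VMW_EXECBUF_VERSION,
		};

		/* The kernel writes fence->fence_seq and fence->error back
		 * through the user-space pointer passed in .fence_rep. */
		return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	}
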
++/*************************************************************************/
++/**
++ * DRM_VMW_ALLOC_DMABUF
++ *
++ * Allocate a DMA buffer that is also visible to the host.
++ * NOTE: The buffer is identified by a handle and an offset, which are
++ * private to the guest, but usable in the command stream. The guest
++ * kernel may translate these
++ * and patch up the command stream accordingly. In the future, the offset may
++ * be zero at all times, or it may disappear from the interface before it is
++ * fixed.
++ *
++ * The DMA buffer may stay user-space mapped in the guest at all times,
++ * and is thus suitable for sub-allocation.
++ *
++ * DMA buffers are mapped using the mmap() syscall on the drm device.
++ */
++
++/**
++ * struct drm_vmw_alloc_dmabuf_req
++ *
++ * @size: Required minimum size of the buffer.
++ *
++ * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
++ */
++
++struct drm_vmw_alloc_dmabuf_req {
++	uint32_t size;
++	uint32_t pad64;
++};
++
++/**
++ * struct drm_vmw_dmabuf_rep
++ *
++ * @map_handle: Offset to use in the mmap() call used to map the buffer.
++ * @handle: Handle unique to this buffer. Used for unreferencing.
++ * @cur_gmr_id: GMR id to use in the command stream when this buffer is
++ * referenced. See note above.
++ * @cur_gmr_offset: Offset to use in the command stream when this buffer is
++ * referenced. See note above.
++ *
++ * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
++ */
++
++struct drm_vmw_dmabuf_rep {
++	uint64_t map_handle;
++	uint32_t handle;
++	uint32_t cur_gmr_id;
++	uint32_t cur_gmr_offset;
++	uint32_t pad64;
++};
++
++/**
++ * union drm_vmw_alloc_dmabuf_arg
++ *
++ * @req: Input data as described above.
++ * @rep: Output data as described above.
++ *
++ * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
++ */
++
++union drm_vmw_alloc_dmabuf_arg {
++	struct drm_vmw_alloc_dmabuf_req req;
++	struct drm_vmw_dmabuf_rep rep;
++};
++
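Since the comment above says DMA buffers are mapped with mmap() on the drm
device, using @map_handle as the offset, here is a hedged allocate-and-map
sketch (illustrative only; the libdrm helper is assumed):

	#include <stdint.h>
	#include <sys/mman.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	static void *vmw_alloc_and_map(int fd, uint32_t size, uint32_t *handle)
	{
		union drm_vmw_alloc_dmabuf_arg arg = {
			.req = { .size = size },
		};
		void *ptr;

		if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF,
					&arg, sizeof(arg)) != 0)
			return NULL;

		*handle = arg.rep.handle;	/* keep for UNREF_DMABUF later */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, (off_t)arg.rep.map_handle);
		return ptr == MAP_FAILED ? NULL : ptr;
	}
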
++/*************************************************************************/
++/**
++ * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
++ *
++ */
++
++/**
++ * struct drm_vmw_unref_dmabuf_arg
++ *
++ * @handle: Handle indicating what buffer to free. Obtained from the
++ * DRM_VMW_ALLOC_DMABUF Ioctl.
++ *
++ * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
++ */
++
++struct drm_vmw_unref_dmabuf_arg {
++	uint32_t handle;
++	uint32_t pad64;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
++ *
++ * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
++ */
++
++/**
++ * struct drm_vmw_fifo_debug_arg
++ *
++ * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In
++ * @debug_buffer_size: Size in bytes of debug buffer //In
++ * @used_size: Number of bytes copied to the buffer // Out
++ * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
++ *
++ * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
++ */
++
++struct drm_vmw_fifo_debug_arg {
++	uint64_t debug_buffer;
++	uint32_t debug_buffer_size;
++	uint32_t used_size;
++	int32_t did_not_fit;
++	uint32_t pad64;
++};
++
++struct drm_vmw_fence_wait_arg {
++	uint64_t sequence;
++	uint64_t kernel_cookie;
++	int32_t cookie_valid;
++	int32_t pad64;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
++ *
++ * This IOCTL controls the overlay units of the svga device.
++ * The SVGA overlay units do not work like regular hardware units in
++ * that they do not automatically read back the contents of the given dma
++ * buffer. Instead, they read back only on each call to this ioctl, and
++ * at any point between this call being made and a following call that
++ * either changes the buffer or disables the stream.
++ */
++
++/**
++ * struct drm_vmw_rect
++ *
++ * Defines a rectangle. Used in the overlay ioctl to define
++ * source and destination rectangle.
++ */
++
++struct drm_vmw_rect {
++	int32_t x;
++	int32_t y;
++	uint32_t w;
++	uint32_t h;
++};
++
++/**
++ * struct drm_vmw_control_stream_arg
++ *
++ * @stream_id: Stream to control.
++ * @enabled: If false all following arguments are ignored.
++ * @handle: Handle to buffer for getting data from.
++ * @format: Format of the overlay as understood by the host.
++ * @width: Width of the overlay.
++ * @height: Height of the overlay.
++ * @size: Size of the overlay in bytes.
++ * @pitch: Array of pitches, the two last are only used for YUV12 formats.
++ * @offset: Offset from start of dma buffer to overlay.
++ * @src: Source rect, must be within the defined area above.
++ * @dst: Destination rect, x and y may be negative.
++ *
++ * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
++ */
++
++struct drm_vmw_control_stream_arg {
++	uint32_t stream_id;
++	uint32_t enabled;
++
++	uint32_t flags;
++	uint32_t color_key;
++
++	uint32_t handle;
++	uint32_t offset;
++	int32_t format;
++	uint32_t size;
++	uint32_t width;
++	uint32_t height;
++	uint32_t pitch[3];
++
++	uint32_t pad64;
++	struct drm_vmw_rect src;
++	struct drm_vmw_rect dst;
++};
++
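To illustrate the @enabled semantics above (all following members are
ignored when it is false), a hedged sketch of stopping a stream from
user-space; the libdrm helper is assumed:

	static int vmw_stop_stream(int fd, uint32_t stream_id)
	{
		struct drm_vmw_control_stream_arg arg = {
			.stream_id = stream_id,
			.enabled = 0,	/* remaining members are ignored */
		};

		return drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM,
				       &arg, sizeof(arg));
	}
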
++/*************************************************************************/
++/**
++ * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
++ *
++ */
++
++#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
++#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)
++
++/**
++ * struct drm_vmw_cursor_bypass_arg
++ *
++ * @flags: Flags.
++ * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
++ * @xpos: X position of cursor.
++ * @ypos: Y position of cursor.
++ * @xhot: X hotspot.
++ * @yhot: Y hotspot.
++ *
++ * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
++ */
++
++struct drm_vmw_cursor_bypass_arg {
++	uint32_t flags;
++	uint32_t crtc_id;
++	int32_t xpos;
++	int32_t ypos;
++	int32_t xhot;
++	int32_t yhot;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_CLAIM_STREAM - Claim a single stream.
++ */
++
++/**
++ * struct drm_vmw_stream_arg
++ *
++ * @stream_id: Device unique stream ID.
++ *
++ * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
++ * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
++ */
++
++struct drm_vmw_stream_arg {
++	uint32_t stream_id;
++	uint32_t pad64;
++};
++
++/*************************************************************************/
++/**
++ * DRM_VMW_UNREF_STREAM - Unclaim a stream.
++ *
++ * Return a single stream that was claimed by this process. Also makes
++ * sure that the stream has been stopped.
++ */
++
++#endif

Added: dists/sid/linux-2.6/debian/patches/features/all/radeon-autoload-without-CONFIG_DRM_RADEON_KMS.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/sid/linux-2.6/debian/patches/features/all/radeon-autoload-without-CONFIG_DRM_RADEON_KMS.patch	Tue Mar  2 00:09:46 2010	(r15304)
@@ -0,0 +1,12 @@
+--- a/drivers/gpu/drm/radeon/radeon_drv.c
++++ b/drivers/gpu/drm/radeon/radeon_drv.c
+@@ -160,9 +160,7 @@
+ 	radeon_PCI_IDS
+ };
+ 
+-#if defined(CONFIG_DRM_RADEON_KMS)
+ MODULE_DEVICE_TABLE(pci, pciidlist);
+-#endif
+ 
+ static struct drm_driver driver_old = {
+ 	.driver_features =

Modified: dists/sid/linux-2.6/debian/patches/series/10
==============================================================================
--- dists/sid/linux-2.6/debian/patches/series/10	Tue Mar  2 00:05:07 2010	(r15303)
+++ dists/sid/linux-2.6/debian/patches/series/10	Tue Mar  2 00:09:46 2010	(r15304)
@@ -1,2 +1,10 @@
 + bugfix/all/tcp-fix-ICMP-RTO-war.patch
 + bugfix/sparc/stack-alignment.patch
+- bugfix/all/radeon_block_ability_userspace_app.patch
+- bugfix/all/drm-i915-give-up-on-8xx-lid-status.patch
+- bugfix/all/drm-i915-disable-powersave.patch
+- features/all/i915-autoload-without-CONFIG_DRM_I915_KMS.patch
++ features/all/drm-2.6.32.9-2.6.33.patch
++ debian/drm-staging-2.6.32.9-2.6.33.patch
++ debian/drm-restore-private-list_sort.patch
++ features/all/radeon-autoload-without-CONFIG_DRM_RADEON_KMS.patch


