Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6:
  ACPI: fix "acpi=ht" boot option
  ACPI, i915: blacklist Clevo M5x0N bad_lid state
  ACPI: fix High cpu temperature with 2.6.32
  ACPI: dock: properly initialize local struct dock_station in dock_add()
  ACPI: remove Asus P2B-DS from acpi=ht blacklist
  thinkpad-acpi: wrong thermal attribute_group removed in thermal_exit()
  ACPI: acpi_bus_{scan,start,add}: return -ENODEV if no device was found
  ACPI: Add NULL pointer check in acpi_bus_start
  ACPI: processor: only evaluate _PDC once per processor
  ACPI: processor: add kernel command line support for early _PDC eval
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8..cc29c0f 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -338,7 +338,8 @@
 	}
 
 	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+			MPIC_BROKEN_FRR_NIRQS,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 	of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 04160a4..a15f582 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -46,6 +46,7 @@
 	__iomem u32 *bptr_vaddr;
 	struct device_node *np;
 	int n = 0;
+	int ioremappable;
 
 	WARN_ON (nr < 0 || nr >= NR_CPUS);
 
@@ -59,21 +60,37 @@
 		return;
 	}
 
+	/*
+	 * A secondary core could be in a spinloop in the bootpage
+	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
+	 * The bootpage and highmem can be accessed via ioremap(), but
+	 * we need to access the spinloop directly if it's in lowmem.
+	 */
+	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+
 	/* Map the spin table */
-	bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+	if (ioremappable)
+		bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+	else
+		bptr_vaddr = phys_to_virt(*cpu_rel_addr);
 
 	local_irq_save(flags);
 
 	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
 	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
 
+	if (!ioremappable)
+		flush_dcache_range((ulong)bptr_vaddr,
+				(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+
 	/* Wait a bit for the CPU to ack. */
 	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
 		mdelay(1);
 
 	local_irq_restore(flags);
 
-	iounmap(bptr_vaddr);
+	if (ioremappable)
+		iounmap(bptr_vaddr);
 
 	pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
 }
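
The smp.c hunk above hinges on one observation: the secondary core's spin table may live below the top of the linear mapping, where ioremap() must not be used, so the physical address is compared against virt_to_phys(high_memory) first. A condensed sketch of that decision, reusing the patch's own helpers (illustration only, not a drop-in for the hunk):

	u32 __iomem *vaddr;
	int ioremappable;

	/* Above the linear map we must ioremap(); below it the page is
	 * already mapped, so use phys_to_virt() and flush the dcache by
	 * hand, because the waking core reads DRAM, not our cache. */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
	if (ioremappable)
		vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
	else
		vaddr = (u32 __iomem *)phys_to_virt(*cpu_rel_addr);

The flush_dcache_range() added after the out_be32() writes is what makes the lowmem path safe: the secondary core polls physical memory before cache coherency is up, so the new PIR and entry address must actually reach DRAM.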
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05..ab6c973 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,49 @@
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes = ARRAY_SIZE(cea_interlaced);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
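
A worked example of the interlace quirk above, with illustrative 1080i numbers: a CEA detailed timing encoded in field height arrives as 1920x540 with, say, vsync_start 542, vsync_end 547 and vtotal 562. Because (1920, 540) matches the 1920x1080 table entry, the vertical values double to 1080, 1084, 1094 and 1124, and the final "mode->vtotal |= 1" produces the odd 1125-line frame total that interlaced timing requires (562.5 lines per field). A detailed timing already encoded in frame height matches nothing in the table and only gains the INTERLACE flag.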
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2cd0fad..0e9cd1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5861,13 +5861,12 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
-	unsigned long flags;
 
-	spin_lock_irqsave(&bios->lock, flags);
+	mutex_lock(&bios->lock);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
-	spin_unlock_irqrestore(&bios->lock, flags);
+	mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,7 +5875,7 @@
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
-	spin_lock_init(&bios->lock);
+	mutex_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 68446fd..fd94bd6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,7 +205,7 @@
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
-	spinlock_t lock;
+	struct mutex lock;
 
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;
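
Context for the nouveau spinlock-to-mutex conversion above: executing a BIOS init table is a slow path that can sleep (init scripts contain delay opcodes), and sleeping while holding a spinlock with interrupts disabled is invalid. A minimal illustration of the two patterns, using a hypothetical lock rather than driver code:

	/* Broken: msleep() may schedule while the spinlock is held. */
	spin_lock_irqsave(&lock, flags);
	msleep(5);
	spin_unlock_irqrestore(&lock, flags);

	/* Fine: a mutex may be held across a sleep and still
	 * serialises table parsing. */
	mutex_lock(&lock);
	msleep(5);
	mutex_unlock(&lock);

The conversion also implies that bios->lock is never taken from interrupt context, since a mutex cannot be.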
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c..21ac6e4 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@
 				nouveau_encoder(encoder)->restore.output);
 
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 2a3df55..7f152f6 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -643,7 +643,7 @@
 	uint8_t count = U8((*ptr)++);
 	SDEBUG("   count: %d\n", count);
 	if (arg == ATOM_UNIT_MICROSEC)
-		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+		udelay(count);
 	else
 		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }
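
The atom.c delay fix above is a granularity problem: jiffies cannot express microseconds. Taking HZ=250 as an illustrative configuration, one jiffy is 4 ms, so usecs_to_jiffies(count) rounds any microsecond count up to a whole jiffy and schedule_timeout_uninterruptible() then sleeps at least 4 ms, orders of magnitude longer than an ATOM_UNIT_MICROSEC script asked for. udelay(count) busy-waits close to the requested time instead, and because count is a u8 the busy-wait is bounded at 255 us. The millisecond branch keeps the sleeping call, where jiffy resolution is adequate.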
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca..446b765 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f57480b..c0356bb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4
@@ -363,11 +364,12 @@
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
-	uint32_t	*ptr;
+	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*
@@ -377,10 +379,9 @@
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {
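
The new power-of-two comment in radeon.h is load-bearing: the reworked allocator wraps its pool index with a mask, i & (RADEON_IB_POOL_SIZE - 1), which equals i % RADEON_IB_POOL_SIZE only for power-of-two sizes. A standalone check of that identity (illustrative userspace C, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned n = 16;	/* stands in for RADEON_IB_POOL_SIZE */
		unsigned i;

		for (i = 0; i < 1000; i++)
			assert((i & (n - 1)) == (i % n));
		return 0;	/* with n = 12, say, the assert fires at i = 12 */
	}

With a non-power-of-two size the mask no longer implements modulo, so the scan would wrap unevenly.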
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148..e9d0850 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@
 						&p->validated);
 		}
 	}
-	return radeon_bo_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@
 {
 	unsigned i;
 
-	if (error && parser->ib) {
-		radeon_bo_list_unvalidate(&parser->validated,
-						parser->ib->fence);
-	} else {
-		radeon_bo_list_unreserve(&parser->validated);
+	if (!error && parser->ib) {
+		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
+	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
 			mutex_lock(&parser->rdev->ddev->struct_mutex);
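
The parser change above inverts the old cleanup logic: validation no longer attaches a fence, and on success the fence emitted for the IB is applied to the whole validated list through the new radeon_bo_list_fence(), with the reservation dropped unconditionally afterwards. Restated as the resulting teardown order (names as in the patch):

	if (!error && parser->ib)
		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
	radeon_bo_list_unreserve(&parser->validated);

Because a buffer is now only ever fenced with a fence that has actually been emitted, the error-path radeon_bo_list_unvalidate() has nothing left to undo and can go away.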
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71b..f1da370 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
 	int r;
 
 	r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
-		if (fence) {
-			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-			bo->tbo.sync_obj = radeon_fence_ref(fence);
-			bo->tbo.sync_obj_arg = NULL;
-		}
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
 	}
 	return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_fence *old_fence;
+	struct radeon_bo *bo;
+	struct radeon_fence *old_fence = NULL;
 
-	if (fence)
-		list_for_each_entry(lobj, head, list) {
-			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-			if (old_fence == fence) {
-				lobj->bo->tbo.sync_obj = NULL;
-				radeon_fence_unref(&old_fence);
-			}
+	list_for_each_entry(lobj, head, list) {
+		bo = lobj->bo;
+		spin_lock(&bo->tbo.lock);
+		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+		bo->tbo.sync_obj = radeon_fence_ref(fence);
+		bo->tbo.sync_obj_arg = NULL;
+		spin_unlock(&bo->tbo.lock);
+		if (old_fence) {
+			radeon_fence_unref(&old_fence);
 		}
-	radeon_bo_list_unreserve(head);
+	}
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f180..7ab43de 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@
 				struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d..694799f 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
+		}
 	}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen: it means every IB has been
+		 * allocated and none has been scheduled yet.  Return
+		 * -EBUSY to userspace in the hope that a retry of the
+		 * ioctl has better luck.
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
-	}
-	mutex_unlock(&rdev->ib_pool.mutex);
-
-	r = radeon_fence_wait(nib->fence, false);
-	if (r) {
-		DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-			  (unsigned long)nib->gpu_addr, nib->length_dw);
-		DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-		goto out;
+		r = radeon_fence_wait(nib->fence, false);
+		if (r) {
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
-
+	nib->fence = fence;
 	nib->length_dw = 0;
-
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
-
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -114,18 +101,7 @@
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
-		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 
@@ -135,7 +111,7 @@
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 
@@ -148,7 +124,8 @@
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled, an IB is considered free and is protected by its fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;
@@ -164,7 +141,6 @@
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
 				true, RADEON_GEM_DOMAIN_GTT,
 				&rdev->ib_pool.robj);
@@ -195,9 +171,9 @@
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +190,6 @@
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {
@@ -363,7 +338,7 @@
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {
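
Taken together, the radeon_ring.c hunks replace the bitmap-plus-scheduled-list bookkeeping with a round-robin scan: start at head_id, take the first pool entry marked free, and if that entry's previous fence has not signalled yet, wait on it before reuse. A condensed model of the scan (pool stands in for &rdev->ib_pool; locking and error paths omitted):

	struct radeon_ib *nib = NULL;
	int i, c;

	for (i = pool->head_id, c = 0; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= RADEON_IB_POOL_SIZE - 1;	/* wrap via mask */
		if (pool->ibs[i].free) {
			nib = &pool->ibs[i];
			break;
		}
	}
	if (nib) {
		pool->head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
		nib->free = false;
	}

Marking an IB free again at schedule time, instead of keeping a scheduled list, works because the fence stored in the entry is what gates its next reuse.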
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f68..0c9c081 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@
 		 */
 
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			goto out_no_device;
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-		vmw_fb_init(dev_priv);
 	}
+	ret = vmw_request_device(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_device;
+	vmw_kms_init(dev_priv);
+	vmw_overlay_init(dev_priv);
+	vmw_fb_init(dev_priv);
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
-	if (!dev_priv->stealth) {
-		vmw_fb_close(dev_priv);
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
-		vmw_release_device(dev_priv);
-		pci_release_regions(dev->pdev);
-	} else {
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
+	vmw_fb_close(dev_priv);
+	vmw_kms_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_release_device(dev_priv);
+	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
-	}
+	else
+		pci_release_regions(dev->pdev);
+
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@
 	int ret = 0;
 
 	DRM_INFO("Master set.\n");
-	if (dev_priv->stealth) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@
 
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-	if (dev_priv->stealth) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_release_device(dev_priv);
-	}
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	if (!dev_priv->stealth)
-		vmw_fb_on(dev_priv);
+	vmw_fb_on(dev_priv);
 }
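
The vmwgfx rework above collapses the stealth and native paths: KMS, overlay, fbdev and the device itself are now brought up and torn down identically in both modes, and stealth only decides whether the driver claims a single PCI region or all of them. That in turn removes the master set/drop special-casing, since the device is no longer acquired and released as masters come and go.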
 
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f643..a933670 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@
 	info->pixmap.scan_align = 1;
 #endif
 
+	info->aperture_base = vmw_priv->vram_start;
+	info->aperture_size = vmw_priv->vram_size;
+
 	/*
 	 * Dirty & Deferred IO
 	 */
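
Filling in aperture_base and aperture_size above ties into the stealth probe earlier in the series: with the VRAM aperture described, the fbdev core can tell that a firmware framebuffer such as vesafb claims the same range and evict it when the vmwgfx framebuffer registers (assuming the 2.6.33-era fbmem aperture handling).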