drm/ttm: restructure to allow driver to plug in alternate memory manager

Nouveau will need this on GeForce 8 and up to account for the GPU
reordering physical VRAM for some memory types.

The core change is to move TTM's fixed drm_mm-backed allocation logic
behind a per-memory-type vtable, struct ttm_mem_type_manager_func
(init, takedown, get_node, put_node, debug), with the existing drm_mm
logic preserved as the default implementation, ttm_bo_manager_func, in
the new ttm_bo_manager.c.  Code that used to peek at mm_node->start
now reads the new 'start' field in struct ttm_mem_reg, so an alternate
manager is free to back mm_node with something other than a
struct drm_mm_node.
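
As an illustration of the driver-side contract (a sketch only: the
"foo" driver and foo_vram_manager_func below are hypothetical; the
assignment point follows from ttm_bo_init_mm() calling the driver's
init_mem_type() hook before it dereferences man->func):

    static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                 struct ttm_mem_type_manager *man)
    {
            switch (type) {
            case TTM_PL_TT:
                    /* Keep the stock drm_mm-backed manager. */
                    man->func = &ttm_bo_manager_func;
                    break;
            case TTM_PL_VRAM:
                    /*
                     * Driver-private manager: get_node() must set
                     * mem->mm_node and mem->start on success; leaving
                     * mem->mm_node NULL means "no space", not an error.
                     * put_node() should clear mem->mm_node.
                     */
                    man->func = &foo_vram_manager_func;
                    break;
            default:
                    return -EINVAL;
            }
            return 0;
    }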

Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4a..f3cf6f0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
+	ttm_bo_manager.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c4..f999e36 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -74,6 +74,7 @@
 {
 	struct ttm_agp_backend *agp_be =
 	    container_of(backend, struct ttm_agp_backend, backend);
+	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem = agp_be->mem;
 	int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
 	int ret;
@@ -81,7 +82,7 @@
 	mem->is_flushed = 1;
 	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
 
-	ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+	ret = agp_bind_memory(mem, node->start);
 	if (ret)
 		printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 80d37b4..af7b57a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,11 +84,8 @@
 		man->available_caching);
 	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
 		man->default_caching);
-	if (mem_type != TTM_PL_SYSTEM) {
-		spin_lock(&bdev->glob->lru_lock);
-		drm_mm_debug_table(&man->manager, TTM_PFX);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	if (mem_type != TTM_PL_SYSTEM)
+		(*man->func->debug)(man, TTM_PFX);
 }
 
 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
@@ -421,7 +418,7 @@
 
 	if (bo->mem.mm_node) {
 		spin_lock(&bo->lock);
-		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
@@ -724,52 +721,12 @@
 	return ret;
 }
 
-static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
-				struct ttm_mem_type_manager *man,
-				struct ttm_placement *placement,
-				struct ttm_mem_reg *mem,
-				struct drm_mm_node **node)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	unsigned long lpfn;
-	int ret;
-
-	lpfn = placement->lpfn;
-	if (!lpfn)
-		lpfn = man->size;
-	*node = NULL;
-	do {
-		ret = drm_mm_pre_get(&man->manager);
-		if (unlikely(ret))
-			return ret;
-
-		spin_lock(&glob->lru_lock);
-		*node = drm_mm_search_free_in_range(&man->manager,
-					mem->num_pages, mem->page_alignment,
-					placement->fpfn, lpfn, 1);
-		if (unlikely(*node == NULL)) {
-			spin_unlock(&glob->lru_lock);
-			return 0;
-		}
-		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
-							mem->page_alignment,
-							placement->fpfn,
-							lpfn);
-		spin_unlock(&glob->lru_lock);
-	} while (*node == NULL);
-	return 0;
-}
-
 void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
 
-	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
-		mem->mm_node = NULL;
-	}
+	if (mem->mm_node)
+		(*man->func->put_node)(man, mem);
 }
 EXPORT_SYMBOL(ttm_bo_mem_put);
 
@@ -788,14 +745,13 @@
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct drm_mm_node *node;
 	int ret;
 
 	do {
-		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		ret = (*man->func->get_node)(man, bo, placement, mem);
 		if (unlikely(ret != 0))
 			return ret;
-		if (node)
+		if (mem->mm_node)
 			break;
 		spin_lock(&glob->lru_lock);
 		if (list_empty(&man->lru)) {
@@ -808,9 +764,8 @@
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
-	if (node == NULL)
+	if (mem->mm_node == NULL)
 		return -ENOMEM;
-	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
 }
@@ -884,7 +839,6 @@
 	bool type_found = false;
 	bool type_ok = false;
 	bool has_erestartsys = false;
-	struct drm_mm_node *node = NULL;
 	int i, ret;
 
 	mem->mm_node = NULL;
@@ -918,17 +872,15 @@
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			ret = ttm_bo_man_get_node(bo, man, placement, mem,
-							&node);
+			ret = (*man->func->get_node)(man, bo, placement, mem);
 			if (unlikely(ret))
 				return ret;
 		}
-		if (node)
+		if (mem->mm_node)
 			break;
 	}
 
-	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
-		mem->mm_node = node;
+	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
 		return 0;
@@ -998,7 +950,6 @@
 			bool interruptible, bool no_wait_reserve,
 			bool no_wait_gpu)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
 	struct ttm_mem_reg mem;
 
@@ -1026,11 +977,8 @@
 		goto out_unlock;
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
-	if (ret && mem.mm_node) {
-		spin_lock(&glob->lru_lock);
-		drm_mm_put_block(mem.mm_node);
-		spin_unlock(&glob->lru_lock);
-	}
+	if (ret && mem.mm_node)
+		ttm_bo_mem_put(bo, &mem);
 	return ret;
 }
 
@@ -1038,11 +986,10 @@
 			     struct ttm_mem_reg *mem)
 {
 	int i;
-	struct drm_mm_node *node = mem->mm_node;
 
-	if (node && placement->lpfn != 0 &&
-	    (node->start < placement->fpfn ||
-	     node->start + node->size > placement->lpfn))
+	if (mem->mm_node && placement->lpfn != 0 &&
+	    (mem->start < placement->fpfn ||
+	     mem->start + mem->num_pages > placement->lpfn))
 		return -1;
 
 	for (i = 0; i < placement->num_placement; i++) {
@@ -1286,7 +1233,6 @@
 
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man;
 	int ret = -EINVAL;
 
@@ -1309,13 +1255,7 @@
 	if (mem_type > 0) {
 		ttm_bo_force_list_clean(bdev, mem_type, false);
 
-		spin_lock(&glob->lru_lock);
-		if (drm_mm_clean(&man->manager))
-			drm_mm_takedown(&man->manager);
-		else
-			ret = -EBUSY;
-
-		spin_unlock(&glob->lru_lock);
+		ret = (*man->func->takedown)(man);
 	}
 
 	return ret;
@@ -1366,6 +1306,7 @@
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
 		return ret;
+	man->bdev = bdev;
 
 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
@@ -1375,7 +1316,8 @@
 			       type);
 			return ret;
 		}
-		ret = drm_mm_init(&man->manager, 0, p_size);
+
+		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 0000000..7410c19
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,153 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
+			       struct ttm_buffer_object *bo,
+			       struct ttm_placement *placement,
+			       struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	struct drm_mm_node *node = NULL;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
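+	/* Preallocate node storage outside the spinlock so the atomic get
+	 * below need not allocate under lru_lock; retry if it still fails. */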
+	do {
+		ret = drm_mm_pre_get(mm);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		node = drm_mm_search_free_in_range(mm,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+							mem->page_alignment,
+							placement->fpfn,
+							lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (node == NULL);
+
+	mem->mm_node = node;
+	mem->start = node->start;
+	return 0;
+}
+
+static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
+				struct ttm_mem_reg *mem)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+
+	if (mem->mm_node) {
+		spin_lock(&glob->lru_lock);
+		drm_mm_put_block(mem->mm_node);
+		spin_unlock(&glob->lru_lock);
+		mem->mm_node = NULL;
+	}
+}
+
+static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
+			   unsigned long p_size)
+{
+	struct drm_mm *mm;
+	int ret;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return -ENOMEM;
+
+	ret = drm_mm_init(mm, 0, p_size);
+	if (ret) {
+		kfree(mm);
+		return ret;
+	}
+
+	man->priv = mm;
+	return 0;
+}
+
+static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+	int ret = 0;
+
+	spin_lock(&glob->lru_lock);
+	if (drm_mm_clean(mm)) {
+		drm_mm_takedown(mm);
+		kfree(mm);
+		man->priv = NULL;
+	} else {
+		ret = -EBUSY;
+	}
+	spin_unlock(&glob->lru_lock);
+	return ret;
+}
+
+static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
+			     const char *prefix)
+{
+	struct ttm_bo_global *glob = man->bdev->glob;
+	struct drm_mm *mm = man->priv;
+
+	spin_lock(&glob->lru_lock);
+	drm_mm_debug_table(mm, prefix);
+	spin_unlock(&glob->lru_lock);
+}
+
+const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
+	.init = ttm_bo_man_init,
+	.takedown = ttm_bo_man_takedown,
+	.get_node = ttm_bo_man_get_node,
+	.put_node = ttm_bo_man_put_node,
+	.debug = ttm_bo_man_debug,
+};
+EXPORT_SYMBOL(ttm_bo_manager_func);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 0ebfe0d..c9d2d4d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -256,8 +256,7 @@
 	dir = 1;
 
 	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+	    (new_mem->start < old_mem->start + old_mem->num_pages)) {
 		dir = -1;
 		add = new_mem->num_pages - 1;
 	}