drm/vkms: Add functions to map/unmap GEM backing storage

Add the functions needed to map and unmap the GEM backing memory into
the kernel's virtual address space: vkms_gem_vmap() pins the backing
pages and vmap()s them on first use, with further calls refcounted, and
vkms_gem_vunmap() drops a reference, unmapping and releasing the pages
once the count reaches zero.
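
For context, a minimal sketch of how a caller might use the pair; the
helper name and the pixel-read use case are illustrative assumptions,
not part of this patch:

	/*
	 * Illustrative only: map a vkms GEM object, read from the kernel
	 * mapping, then release it.
	 */
	static u32 example_read_first_pixel(struct drm_gem_object *obj)
	{
		struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
		u32 pixel;
		int ret;

		ret = vkms_gem_vmap(obj);	/* pins pages, vmap()s on first use */
		if (ret)
			return 0;

		pixel = *(u32 *)vkms_obj->vaddr;	/* CPU access via the kernel mapping */

		vkms_gem_vunmap(obj);	/* drops the refcount, unmaps when it hits zero */
		return pixel;
	}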

Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Link: https://patchwork.freedesktop.org/patch/msgid/4b6563ae4f4337a5fd51f872424addf64e8d59a6.1532446182.git.hamohammed.sa@gmail.com
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index c7e3836..a52e883 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
 	struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
 						   gem);
 
-	kvfree(gem->pages);
+	WARN_ON(gem->pages);
+	WARN_ON(gem->vaddr);
+
 	mutex_destroy(&gem->pages_lock);
 	drm_gem_object_release(obj);
 	kfree(gem);
@@ -177,3 +179,78 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
 
 	return ret;
 }
+
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+	if (!vkms_obj->pages) {
+		struct page **pages = drm_gem_get_pages(gem_obj);
+
+		if (IS_ERR(pages))
+			return pages;
+
+		if (cmpxchg(&vkms_obj->pages, NULL, pages))
+			drm_gem_put_pages(gem_obj, pages, false, true);
+	}
+
+	return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+	mutex_lock(&vkms_obj->pages_lock);
+	if (vkms_obj->vmap_count < 1) {
+		WARN_ON(vkms_obj->vaddr);
+		WARN_ON(vkms_obj->pages);
+		mutex_unlock(&vkms_obj->pages_lock);
+		return;
+	}
+
+	vkms_obj->vmap_count--;
+
+	if (vkms_obj->vmap_count == 0) {
+		vunmap(vkms_obj->vaddr);
+		vkms_obj->vaddr = NULL;
+		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+		vkms_obj->pages = NULL;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+	int ret = 0;
+
+	mutex_lock(&vkms_obj->pages_lock);
+
+	if (!vkms_obj->vaddr) {
+		unsigned int n_pages = obj->size >> PAGE_SHIFT;
+		struct page **pages = _get_pages(vkms_obj);
+
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto out;
+		}
+
+		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+		if (!vkms_obj->vaddr)
+			goto err_vmap;
+	}
+
+	vkms_obj->vmap_count++;
+
+	goto out;
+
+err_vmap:
+	ret = -ENOMEM;
+	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+	vkms_obj->pages = NULL;
+out:
+	mutex_unlock(&vkms_obj->pages_lock);
+	return ret;
+}