/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs <bskeggs@redhat.com>
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include <core/ramht.h>
#include "nouveau_fence.h"

struct nv10_fence_chan {
	struct nouveau_fence_chan base;
};

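/* Per-device fence state.  The BO backs the hardware semaphore used by
 * nv17_fence_sync() on NV17+ chipsets; lock/sequence hand out semaphore
 * values in pairs, one pair per cross-channel sync.
 */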
struct nv10_fence_priv {
	struct nouveau_fence_priv base;
	struct nouveau_bo *bo;
	spinlock_t lock;
	u32 sequence;
};

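/* Emit a fence: write the fence's sequence number through the channel's
 * REF_CNT method so the hardware updates the per-channel reference counter
 * once it has processed everything queued before this point.
 */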
int
nv10_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		OUT_RING (chan, fence->sequence);
		FIRE_RING (chan);
	}
	return ret;
}

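/* Pre-NV17 hardware has no semaphore buffer to sync against, so there is no
 * accelerated inter-channel sync path; -ENODEV leaves the core to fall back
 * to a software wait.
 */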
static int
nv10_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	return -ENODEV;
}

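/* Synchronise two channels through the shared NvSema semaphore: take a pair
 * of values, have 'prev' release value + 1 when it reaches this point, and
 * make 'chan' acquire value + 1 (stalling until 'prev' gets there) before
 * bumping the semaphore on to value + 2 for the next user.
 */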
int
nv17_fence_sync(struct nouveau_fence *fence,
		struct nouveau_channel *prev, struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nv10_fence_priv *priv = dev_priv->fence.func;
	u32 value;
	int ret;

	if (!mutex_trylock(&prev->mutex))
		return -EBUSY;

	spin_lock(&priv->lock);
	value = priv->sequence;
	priv->sequence += 2;
	spin_unlock(&priv->lock);

	ret = RING_SPACE(prev, 5);
	if (!ret) {
		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING (prev, NvSema);
		OUT_RING (prev, 0);
		OUT_RING (prev, value + 0);
		OUT_RING (prev, value + 1);
		FIRE_RING (prev);
	}

	if (!ret && !(ret = RING_SPACE(chan, 5))) {
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, 0);
		OUT_RING (chan, value + 1);
		OUT_RING (chan, value + 2);
		FIRE_RING (chan);
	}

	mutex_unlock(&prev->mutex);
	return 0;
}

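/* Read back the last completed sequence number from the channel's reference
 * counter (user register 0x0048), written by nv10_fence_emit().
 */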
u32
nv10_fence_read(struct nouveau_channel *chan)
{
	return nvchan_rd32(chan, 0x0048);
}

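/* Tear down the per-channel fence context and detach it from the channel. */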
void
nv10_fence_context_del(struct nouveau_channel *chan)
{
	struct nv10_fence_chan *fctx = chan->fence;
	nouveau_fence_context_del(&fctx->base);
	chan->fence = NULL;
	kfree(fctx);
}

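/* Allocate per-channel fence state.  When the semaphore BO exists (NV17+),
 * also wrap it in a DMA object and hash it into the channel's RAMHT under
 * the NvSema handle so the semaphore methods can reference it.
 */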
static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nv10_fence_priv *priv = dev_priv->fence.func;
	struct nv10_fence_chan *fctx;
	struct nouveau_gpuobj *obj;
	int ret = 0;

	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	nouveau_fence_context_new(&fctx->base);

	if (priv->bo) {
		struct ttm_mem_reg *mem = &priv->bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
					     mem->start * PAGE_SIZE, mem->size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (!ret) {
			ret = nouveau_ramht_insert(chan, NvSema, obj);
			nouveau_gpuobj_ref(NULL, &obj);
		}
	}

	if (ret)
		nv10_fence_context_del(chan);
	return ret;
}

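/* Drop the semaphore BO (if any) and free the per-device fence state. */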
void
nv10_fence_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_fence_priv *priv = dev_priv->fence.func;

	nouveau_bo_ref(NULL, &priv->bo);
	dev_priv->fence.func = NULL;
	kfree(priv);
}

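/* Set up fencing for the device: hook up the nv10 implementation, and on
 * NV17 and later allocate, pin and map a page of VRAM to back the NvSema
 * semaphore, switching sync over to nv17_fence_sync().
 */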
int
nv10_fence_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv10_fence_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv10_fence_destroy;
	priv->base.context_new = nv10_fence_context_new;
	priv->base.context_del = nv10_fence_context_del;
	priv->base.emit = nv10_fence_emit;
	priv->base.read = nv10_fence_read;
	priv->base.sync = nv10_fence_sync;
	dev_priv->fence.func = &priv->base;
	spin_lock_init(&priv->lock);

	if (dev_priv->chipset >= 0x17) {
		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
				     0, 0x0000, NULL, &priv->bo);
		if (!ret) {
			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
			if (!ret)
				ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_ref(NULL, &priv->bo);
		}

		if (ret == 0) {
			nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
			priv->base.sync = nv17_fence_sync;
		}
	}

	if (ret)
		nv10_fence_destroy(dev);
	return ret;
}