// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)

struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};

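/**
 * cmdq_dev_get_client_reg() - parse one "mediatek,gce-client-reg" entry of a
 *			       client device node
 * @dev:	the client device
 * @client_reg:	filled with the subsys, offset and size cells of the entry
 * @idx:	index of the phandle entry to parse
 *
 * Return: 0 on success, -ENOENT if @client_reg is NULL, or a negative errno
 * if the property cannot be parsed.
 */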
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)\n",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

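/**
 * cmdq_mbox_create() - create a CMDQ mailbox client and request its channel
 * @dev:	the mailbox client device
 * @index:	index of the GCE mailbox channel to request
 * @timeout:	packet-completion timeout in milliseconds, or CMDQ_NO_TIMEOUT
 *		to disable the watchdog timer
 *
 * Return: the new client, or an ERR_PTR() on failure.
 */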
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

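/**
 * cmdq_mbox_destroy() - destroy a CMDQ mailbox client: stop its timeout
 *			 timer, free its channel and release its memory
 * @client:	the client to destroy
 */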
void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

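/**
 * cmdq_pkt_create() - create a CMDQ packet with a DMA-mapped command buffer
 * @client:	the client the packet will be sent through
 * @size:	command buffer size in bytes
 *
 * Return: the new packet, or an ERR_PTR() on failure.
 */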
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

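/**
 * cmdq_pkt_destroy() - unmap and free a CMDQ packet and its command buffer
 * @pkt:	the packet to destroy
 */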
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * When the allocated buffer size (pkt->buf_size) is used up,
		 * the real required size (pkt->cmd_buf_size) keeps growing, so
		 * that the user knows how much memory should ultimately be
		 * allocated after appending all commands and flushing the
		 * command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small!\n",
			  __func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

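/**
 * cmdq_pkt_write() - append a WRITE command setting a hardware register
 * @pkt:	the command packet
 * @subsys:	subsystem ID of the target register
 * @offset:	register offset within the subsystem
 * @value:	value to write
 *
 * Return: 0 on success, or -ENOMEM if the command buffer is full.
 */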
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

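/**
 * cmdq_pkt_write_mask() - append a masked WRITE command: only the bits of
 *			   the register selected by @mask are updated
 * @pkt:	the command packet
 * @subsys:	subsystem ID of the target register
 * @offset:	register offset within the subsystem
 * @value:	value to write
 * @mask:	bits of the register to update
 *
 * Return: 0 on success, or -ENOMEM if the command buffer is full.
 */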
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}

	return cmdq_pkt_write(pkt, subsys, offset_mask, value);
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

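/**
 * cmdq_pkt_wfe() - append a WAIT-FOR-EVENT command: the GCE thread pauses
 *		    until @event is signalled by the hardware
 * @pkt:	the command packet
 * @event:	the event to wait for, below CMDQ_MAX_EVENT
 *
 * Return: 0 on success, -EINVAL if @event is out of range, or -ENOMEM if
 * the command buffer is full.
 */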
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

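/**
 * cmdq_pkt_clear_event() - append a command clearing a hardware event, so a
 *			    later WFE on @event blocks until it fires again
 * @pkt:	the command packet
 * @event:	the event to clear, below CMDQ_MAX_EVENT
 *
 * Return: 0 on success, -EINVAL if @event is out of range, or -ENOMEM if
 * the command buffer is full.
 */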
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

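/**
 * cmdq_pkt_poll() - append a POLL command: the GCE thread keeps re-reading
 *		     the register until it equals @value
 * @pkt:	the command packet
 * @subsys:	subsystem ID of the polled register
 * @offset:	register offset within the subsystem
 * @value:	value the register must reach
 *
 * Return: 0 on success, or -ENOMEM if the command buffer is full.
 */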
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_poll);

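/**
 * cmdq_pkt_poll_mask() - append a masked POLL command: only the bits of the
 *			  register selected by @mask are compared with @value
 * @pkt:	the command packet
 * @subsys:	subsystem ID of the polled register
 * @offset:	register offset within the subsystem
 * @value:	value the masked register must reach
 * @mask:	bits of the register to compare
 *
 * Return: 0 on success, or -ENOMEM if the command buffer is full.
 */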
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset |= CMDQ_POLL_ENABLE_MASK;

	return cmdq_pkt_poll(pkt, subsys, offset, value);
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

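/**
 * cmdq_pkt_flush_async() - finalize the packet (append EOC and JUMP), sync
 *			    its buffer for the device and send it down the
 *			    mailbox channel without blocking
 * @pkt:	the packet to flush
 * @cb:		called from the mailbox completion path when the GCE has
 *		executed the packet
 * @data:	opaque pointer passed back to @cb
 *
 * Return: 0 on success, or a negative errno if finalization fails.
 */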
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send the next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	cmplt->err = data.sta != CMDQ_CB_NORMAL;
	complete(&cmplt->cmplt);
}

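/**
 * cmdq_pkt_flush() - flush a packet and block until the GCE hardware has
 *		      executed it
 * @pkt:	the packet to flush
 *
 * Return: 0 on success, -EFAULT if execution failed, or a negative errno if
 * the packet could not be sent.
 *
 * A minimal usage sketch; error handling is omitted, and dev and reg are
 * hypothetical (a real reg would come from cmdq_dev_get_client_reg()):
 *
 *	struct cmdq_client *cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *	struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *
 *	cmdq_pkt_write(pkt, reg.subsys, reg.offset, 0x1);
 *	cmdq_pkt_flush(pkt);
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */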
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

MODULE_LICENSE("GPL v2");