// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_EOC_CMD		((u64)((CMDQ_CODE_EOC << CMDQ_OP_CODE_SHIFT)) \
				<< 32 | CMDQ_EOC_IRQ_EN)

struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};

static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

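/*
 * A minimal usage sketch for the two allocation helpers above. The device
 * pointer, mailbox index, timeout and buffer size below are illustrative
 * assumptions made by a hypothetical client driver, not values required by
 * this helper:
 *
 *	struct cmdq_client *cl;
 *	struct cmdq_pkt *pkt;
 *
 *	cl = cmdq_mbox_create(&pdev->dev, 0, 2000);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 *
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	if (IS_ERR(pkt)) {
 *		cmdq_mbox_destroy(cl);
 *		return PTR_ERR(pkt);
 *	}
 */
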
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * In the case the allocated buffer size (pkt->buf_size) is
		 * used up, the real required size (pkt->cmd_buf_size) is
		 * still increased, so that the user knows how much memory
		 * should ultimately be allocated after appending all commands
		 * and flushing the command packet. Therefore, the user can
		 * call cmdq_pkt_create() again with the real required buffer
		 * size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %u is too small !\n",
			__func__, (u32)pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

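/*
 * Sketch of how a caller can act on the -ENOMEM path documented above: keep
 * appending so pkt->cmd_buf_size accumulates the real requirement, then
 * recreate the packet with that size ("cl" is the caller's cmdq_client, an
 * assumption for illustration):
 *
 *	size_t needed = pkt->cmd_buf_size;
 *
 *	cmdq_pkt_destroy(pkt);
 *	pkt = cmdq_pkt_create(cl, needed);
 */
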
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

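/*
 * Sketch of queueing register writes with the two helpers above; the subsys
 * id, offsets, values and mask are made-up placeholders:
 *
 *	cmdq_pkt_write(pkt, 0x14, 0x1000, 0x1);
 *	cmdq_pkt_write_mask(pkt, 0x14, 0x1004, 0x10, GENMASK(7, 4));
 *
 * cmdq_pkt_write_mask() emits an extra mask instruction only when the mask
 * is not 0xffffffff; otherwise it degenerates to a plain write.
 */
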
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

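/*
 * One common event pattern (CMDQ_EVENT_FOO is a placeholder token number
 * defined by the client's hardware, not by this helper): clear the event
 * first so a stale token cannot satisfy the wait, then block the GCE thread
 * until the hardware raises it again:
 *
 *	cmdq_pkt_clear_event(pkt, CMDQ_EVENT_FOO);
 *	...
 *	cmdq_pkt_wfe(pkt, CMDQ_EVENT_FOO);
 */
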
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

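/*
 * Polling sketch (placeholder subsys/offset/value): wait in the GCE thread
 * until bit 0 of the register reads back as 1:
 *
 *	cmdq_pkt_poll_mask(pkt, 0x14, 0x1010, 0x1, BIT(0));
 */
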
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}

static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send the next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	if (data.sta != CMDQ_CB_NORMAL)
		cmplt->err = true;
	else
		cmplt->err = false;
	complete(&cmplt->cmplt);
}

int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

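/*
 * End-to-end sketch tying the helpers together; error handling is trimmed
 * and the identifiers (cl, pkt, subsys, offset, value, event) stand for the
 * caller's own values. cmdq_pkt_flush() blocks until the GCE has executed
 * the packet:
 *
 *	cmdq_pkt_write(pkt, subsys, offset, value);
 *	cmdq_pkt_wfe(pkt, event);
 *	cmdq_pkt_flush(pkt);
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */
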
MODULE_LICENSE("GPL v2");