/*
 * Copyright (c) 2016-2017, Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mailbox_client.h>

#include "rpmsg_internal.h"
#include "qcom_glink_native.h"

#define GLINK_NAME_SIZE		32

#define RPM_GLINK_CID_MIN	1
#define RPM_GLINK_CID_MAX	65536

struct glink_msg {
	__le16 cmd;
	__le16 param1;
	__le32 param2;
	u8 data[];
} __packed;

/**
 * struct glink_defer_cmd - deferred incoming control message
 * @node: list node
 * @msg: message header
 * @data: payload of the message
 *
 * Copy of a received control message, to be added to @rx_queue and processed
 * by @rx_work of @qcom_glink.
 */
struct glink_defer_cmd {
	struct list_head node;

	struct glink_msg msg;
	u8 data[];
};

/**
 * struct qcom_glink - driver context, relates to one remote subsystem
 * @dev: reference to the associated struct device
 * @mbox_client: mailbox client
 * @mbox_chan: mailbox channel
 * @rx_pipe: pipe object for receive FIFO
 * @tx_pipe: pipe object for transmit FIFO
 * @irq: IRQ for signaling incoming events
 * @rx_work: worker for handling received control messages
 * @rx_lock: protects the @rx_queue
 * @rx_queue: queue of received control messages to be processed in @rx_work
 * @tx_lock: synchronizes operations on the tx fifo
 * @idr_lock: synchronizes @lcids and @rcids modifications
 * @lcids: idr of all channels with a known local channel id
 * @rcids: idr of all channels with a known remote channel id
 */
struct qcom_glink {
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct qcom_glink_pipe *rx_pipe;
	struct qcom_glink_pipe *tx_pipe;

	int irq;

	struct work_struct rx_work;
	spinlock_t rx_lock;
	struct list_head rx_queue;

	struct mutex tx_lock;

	struct mutex idr_lock;
	struct idr lcids;
	struct idr rcids;
};

enum {
	GLINK_STATE_CLOSED,
	GLINK_STATE_OPENING,
	GLINK_STATE_OPEN,
	GLINK_STATE_CLOSING,
};

/**
 * struct glink_channel - internal representation of a channel
 * @rpdev: rpdev reference, only used for primary endpoints
 * @ept: rpmsg endpoint this channel is associated with
 * @glink: qcom_glink context handle
 * @refcount: refcount for the channel object
 * @recv_lock: guard for @ept.cb
 * @name: unique channel name/identifier
 * @lcid: channel id, in local space
 * @rcid: channel id, in remote space
 * @buf: receive buffer, for gathering fragments
 * @buf_offset: write offset in @buf
 * @buf_size: size of current @buf
 * @open_ack: completed once remote has acked the open-request
 * @open_req: completed once open-request has been received
 */
struct glink_channel {
	struct rpmsg_endpoint ept;

	struct rpmsg_device *rpdev;
	struct qcom_glink *glink;

	struct kref refcount;

	spinlock_t recv_lock;

	char *name;
	unsigned int lcid;
	unsigned int rcid;

	void *buf;
	int buf_offset;
	int buf_size;

	struct completion open_ack;
	struct completion open_req;
};

#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)

static const struct rpmsg_endpoint_ops glink_endpoint_ops;

#define RPM_CMD_VERSION			0
#define RPM_CMD_VERSION_ACK		1
#define RPM_CMD_OPEN			2
#define RPM_CMD_CLOSE			3
#define RPM_CMD_OPEN_ACK		4
#define RPM_CMD_TX_DATA			9
#define RPM_CMD_CLOSE_ACK		11
#define RPM_CMD_TX_DATA_CONT		12
#define RPM_CMD_READ_NOTIF		13

#define GLINK_FEATURE_INTENTLESS	BIT(1)

static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
						      const char *name)
{
	struct glink_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	/* Setup glink internal glink_channel data */
	spin_lock_init(&channel->recv_lock);
	channel->glink = glink;
	channel->name = kstrdup(name, GFP_KERNEL);

	init_completion(&channel->open_req);
	init_completion(&channel->open_ack);

	kref_init(&channel->refcount);

	return channel;
}

static void qcom_glink_channel_release(struct kref *ref)
{
	struct glink_channel *channel = container_of(ref, struct glink_channel,
						     refcount);

	kfree(channel->name);
	kfree(channel);
}

static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
{
	return glink->rx_pipe->avail(glink->rx_pipe);
}

static void qcom_glink_rx_peak(struct qcom_glink *glink,
			       void *data, size_t count)
{
	glink->rx_pipe->peak(glink->rx_pipe, data, count);
}

static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
{
	glink->rx_pipe->advance(glink->rx_pipe, count);
}

static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
{
	return glink->tx_pipe->avail(glink->tx_pipe);
}

static void qcom_glink_tx_write(struct qcom_glink *glink,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
}

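/**
 * qcom_glink_tx() - write a command and optional payload to the tx fifo
 * @glink: glink edge to transmit on
 * @hdr: pointer to the command header
 * @hlen: length of @hdr
 * @data: optional payload following the header
 * @dlen: length of @data
 * @wait: sleep and retry when the fifo is full, instead of failing
 *
 * The combined length of @hdr and @data must be 8-byte aligned. After the
 * write, the remote is kicked through the mailbox channel.
 *
 * Returns 0 on success, negative errno otherwise.
 */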
static int qcom_glink_tx(struct qcom_glink *glink,
			 const void *hdr, size_t hlen,
			 const void *data, size_t dlen, bool wait)
{
	unsigned int tlen = hlen + dlen;
	int ret;

	/* Reject packets that are too big */
	if (tlen >= glink->tx_pipe->length)
		return -EINVAL;

	if (WARN(tlen % 8, "Unaligned TX request"))
		return -EINVAL;

	ret = mutex_lock_interruptible(&glink->tx_lock);
	if (ret)
		return ret;

	while (qcom_glink_tx_avail(glink) < tlen) {
		if (!wait) {
			ret = -ENOMEM;
			goto out;
		}

		usleep_range(10000, 15000);
	}

	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);

	mbox_send_message(glink->mbox_chan, NULL);
	mbox_client_txdone(glink->mbox_chan, 0);

out:
	mutex_unlock(&glink->tx_lock);

	return ret;
}

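/**
 * qcom_glink_send_version() - send our protocol version to the remote
 * @glink: glink edge to negotiate on
 *
 * Advertises version 1 of the protocol with the intentless feature set.
 *
 * Returns 0 on success, negative errno otherwise.
 */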
static int qcom_glink_send_version(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
	msg.param1 = cpu_to_le16(1);
	msg.param2 = cpu_to_le32(GLINK_FEATURE_INTENTLESS);

	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

static void qcom_glink_send_version_ack(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
	msg.param1 = cpu_to_le16(1);
	msg.param2 = cpu_to_le32(0);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

static void qcom_glink_send_open_ack(struct qcom_glink *glink,
				     struct glink_channel *channel)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
	msg.param1 = cpu_to_le16(channel->rcid);
	msg.param2 = cpu_to_le32(0);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

/**
 * qcom_glink_send_open_req() - send an RPM_CMD_OPEN request to the remote
 * @glink: ptr to the glink edge
 * @channel: ptr to the channel that the open request is sent for
 *
 * Allocates a local channel id and sends an RPM_CMD_OPEN message to the
 * remote. Will return with refcount held, regardless of outcome.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int qcom_glink_send_open_req(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	struct {
		struct glink_msg msg;
		u8 name[GLINK_NAME_SIZE];
	} __packed req;
	int name_len = strlen(channel->name) + 1;
	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
	int ret;

	kref_get(&channel->refcount);

	mutex_lock(&glink->idr_lock);
	ret = idr_alloc_cyclic(&glink->lcids, channel,
			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
			       GFP_KERNEL);
	mutex_unlock(&glink->idr_lock);
	if (ret < 0)
		return ret;

	channel->lcid = ret;

	req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(name_len);
	strcpy(req.name, channel->name);

	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
	if (ret)
		goto remove_idr;

	return 0;

remove_idr:
	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	mutex_unlock(&glink->idr_lock);

	return ret;
}

static void qcom_glink_send_close_req(struct qcom_glink *glink,
				      struct glink_channel *channel)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
	req.param1 = cpu_to_le16(channel->lcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

static void qcom_glink_send_close_ack(struct qcom_glink *glink,
				      unsigned int rcid)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
	req.param1 = cpu_to_le16(rcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

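/**
 * qcom_glink_rx_defer() - defer a control message to process context
 * @glink: glink edge the message arrived on
 * @extra: size of the payload following the message header
 *
 * Copies the command at the head of the rx fifo into a glink_defer_cmd,
 * queues it on @rx_queue for qcom_glink_work() to handle and advances the
 * fifo past the message.
 *
 * Returns 0 on success, negative errno otherwise.
 */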
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
{
	struct glink_defer_cmd *dcmd;

	extra = ALIGN(extra, 8);

	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
		dev_dbg(glink->dev, "Insufficient data in rx fifo");
		return -ENXIO;
	}

	dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC);
	if (!dcmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcmd->node);

	qcom_glink_rx_peak(glink, &dcmd->msg, sizeof(dcmd->msg) + extra);

	spin_lock(&glink->rx_lock);
	list_add_tail(&dcmd->node, &glink->rx_queue);
	spin_unlock(&glink->rx_lock);

	schedule_work(&glink->rx_work);
	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);

	return 0;
}

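/**
 * qcom_glink_rx_data() - process a data fragment from the rx fifo
 * @glink: glink edge the data arrived on
 * @avail: number of bytes currently available in the rx fifo
 *
 * Copies the chunk into the channel's reassembly buffer and, once no
 * fragments remain, passes the complete message to the endpoint callback.
 *
 * Returns 0 on success, negative errno otherwise.
 */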
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
	struct glink_channel *channel;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed hdr;
	unsigned int chunk_size;
	unsigned int left_size;
	unsigned int rcid;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}

	qcom_glink_rx_peak(glink, &hdr, sizeof(hdr));
	chunk_size = le32_to_cpu(hdr.chunk_size);
	left_size = le32_to_cpu(hdr.left_size);

	if (avail < sizeof(hdr) + chunk_size) {
		dev_dbg(glink->dev, "Payload not yet in fifo\n");
		return -EAGAIN;
	}

	if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
		return -EINVAL;

	rcid = le16_to_cpu(hdr.msg.param1);
	channel = idr_find(&glink->rcids, rcid);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");

		/* Drop the message */
		qcom_glink_rx_advance(glink,
				      ALIGN(sizeof(hdr) + chunk_size, 8));
		return 0;
	}

	/* Might have an ongoing, fragmented, message to append */
	if (!channel->buf) {
		channel->buf = kmalloc(chunk_size + left_size, GFP_ATOMIC);
		if (!channel->buf)
			return -ENOMEM;

		channel->buf_size = chunk_size + left_size;
		channel->buf_offset = 0;
	}

	qcom_glink_rx_advance(glink, sizeof(hdr));

	if (channel->buf_size - channel->buf_offset < chunk_size) {
		dev_err(glink->dev, "Insufficient space in input buffer\n");

		/* The packet header lied, drop payload */
		qcom_glink_rx_advance(glink, chunk_size);
		return -ENOMEM;
	}

	qcom_glink_rx_peak(glink, channel->buf + channel->buf_offset,
			   chunk_size);
	channel->buf_offset += chunk_size;

	/* Handle message when no fragments remain to be received */
	if (!left_size) {
		spin_lock(&channel->recv_lock);
		if (channel->ept.cb) {
			channel->ept.cb(channel->ept.rpdev,
					channel->buf,
					channel->buf_offset,
					channel->ept.priv,
					RPMSG_ADDR_ANY);
		}
		spin_unlock(&channel->recv_lock);

		kfree(channel->buf);
		channel->buf = NULL;
		channel->buf_size = 0;
	}

	/* Each message starts at 8 byte aligned address */
	qcom_glink_rx_advance(glink, ALIGN(chunk_size, 8));

	return 0;
}

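/**
 * qcom_glink_rx_open_ack() - the remote acked our open request
 * @glink: glink edge the ack arrived on
 * @lcid: local channel id that was acked
 *
 * Completes @open_ack so that a pending open operation can continue.
 *
 * Returns 0 on success, -EINVAL if the channel is unknown.
 */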
static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;

	channel = idr_find(&glink->lcids, lcid);
	if (!channel) {
		dev_err(glink->dev, "Invalid open ack packet\n");
		return -EINVAL;
	}

	complete(&channel->open_ack);

	return 0;
}

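/**
 * qcom_glink_native_intr() - interrupt handler for incoming G-Link traffic
 * @irq: the IRQ number
 * @data: the qcom_glink edge the interrupt was signalled for
 *
 * Drains the rx fifo, handling data and open-ack messages directly and
 * deferring the remaining control commands to qcom_glink_work().
 */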
static irqreturn_t qcom_glink_native_intr(int irq, void *data)
{
	struct qcom_glink *glink = data;
	struct glink_msg msg;
	unsigned int param1;
	unsigned int param2;
	unsigned int avail;
	unsigned int cmd;
	int ret;

	for (;;) {
		avail = qcom_glink_rx_avail(glink);
		if (avail < sizeof(msg))
			break;

		qcom_glink_rx_peak(glink, &msg, sizeof(msg));

		cmd = le16_to_cpu(msg.cmd);
		param1 = le16_to_cpu(msg.param1);
		param2 = le32_to_cpu(msg.param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
		case RPM_CMD_VERSION_ACK:
		case RPM_CMD_CLOSE:
		case RPM_CMD_CLOSE_ACK:
			ret = qcom_glink_rx_defer(glink, 0);
			break;
		case RPM_CMD_OPEN_ACK:
			ret = qcom_glink_rx_open_ack(glink, param1);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_OPEN:
			ret = qcom_glink_rx_defer(glink, param2);
			break;
		case RPM_CMD_TX_DATA:
		case RPM_CMD_TX_DATA_CONT:
			ret = qcom_glink_rx_data(glink, avail);
			break;
		case RPM_CMD_READ_NOTIF:
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));

			mbox_send_message(glink->mbox_chan, NULL);
			mbox_client_txdone(glink->mbox_chan, 0);

			ret = 0;
			break;
		default:
			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}

	return IRQ_HANDLED;
}

/* Locally initiated rpmsg_create_ept */
static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
						     const char *name)
{
	struct glink_channel *channel;
	int ret;

	channel = qcom_glink_alloc_channel(glink, name);
	if (IS_ERR(channel))
		return ERR_CAST(channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto release_channel;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret)
		goto err_timeout;

	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
	if (!ret)
		goto err_timeout;

	qcom_glink_send_open_ack(glink, channel);

	return channel;

err_timeout:
	/* qcom_glink_send_open_req() did register the channel in lcids */
	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->lcids, channel->lcid);
	mutex_unlock(&glink->idr_lock);

release_channel:
	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	return ERR_PTR(-ETIMEDOUT);
}

/* Remote initiated rpmsg_create_ept */
static int qcom_glink_create_remote(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	int ret;

	qcom_glink_send_open_ack(glink, channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto close_link;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto close_link;
	}

	return 0;

close_link:
	/*
	 * Send a close request to "undo" our open-ack. The close-ack will
	 * release the last reference.
	 */
	qcom_glink_send_close_req(glink, channel);

	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}

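/**
 * qcom_glink_create_ept() - create an rpmsg endpoint on a glink channel
 * @rpdev: rpmsg device the endpoint is created for
 * @cb: rx callback to install on the endpoint
 * @priv: private data for @cb
 * @chinfo: channel info, of which only the name is used
 *
 * Reuses a channel already announced by the remote when one matches @chinfo,
 * and otherwise initiates a new, locally created, channel.
 *
 * Returns the endpoint on success, NULL on failure.
 */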
static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
						    rpmsg_rx_cb_t cb,
						    void *priv,
						    struct rpmsg_channel_info
						    chinfo)
{
	struct glink_channel *parent = to_glink_channel(rpdev->ept);
	struct glink_channel *channel;
	struct qcom_glink *glink = parent->glink;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int cid;
	int ret;

	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}

	if (!channel) {
		channel = qcom_glink_create_local(glink, name);
		if (IS_ERR(channel))
			return NULL;
	} else {
		ret = qcom_glink_create_remote(glink, channel);
		if (ret)
			return NULL;
	}

	ept = &channel->ept;
	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &glink_endpoint_ops;

	return ept;
}

static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->ept.cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Decouple the potential rpdev from the channel */
	channel->rpdev = NULL;

	qcom_glink_send_close_req(glink, channel);
}

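/**
 * __qcom_glink_send() - transmit a message on a channel
 * @channel: channel to transmit on
 * @data: message payload, must be a multiple of 8 bytes in length
 * @len: length of @data
 * @wait: block until there is room in the tx fifo
 *
 * Returns 0 on success, negative errno otherwise.
 */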
static int __qcom_glink_send(struct glink_channel *channel,
			     void *data, int len, bool wait)
{
	struct qcom_glink *glink = channel->glink;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed req;

	if (WARN(len % 8, "RPM GLINK expects 8 byte aligned messages\n"))
		return -EINVAL;

	req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(channel->rcid);
	req.chunk_size = cpu_to_le32(len);
	req.left_size = cpu_to_le32(0);

	return qcom_glink_tx(glink, &req, sizeof(req), data, len, wait);
}

static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

/*
 * Finds the device_node for the glink child interested in this channel.
 */
static struct device_node *qcom_glink_match_channel(struct device_node *node,
						    const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(node, child) {
		key = "qcom,glink-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
};

static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.trysend = qcom_glink_trysend,
};

static void qcom_glink_rpdev_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	channel->rpdev = NULL;
	kfree(rpdev);
}

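/**
 * qcom_glink_rx_open() - handle an open request from the remote
 * @glink: glink edge the request arrived on
 * @rcid: remote channel id of the channel being opened
 * @name: name of the channel
 *
 * Ties the remote channel id to a local channel object, creating and
 * registering an rpmsg device for it if the open was initiated by the
 * remote.
 *
 * Returns 0 on success, negative errno otherwise.
 */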
static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
			      char *name)
{
	struct glink_channel *channel;
	struct rpmsg_device *rpdev;
	bool create_device = false;
	struct device_node *node;
	int lcid;
	int ret;

	idr_for_each_entry(&glink->lcids, channel, lcid) {
		if (!strcmp(channel->name, name))
			break;
	}

	if (!channel) {
		channel = qcom_glink_alloc_channel(glink, name);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		/* The opening dance was initiated by the remote */
		create_device = true;
	}

	mutex_lock(&glink->idr_lock);
	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_KERNEL);
	if (ret < 0) {
		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
		mutex_unlock(&glink->idr_lock);
		goto free_channel;
	}
	channel->rcid = ret;
	mutex_unlock(&glink->idr_lock);

	complete(&channel->open_req);

	if (create_device) {
		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
		if (!rpdev) {
			ret = -ENOMEM;
			goto rcid_remove;
		}

		rpdev->ept = &channel->ept;
		strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE);
		rpdev->src = RPMSG_ADDR_ANY;
		rpdev->dst = RPMSG_ADDR_ANY;
		rpdev->ops = &glink_device_ops;

		node = qcom_glink_match_channel(glink->dev->of_node, name);
		rpdev->dev.of_node = node;
		rpdev->dev.parent = glink->dev;
		rpdev->dev.release = qcom_glink_rpdev_release;

		ret = rpmsg_register_device(rpdev);
		if (ret)
			goto free_rpdev;

		channel->rpdev = rpdev;
	}

	return 0;

free_rpdev:
	kfree(rpdev);
rcid_remove:
	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	mutex_unlock(&glink->idr_lock);
free_channel:
	/* Release the reference, iff we took it */
	if (create_device)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}

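/**
 * qcom_glink_rx_close() - handle a close request from the remote
 * @glink: glink edge the request arrived on
 * @rcid: remote channel id of the channel to close
 *
 * Unregisters the associated rpmsg device, acks the close request and drops
 * the channel's remote id and a channel reference.
 */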
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;

	channel = idr_find(&glink->rcids, rcid);
	if (WARN(!channel, "close request on unknown channel\n"))
		return;

	if (channel->rpdev) {
		strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}

	qcom_glink_send_close_ack(glink, channel->rcid);

	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	mutex_unlock(&glink->idr_lock);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}

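/**
 * qcom_glink_rx_close_ack() - the remote acked our close request
 * @glink: glink edge the ack arrived on
 * @lcid: local channel id of the channel being torn down
 *
 * Removes the channel from @lcids and drops a channel reference.
 */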
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;

	channel = idr_find(&glink->lcids, lcid);
	if (WARN(!channel, "close ack on unknown channel\n"))
		return;

	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	mutex_unlock(&glink->idr_lock);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}

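/**
 * qcom_glink_work() - process deferred control messages
 * @work: rx_work of the qcom_glink edge
 *
 * Drains @rx_queue and dispatches the version, open and close commands that
 * the interrupt handler deferred to process context.
 */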
static void qcom_glink_work(struct work_struct *work)
{
	struct qcom_glink *glink = container_of(work, struct qcom_glink,
						rx_work);
	struct glink_defer_cmd *dcmd;
	struct glink_msg *msg;
	unsigned long flags;
	unsigned int param1;
	unsigned int param2;
	unsigned int cmd;

	for (;;) {
		spin_lock_irqsave(&glink->rx_lock, flags);
		if (list_empty(&glink->rx_queue)) {
			spin_unlock_irqrestore(&glink->rx_lock, flags);
			break;
		}
		dcmd = list_first_entry(&glink->rx_queue,
					struct glink_defer_cmd, node);
		list_del(&dcmd->node);
		spin_unlock_irqrestore(&glink->rx_lock, flags);

		msg = &dcmd->msg;
		cmd = le16_to_cpu(msg->cmd);
		param1 = le16_to_cpu(msg->param1);
		param2 = le32_to_cpu(msg->param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
			qcom_glink_send_version_ack(glink);
			break;
		case RPM_CMD_VERSION_ACK:
			break;
		case RPM_CMD_OPEN:
			qcom_glink_rx_open(glink, param1, msg->data);
			break;
		case RPM_CMD_CLOSE:
			qcom_glink_rx_close(glink, param1);
			break;
		case RPM_CMD_CLOSE_ACK:
			qcom_glink_rx_close_ack(glink, param1);
			break;
		default:
			WARN(1, "Unknown defer object %d\n", cmd);
			break;
		}

		kfree(dcmd);
	}
}

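/**
 * qcom_glink_native_probe() - set up a glink edge on top of a pair of fifos
 * @dev: device handling this edge
 * @rx: pipe object for the receive fifo
 * @tx: pipe object for the transmit fifo
 *
 * Requests the outgoing mailbox channel and the incoming interrupt, then
 * kicks off version negotiation with the remote.
 *
 * Returns a qcom_glink handle on success, ERR_PTR() on failure.
 */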
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
					   struct qcom_glink_pipe *rx,
					   struct qcom_glink_pipe *tx)
{
	int irq;
	int ret;
	struct qcom_glink *glink;

	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
	if (!glink)
		return ERR_PTR(-ENOMEM);

	glink->dev = dev;
	glink->tx_pipe = tx;
	glink->rx_pipe = rx;

	mutex_init(&glink->tx_lock);
	spin_lock_init(&glink->rx_lock);
	INIT_LIST_HEAD(&glink->rx_queue);
	INIT_WORK(&glink->rx_work, qcom_glink_work);

	mutex_init(&glink->idr_lock);
	idr_init(&glink->lcids);
	idr_init(&glink->rcids);

	glink->mbox_client.dev = dev;
	glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
	if (IS_ERR(glink->mbox_chan)) {
		if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire IPC channel\n");
		return ERR_CAST(glink->mbox_chan);
	}

	irq = of_irq_get(dev->of_node, 0);
	ret = devm_request_irq(dev, irq,
			       qcom_glink_native_intr,
			       IRQF_NO_SUSPEND | IRQF_SHARED,
			       "glink-native", glink);
	if (ret) {
		dev_err(dev, "failed to request IRQ\n");
		return ERR_PTR(ret);
	}

	glink->irq = irq;

	ret = qcom_glink_send_version(glink);
	if (ret)
		return ERR_PTR(ret);

	return glink;
}

static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

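/**
 * qcom_glink_native_remove() - tear down a glink edge
 * @glink: glink edge to tear down
 *
 * Disables the interrupt, unregisters all child rpmsg devices and releases
 * any channels still waiting for a close-ack from the remote.
 */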
void qcom_glink_native_remove(struct qcom_glink *glink)
{
	struct glink_channel *channel;
	int cid;
	int ret;

	disable_irq(glink->irq);
	cancel_work_sync(&glink->rx_work);

	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
	if (ret)
		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);

	/* Release any defunct local channels, waiting for close-ack */
	idr_for_each_entry(&glink->lcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	idr_destroy(&glink->lcids);
	idr_destroy(&glink->rcids);
1017}