blob: 2ad171585d8fd40081ed5295e3164019f048ceab [file] [log] [blame]
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301/*
2 * Copyright (c) 2016-2017, Linaro Ltd
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/idr.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/list.h>
18#include <linux/mfd/syscon.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23#include <linux/platform_device.h>
24#include <linux/regmap.h>
25#include <linux/rpmsg.h>
26#include <linux/slab.h>
27#include <linux/workqueue.h>
28#include <linux/mailbox_client.h>
29
30#include "rpmsg_internal.h"
31#include "qcom_glink_native.h"
32
33#define GLINK_NAME_SIZE 32
Sricharan Rd31ad612017-08-24 12:51:32 +053034#define GLINK_VERSION_1 1
Bjorn Andersson835764dd2017-08-24 12:51:26 +053035
36#define RPM_GLINK_CID_MIN 1
37#define RPM_GLINK_CID_MAX 65536
38
/*
 * struct glink_msg - G-Link command message header, little-endian wire format
 * @cmd:    RPM_CMD_* command identifier
 * @param1: first command parameter (typically a channel id)
 * @param2: second command parameter (command specific)
 * @data:   optional trailing payload
 */
struct glink_msg {
	__le16 cmd;
	__le16 param1;
	__le32 param2;
	u8 data[];
} __packed;
45
/**
 * struct glink_defer_cmd - deferred incoming control message
 * @node: list node
 * @msg: message header
 * @data: payload of the message
 *
 * Copy of a received control message, to be added to @rx_queue and processed
 * by @rx_work of @qcom_glink.
 */
struct glink_defer_cmd {
	struct list_head node;

	struct glink_msg msg;
	u8 data[];
};
61
/**
 * struct glink_core_rx_intent - RX intent
 * RX intent
 *
 * @data: pointer to the data (may be NULL for zero-copy)
 * @id: remote or local intent ID
 * @size: size of the original intent (do not modify)
 * @reuse: To mark if the intent can be reused after first use
 * @in_use: To mark if intent is already in use for the channel
 * @offset: next write offset (initially 0)
 * @node: list node, used to queue the intent on @done_intents of the channel
 *        while it awaits an rx_done indication to the remote
 */
struct glink_core_rx_intent {
	void *data;
	u32 id;
	size_t size;
	bool reuse;
	bool in_use;
	u32 offset;

	struct list_head node;
};
83
/**
 * struct qcom_glink - driver context, relates to one remote subsystem
 * @dev: reference to the associated struct device
 * @mbox_client: mailbox client
 * @mbox_chan: mailbox channel
 * @rx_pipe: pipe object for receive FIFO
 * @tx_pipe: pipe object for transmit FIFO
 * @irq: IRQ for signaling incoming events
 * @rx_work: worker for handling received control messages
 * @rx_lock: protects the @rx_queue
 * @rx_queue: queue of received control messages to be processed in @rx_work
 * @tx_lock: synchronizes operations on the tx fifo
 * @idr_lock: synchronizes @lcids and @rcids modifications
 * @lcids: idr of all channels with a known local channel id
 * @rcids: idr of all channels with a known remote channel id
 * @features: feature bits negotiated with the remote during version exchange
 * @intentless: true if this edge does not use receive intents (no RX_DONE)
 */
struct qcom_glink {
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct qcom_glink_pipe *rx_pipe;
	struct qcom_glink_pipe *tx_pipe;

	int irq;

	struct work_struct rx_work;
	spinlock_t rx_lock;
	struct list_head rx_queue;

	struct mutex tx_lock;

	spinlock_t idr_lock;
	struct idr lcids;
	struct idr rcids;
	unsigned long features;

	bool intentless;
};
124
/* Lifecycle states of a glink channel */
enum {
	GLINK_STATE_CLOSED,
	GLINK_STATE_OPENING,
	GLINK_STATE_OPEN,
	GLINK_STATE_CLOSING,
};
131
/**
 * struct glink_channel - internal representation of a channel
 * @rpdev: rpdev reference, only used for primary endpoints
 * @ept: rpmsg endpoint this channel is associated with
 * @glink: qcom_glink context handle
 * @refcount: refcount for the channel object
 * @recv_lock: guard for @ept.cb
 * @name: unique channel name/identifier
 * @lcid: channel id, in local space
 * @rcid: channel id, in remote space
 * @intent_lock: lock for protection of @liids and @done_intents
 * @liids: idr of all local intents
 * @intent_work: worker responsible for transmitting rx_done packets
 * @done_intents: list of intents that needs to be announced rx_done
 * @buf: receive buffer, for gathering fragments
 * @buf_offset: write offset in @buf
 * @buf_size: size of current @buf
 * @open_ack: completed once remote has acked the open-request
 * @open_req: completed once open-request has been received
 */
struct glink_channel {
	struct rpmsg_endpoint ept;

	struct rpmsg_device *rpdev;
	struct qcom_glink *glink;

	struct kref refcount;

	spinlock_t recv_lock;

	char *name;
	unsigned int lcid;
	unsigned int rcid;

	spinlock_t intent_lock;
	struct idr liids;
	struct work_struct intent_work;
	struct list_head done_intents;

	struct glink_core_rx_intent *buf;
	int buf_offset;
	int buf_size;

	struct completion open_ack;
	struct completion open_req;
};
178
179#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)
180
181static const struct rpmsg_endpoint_ops glink_endpoint_ops;
182
183#define RPM_CMD_VERSION 0
184#define RPM_CMD_VERSION_ACK 1
185#define RPM_CMD_OPEN 2
186#define RPM_CMD_CLOSE 3
187#define RPM_CMD_OPEN_ACK 4
Sricharan R933b45d2017-08-24 12:51:34 +0530188#define RPM_CMD_INTENT 5
Sricharan R1d2ea362017-08-24 12:51:37 +0530189#define RPM_CMD_RX_DONE 6
Sricharan R933b45d2017-08-24 12:51:34 +0530190#define RPM_CMD_RX_INTENT_REQ 7
191#define RPM_CMD_RX_INTENT_REQ_ACK 8
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530192#define RPM_CMD_TX_DATA 9
193#define RPM_CMD_CLOSE_ACK 11
194#define RPM_CMD_TX_DATA_CONT 12
195#define RPM_CMD_READ_NOTIF 13
Sricharan R1d2ea362017-08-24 12:51:37 +0530196#define RPM_CMD_RX_DONE_W_REUSE 14
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530197
198#define GLINK_FEATURE_INTENTLESS BIT(1)
199
Sricharan R1d2ea362017-08-24 12:51:37 +0530200static void qcom_glink_rx_done_work(struct work_struct *work);
201
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530202static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
203 const char *name)
204{
205 struct glink_channel *channel;
206
207 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
208 if (!channel)
209 return ERR_PTR(-ENOMEM);
210
211 /* Setup glink internal glink_channel data */
212 spin_lock_init(&channel->recv_lock);
Sricharan R933b45d2017-08-24 12:51:34 +0530213 spin_lock_init(&channel->intent_lock);
Sricharan R1d2ea362017-08-24 12:51:37 +0530214
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530215 channel->glink = glink;
216 channel->name = kstrdup(name, GFP_KERNEL);
217
218 init_completion(&channel->open_req);
219 init_completion(&channel->open_ack);
220
Sricharan R1d2ea362017-08-24 12:51:37 +0530221 INIT_LIST_HEAD(&channel->done_intents);
222 INIT_WORK(&channel->intent_work, qcom_glink_rx_done_work);
223
Sricharan R933b45d2017-08-24 12:51:34 +0530224 idr_init(&channel->liids);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530225 kref_init(&channel->refcount);
226
227 return channel;
228}
229
230static void qcom_glink_channel_release(struct kref *ref)
231{
232 struct glink_channel *channel = container_of(ref, struct glink_channel,
233 refcount);
Sricharan R933b45d2017-08-24 12:51:34 +0530234 unsigned long flags;
235
236 spin_lock_irqsave(&channel->intent_lock, flags);
237 idr_destroy(&channel->liids);
238 spin_unlock_irqrestore(&channel->intent_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530239
240 kfree(channel->name);
241 kfree(channel);
242}
243
244static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
245{
246 return glink->rx_pipe->avail(glink->rx_pipe);
247}
248
249static void qcom_glink_rx_peak(struct qcom_glink *glink,
Bjorn Anderssonb88eee92017-08-24 12:51:36 +0530250 void *data, unsigned int offset, size_t count)
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530251{
Bjorn Anderssonb88eee92017-08-24 12:51:36 +0530252 glink->rx_pipe->peak(glink->rx_pipe, data, offset, count);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530253}
254
255static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
256{
257 glink->rx_pipe->advance(glink->rx_pipe, count);
258}
259
260static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
261{
262 return glink->tx_pipe->avail(glink->tx_pipe);
263}
264
265static void qcom_glink_tx_write(struct qcom_glink *glink,
266 const void *hdr, size_t hlen,
267 const void *data, size_t dlen)
268{
269 glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
270}
271
272static int qcom_glink_tx(struct qcom_glink *glink,
273 const void *hdr, size_t hlen,
274 const void *data, size_t dlen, bool wait)
275{
276 unsigned int tlen = hlen + dlen;
277 int ret;
278
279 /* Reject packets that are too big */
280 if (tlen >= glink->tx_pipe->length)
281 return -EINVAL;
282
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530283 ret = mutex_lock_interruptible(&glink->tx_lock);
284 if (ret)
285 return ret;
286
287 while (qcom_glink_tx_avail(glink) < tlen) {
288 if (!wait) {
Sricharan Ra7df9df2017-08-24 12:51:28 +0530289 ret = -EAGAIN;
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530290 goto out;
291 }
292
293 usleep_range(10000, 15000);
294 }
295
296 qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
297
298 mbox_send_message(glink->mbox_chan, NULL);
299 mbox_client_txdone(glink->mbox_chan, 0);
300
301out:
302 mutex_unlock(&glink->tx_lock);
303
304 return ret;
305}
306
307static int qcom_glink_send_version(struct qcom_glink *glink)
308{
309 struct glink_msg msg;
310
311 msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
Sricharan Rd31ad612017-08-24 12:51:32 +0530312 msg.param1 = cpu_to_le16(GLINK_VERSION_1);
313 msg.param2 = cpu_to_le32(glink->features);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530314
315 return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
316}
317
318static void qcom_glink_send_version_ack(struct qcom_glink *glink)
319{
320 struct glink_msg msg;
321
322 msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
Sricharan Rd31ad612017-08-24 12:51:32 +0530323 msg.param1 = cpu_to_le16(GLINK_VERSION_1);
324 msg.param2 = cpu_to_le32(glink->features);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530325
326 qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
327}
328
329static void qcom_glink_send_open_ack(struct qcom_glink *glink,
330 struct glink_channel *channel)
331{
332 struct glink_msg msg;
333
334 msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
335 msg.param1 = cpu_to_le16(channel->rcid);
336 msg.param2 = cpu_to_le32(0);
337
338 qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
339}
340
341/**
342 * qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote
343 * @glink: Ptr to the glink edge
344 * @channel: Ptr to the channel that the open req is sent
345 *
346 * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
347 * Will return with refcount held, regardless of outcome.
348 *
349 * Returns 0 on success, negative errno otherwise.
350 */
351static int qcom_glink_send_open_req(struct qcom_glink *glink,
352 struct glink_channel *channel)
353{
354 struct {
355 struct glink_msg msg;
356 u8 name[GLINK_NAME_SIZE];
357 } __packed req;
358 int name_len = strlen(channel->name) + 1;
359 int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
360 int ret;
Sricharan R44f6df92017-08-24 12:51:33 +0530361 unsigned long flags;
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530362
363 kref_get(&channel->refcount);
364
Sricharan R44f6df92017-08-24 12:51:33 +0530365 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530366 ret = idr_alloc_cyclic(&glink->lcids, channel,
367 RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
Sricharan R44f6df92017-08-24 12:51:33 +0530368 GFP_ATOMIC);
369 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530370 if (ret < 0)
371 return ret;
372
373 channel->lcid = ret;
374
375 req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
376 req.msg.param1 = cpu_to_le16(channel->lcid);
377 req.msg.param2 = cpu_to_le32(name_len);
378 strcpy(req.name, channel->name);
379
380 ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
381 if (ret)
382 goto remove_idr;
383
384 return 0;
385
386remove_idr:
Sricharan R44f6df92017-08-24 12:51:33 +0530387 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530388 idr_remove(&glink->lcids, channel->lcid);
389 channel->lcid = 0;
Sricharan R44f6df92017-08-24 12:51:33 +0530390 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530391
392 return ret;
393}
394
395static void qcom_glink_send_close_req(struct qcom_glink *glink,
396 struct glink_channel *channel)
397{
398 struct glink_msg req;
399
400 req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
401 req.param1 = cpu_to_le16(channel->lcid);
402 req.param2 = 0;
403
404 qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
405}
406
407static void qcom_glink_send_close_ack(struct qcom_glink *glink,
408 unsigned int rcid)
409{
410 struct glink_msg req;
411
412 req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
413 req.param1 = cpu_to_le16(rcid);
414 req.param2 = 0;
415
416 qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
417}
418
Sricharan R1d2ea362017-08-24 12:51:37 +0530419static void qcom_glink_rx_done_work(struct work_struct *work)
420{
421 struct glink_channel *channel = container_of(work, struct glink_channel,
422 intent_work);
423 struct qcom_glink *glink = channel->glink;
424 struct glink_core_rx_intent *intent, *tmp;
425 struct {
426 u16 id;
427 u16 lcid;
428 u32 liid;
429 } __packed cmd;
430
431 unsigned int cid = channel->lcid;
432 unsigned int iid;
433 bool reuse;
434 unsigned long flags;
435
436 spin_lock_irqsave(&channel->intent_lock, flags);
437 list_for_each_entry_safe(intent, tmp, &channel->done_intents, node) {
438 list_del(&intent->node);
439 spin_unlock_irqrestore(&channel->intent_lock, flags);
440 iid = intent->id;
441 reuse = intent->reuse;
442
443 cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE;
444 cmd.lcid = cid;
445 cmd.liid = iid;
446
447 qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
448 if (!reuse) {
449 kfree(intent->data);
450 kfree(intent);
451 }
452 spin_lock_irqsave(&channel->intent_lock, flags);
453 }
454 spin_unlock_irqrestore(&channel->intent_lock, flags);
455}
456
/*
 * qcom_glink_rx_done() - mark an intent as consumed
 * @glink: glink edge context
 * @channel: channel the intent belongs to
 * @intent: the consumed intent
 *
 * On intentless edges the intent is freed immediately; otherwise the intent
 * is queued on @done_intents and @intent_work announces rx_done (and frees
 * non-reusable intents) out of atomic context.
 */
static void qcom_glink_rx_done(struct qcom_glink *glink,
			       struct glink_channel *channel,
			       struct glink_core_rx_intent *intent)
{
	/* We don't send RX_DONE to intentless systems */
	if (glink->intentless) {
		kfree(intent->data);
		kfree(intent);
		return;
	}

	/* Take it off the tree of receive intents */
	if (!intent->reuse) {
		spin_lock(&channel->intent_lock);
		idr_remove(&channel->liids, intent->id);
		spin_unlock(&channel->intent_lock);
	}

	/* Schedule the sending of a rx_done indication */
	spin_lock(&channel->intent_lock);
	list_add_tail(&intent->node, &channel->done_intents);
	spin_unlock(&channel->intent_lock);

	schedule_work(&channel->intent_work);
}
482
Sricharan Rd31ad612017-08-24 12:51:32 +0530483/**
484 * qcom_glink_receive_version() - receive version/features from remote system
485 *
486 * @glink: pointer to transport interface
487 * @r_version: remote version
488 * @r_features: remote features
489 *
490 * This function is called in response to a remote-initiated version/feature
491 * negotiation sequence.
492 */
493static void qcom_glink_receive_version(struct qcom_glink *glink,
494 u32 version,
495 u32 features)
496{
497 switch (version) {
498 case 0:
499 break;
500 case GLINK_VERSION_1:
501 glink->features &= features;
502 /* FALLTHROUGH */
503 default:
504 qcom_glink_send_version_ack(glink);
505 break;
506 }
507}
508
509/**
510 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
511 *
512 * @glink: pointer to transport interface
513 * @r_version: remote version response
514 * @r_features: remote features response
515 *
516 * This function is called in response to a local-initiated version/feature
517 * negotiation sequence and is the counter-offer from the remote side based
518 * upon the initial version and feature set requested.
519 */
520static void qcom_glink_receive_version_ack(struct qcom_glink *glink,
521 u32 version,
522 u32 features)
523{
524 switch (version) {
525 case 0:
526 /* Version negotiation failed */
527 break;
528 case GLINK_VERSION_1:
529 if (features == glink->features)
530 break;
531
532 glink->features &= features;
533 /* FALLTHROUGH */
534 default:
535 qcom_glink_send_version(glink);
536 break;
537 }
538}
539
Sricharan R933b45d2017-08-24 12:51:34 +0530540/**
541 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
542 wire format and transmit
543 * @glink: The transport to transmit on.
544 * @channel: The glink channel
545 * @granted: The request response to encode.
546 *
547 * Return: 0 on success or standard Linux error code.
548 */
549static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink,
550 struct glink_channel *channel,
551 bool granted)
552{
553 struct glink_msg msg;
554
555 msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK);
556 msg.param1 = cpu_to_le16(channel->lcid);
557 msg.param2 = cpu_to_le32(granted);
558
559 qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
560
561 return 0;
562}
563
564/**
565 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and
566 * transmit
567 * @glink: The transport to transmit on.
568 * @channel: The local channel
569 * @size: The intent to pass on to remote.
570 *
571 * Return: 0 on success or standard Linux error code.
572 */
573static int qcom_glink_advertise_intent(struct qcom_glink *glink,
574 struct glink_channel *channel,
575 struct glink_core_rx_intent *intent)
576{
577 struct command {
578 u16 id;
579 u16 lcid;
580 u32 count;
581 u32 size;
582 u32 liid;
583 } __packed;
584 struct command cmd;
585
586 cmd.id = cpu_to_le16(RPM_CMD_INTENT);
587 cmd.lcid = cpu_to_le16(channel->lcid);
588 cmd.count = cpu_to_le32(1);
589 cmd.size = cpu_to_le32(intent->size);
590 cmd.liid = cpu_to_le32(intent->id);
591
592 qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true);
593
594 return 0;
595}
596
597static struct glink_core_rx_intent *
598qcom_glink_alloc_intent(struct qcom_glink *glink,
599 struct glink_channel *channel,
600 size_t size,
601 bool reuseable)
602{
603 struct glink_core_rx_intent *intent;
604 int ret;
605 unsigned long flags;
606
607 intent = kzalloc(sizeof(*intent), GFP_KERNEL);
608
609 if (!intent)
610 return NULL;
611
612 intent->data = kzalloc(size, GFP_KERNEL);
613 if (!intent->data)
614 return NULL;
615
616 spin_lock_irqsave(&channel->intent_lock, flags);
617 ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC);
618 if (ret < 0) {
619 spin_unlock_irqrestore(&channel->intent_lock, flags);
620 return NULL;
621 }
622 spin_unlock_irqrestore(&channel->intent_lock, flags);
623
624 intent->id = ret;
625 intent->size = size;
626 intent->reuse = reuseable;
627
628 return intent;
629}
630
631/**
632 * qcom_glink_handle_intent_req() - Receive a request for rx_intent
633 * from remote side
634 * if_ptr: Pointer to the transport interface
635 * rcid: Remote channel ID
636 * size: size of the intent
637 *
638 * The function searches for the local channel to which the request for
639 * rx_intent has arrived and allocates and notifies the remote back
640 */
641static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
642 u32 cid, size_t size)
643{
644 struct glink_core_rx_intent *intent;
645 struct glink_channel *channel;
646 unsigned long flags;
647
648 spin_lock_irqsave(&glink->idr_lock, flags);
649 channel = idr_find(&glink->rcids, cid);
650 spin_unlock_irqrestore(&glink->idr_lock, flags);
651
652 if (!channel) {
653 pr_err("%s channel not found for cid %d\n", __func__, cid);
654 return;
655 }
656
657 intent = qcom_glink_alloc_intent(glink, channel, size, false);
658 if (intent)
659 qcom_glink_advertise_intent(glink, channel, intent);
660
661 qcom_glink_send_intent_req_ack(glink, channel, !!intent);
662}
663
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530664static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
665{
666 struct glink_defer_cmd *dcmd;
667
668 extra = ALIGN(extra, 8);
669
670 if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
671 dev_dbg(glink->dev, "Insufficient data in rx fifo");
672 return -ENXIO;
673 }
674
675 dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC);
676 if (!dcmd)
677 return -ENOMEM;
678
679 INIT_LIST_HEAD(&dcmd->node);
680
Bjorn Anderssonb88eee92017-08-24 12:51:36 +0530681 qcom_glink_rx_peak(glink, &dcmd->msg, 0, sizeof(dcmd->msg) + extra);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530682
683 spin_lock(&glink->rx_lock);
684 list_add_tail(&dcmd->node, &glink->rx_queue);
685 spin_unlock(&glink->rx_lock);
686
687 schedule_work(&glink->rx_work);
688 qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);
689
690 return 0;
691}
692
/*
 * qcom_glink_rx_data() - handle an incoming TX_DATA/TX_DATA_CONT fragment
 * @glink: glink edge context
 * @avail: number of bytes currently readable in the rx fifo
 *
 * Peeks the data header, locates the target channel and receive intent
 * (allocating an ad-hoc one on intentless edges), copies the chunk into the
 * intent and, once the final fragment arrives (left_size == 0), delivers the
 * assembled message to the endpoint callback and schedules rx_done.
 *
 * Returns 0 normally (including dropped messages), -EAGAIN if the full
 * fragment is not yet in the fifo, -EINVAL for unaligned chunks, or -ENOMEM.
 */
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
	struct glink_core_rx_intent *intent;
	struct glink_channel *channel;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed hdr;
	unsigned int chunk_size;
	unsigned int left_size;
	unsigned int rcid;
	unsigned int liid;
	int ret = 0;
	unsigned long flags;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}

	qcom_glink_rx_peak(glink, &hdr, 0, sizeof(hdr));
	chunk_size = le32_to_cpu(hdr.chunk_size);
	left_size = le32_to_cpu(hdr.left_size);

	/* Wait until the complete fragment is in the fifo */
	if (avail < sizeof(hdr) + chunk_size) {
		dev_dbg(glink->dev, "Payload not yet in fifo\n");
		return -EAGAIN;
	}

	if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
		return -EINVAL;

	/* param1 carries the remote channel id for data commands */
	rcid = le16_to_cpu(hdr.msg.param1);
	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->rcids, rcid);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");

		/* Drop the message */
		goto advance_rx;
	}

	if (glink->intentless) {
		/* Might have an ongoing, fragmented, message to append */
		if (!channel->buf) {
			intent = kzalloc(sizeof(*intent), GFP_ATOMIC);
			if (!intent)
				return -ENOMEM;

			/*
			 * NOTE(review): returning -ENOMEM here leaves the
			 * fragment in the fifo without advancing — presumably
			 * retried by the caller; confirm against the irq loop.
			 */
			intent->data = kmalloc(chunk_size + left_size,
					       GFP_ATOMIC);
			if (!intent->data) {
				kfree(intent);
				return -ENOMEM;
			}

			/* Marker id; never registered in the liids idr */
			intent->id = 0xbabababa;
			intent->size = chunk_size + left_size;
			intent->offset = 0;

			channel->buf = intent;
		} else {
			intent = channel->buf;
		}
	} else {
		/* param2 carries the local intent id to fill */
		liid = le32_to_cpu(hdr.msg.param2);

		spin_lock_irqsave(&channel->intent_lock, flags);
		intent = idr_find(&channel->liids, liid);
		spin_unlock_irqrestore(&channel->intent_lock, flags);

		if (!intent) {
			dev_err(glink->dev,
				"no intent found for channel %s intent %d",
				channel->name, liid);
			goto advance_rx;
		}
	}

	if (intent->size - intent->offset < chunk_size) {
		dev_err(glink->dev, "Insufficient space in intent\n");

		/* The packet header lied, drop payload */
		goto advance_rx;
	}

	/* Append this fragment after the previously received ones */
	qcom_glink_rx_peak(glink, intent->data + intent->offset,
			   sizeof(hdr), chunk_size);
	intent->offset += chunk_size;

	/* Handle message when no fragments remain to be received */
	if (!left_size) {
		spin_lock(&channel->recv_lock);
		if (channel->ept.cb) {
			channel->ept.cb(channel->ept.rpdev,
					intent->data,
					intent->offset,
					channel->ept.priv,
					RPMSG_ADDR_ANY);
		}
		spin_unlock(&channel->recv_lock);

		intent->offset = 0;
		channel->buf = NULL;

		qcom_glink_rx_done(glink, channel, intent);
	}

advance_rx:
	qcom_glink_rx_advance(glink, ALIGN(sizeof(hdr) + chunk_size, 8));

	return ret;
}
808
809static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
810{
811 struct glink_channel *channel;
812
Sricharan R44f6df92017-08-24 12:51:33 +0530813 spin_lock(&glink->idr_lock);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530814 channel = idr_find(&glink->lcids, lcid);
815 if (!channel) {
816 dev_err(glink->dev, "Invalid open ack packet\n");
817 return -EINVAL;
818 }
Sricharan R44f6df92017-08-24 12:51:33 +0530819 spin_unlock(&glink->idr_lock);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530820
821 complete(&channel->open_ack);
822
823 return 0;
824}
825
/*
 * qcom_glink_native_intr() - interrupt handler, drains the rx fifo
 * @irq: irq number (unused)
 * @data: the qcom_glink edge context
 *
 * Peeks command headers off the rx fifo and dispatches them. Cheap commands
 * are handled inline and the fifo advanced here; commands needing process
 * context are deferred to the rx worker via qcom_glink_rx_defer(), which
 * advances the fifo itself. Loops until the fifo is empty or a handler
 * reports an error (e.g. -EAGAIN for a partial fragment).
 */
static irqreturn_t qcom_glink_native_intr(int irq, void *data)
{
	struct qcom_glink *glink = data;
	struct glink_msg msg;
	unsigned int param1;
	unsigned int param2;
	unsigned int avail;
	unsigned int cmd;
	int ret;

	for (;;) {
		avail = qcom_glink_rx_avail(glink);
		if (avail < sizeof(msg))
			break;

		/* Peek only; each handler decides when to advance the fifo */
		qcom_glink_rx_peak(glink, &msg, 0, sizeof(msg));

		cmd = le16_to_cpu(msg.cmd);
		param1 = le16_to_cpu(msg.param1);
		param2 = le32_to_cpu(msg.param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
		case RPM_CMD_VERSION_ACK:
		case RPM_CMD_CLOSE:
		case RPM_CMD_CLOSE_ACK:
		case RPM_CMD_RX_INTENT_REQ:
			/* Handled in process context by the rx worker */
			ret = qcom_glink_rx_defer(glink, 0);
			break;
		case RPM_CMD_OPEN_ACK:
			ret = qcom_glink_rx_open_ack(glink, param1);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_OPEN:
			/* param2 is the length of the trailing channel name */
			ret = qcom_glink_rx_defer(glink, param2);
			break;
		case RPM_CMD_TX_DATA:
		case RPM_CMD_TX_DATA_CONT:
			ret = qcom_glink_rx_data(glink, avail);
			break;
		case RPM_CMD_READ_NOTIF:
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));

			/* Remote asked to be signalled when we consume data */
			mbox_send_message(glink->mbox_chan, NULL);
			mbox_client_txdone(glink->mbox_chan, 0);

			ret = 0;
			break;
		default:
			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}

	return IRQ_HANDLED;
}
886
887/* Locally initiated rpmsg_create_ept */
888static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
889 const char *name)
890{
891 struct glink_channel *channel;
892 int ret;
Sricharan R44f6df92017-08-24 12:51:33 +0530893 unsigned long flags;
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530894
895 channel = qcom_glink_alloc_channel(glink, name);
896 if (IS_ERR(channel))
897 return ERR_CAST(channel);
898
899 ret = qcom_glink_send_open_req(glink, channel);
900 if (ret)
901 goto release_channel;
902
903 ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
904 if (!ret)
905 goto err_timeout;
906
907 ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
908 if (!ret)
909 goto err_timeout;
910
911 qcom_glink_send_open_ack(glink, channel);
912
913 return channel;
914
915err_timeout:
916 /* qcom_glink_send_open_req() did register the channel in lcids*/
Sricharan R44f6df92017-08-24 12:51:33 +0530917 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530918 idr_remove(&glink->lcids, channel->lcid);
Sricharan R44f6df92017-08-24 12:51:33 +0530919 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530920
921release_channel:
922 /* Release qcom_glink_send_open_req() reference */
923 kref_put(&channel->refcount, qcom_glink_channel_release);
924 /* Release qcom_glink_alloc_channel() reference */
925 kref_put(&channel->refcount, qcom_glink_channel_release);
926
927 return ERR_PTR(-ETIMEDOUT);
928}
929
930/* Remote initiated rpmsg_create_ept */
931static int qcom_glink_create_remote(struct qcom_glink *glink,
932 struct glink_channel *channel)
933{
934 int ret;
935
936 qcom_glink_send_open_ack(glink, channel);
937
938 ret = qcom_glink_send_open_req(glink, channel);
939 if (ret)
940 goto close_link;
941
942 ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
943 if (!ret) {
944 ret = -ETIMEDOUT;
945 goto close_link;
946 }
947
948 return 0;
949
950close_link:
951 /*
952 * Send a close request to "undo" our open-ack. The close-ack will
953 * release the last reference.
954 */
955 qcom_glink_send_close_req(glink, channel);
956
957 /* Release qcom_glink_send_open_req() reference */
958 kref_put(&channel->refcount, qcom_glink_channel_release);
959
960 return ret;
961}
962
963static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
964 rpmsg_rx_cb_t cb,
965 void *priv,
966 struct rpmsg_channel_info
967 chinfo)
968{
969 struct glink_channel *parent = to_glink_channel(rpdev->ept);
970 struct glink_channel *channel;
971 struct qcom_glink *glink = parent->glink;
972 struct rpmsg_endpoint *ept;
973 const char *name = chinfo.name;
974 int cid;
975 int ret;
Sricharan R44f6df92017-08-24 12:51:33 +0530976 unsigned long flags;
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530977
Sricharan R44f6df92017-08-24 12:51:33 +0530978 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530979 idr_for_each_entry(&glink->rcids, channel, cid) {
980 if (!strcmp(channel->name, name))
981 break;
982 }
Sricharan R44f6df92017-08-24 12:51:33 +0530983 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +0530984
985 if (!channel) {
986 channel = qcom_glink_create_local(glink, name);
987 if (IS_ERR(channel))
988 return NULL;
989 } else {
990 ret = qcom_glink_create_remote(glink, channel);
991 if (ret)
992 return NULL;
993 }
994
995 ept = &channel->ept;
996 ept->rpdev = rpdev;
997 ept->cb = cb;
998 ept->priv = priv;
999 ept->ops = &glink_endpoint_ops;
1000
1001 return ept;
1002}
1003
1004static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
1005{
1006 struct glink_channel *channel = to_glink_channel(ept);
1007 struct qcom_glink *glink = channel->glink;
1008 unsigned long flags;
1009
1010 spin_lock_irqsave(&channel->recv_lock, flags);
1011 channel->ept.cb = NULL;
1012 spin_unlock_irqrestore(&channel->recv_lock, flags);
1013
1014 /* Decouple the potential rpdev from the channel */
1015 channel->rpdev = NULL;
1016
1017 qcom_glink_send_close_req(glink, channel);
1018}
1019
1020static int __qcom_glink_send(struct glink_channel *channel,
1021 void *data, int len, bool wait)
1022{
1023 struct qcom_glink *glink = channel->glink;
1024 struct {
1025 struct glink_msg msg;
1026 __le32 chunk_size;
1027 __le32 left_size;
1028 } __packed req;
1029
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301030 req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
1031 req.msg.param1 = cpu_to_le16(channel->lcid);
1032 req.msg.param2 = cpu_to_le32(channel->rcid);
1033 req.chunk_size = cpu_to_le32(len);
1034 req.left_size = cpu_to_le32(0);
1035
1036 return qcom_glink_tx(glink, &req, sizeof(req), data, len, wait);
1037}
1038
1039static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
1040{
1041 struct glink_channel *channel = to_glink_channel(ept);
1042
1043 return __qcom_glink_send(channel, data, len, true);
1044}
1045
1046static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
1047{
1048 struct glink_channel *channel = to_glink_channel(ept);
1049
1050 return __qcom_glink_send(channel, data, len, false);
1051}
1052
1053/*
1054 * Finds the device_node for the glink child interested in this channel.
1055 */
1056static struct device_node *qcom_glink_match_channel(struct device_node *node,
1057 const char *channel)
1058{
1059 struct device_node *child;
1060 const char *name;
1061 const char *key;
1062 int ret;
1063
1064 for_each_available_child_of_node(node, child) {
1065 key = "qcom,glink-channels";
1066 ret = of_property_read_string(child, key, &name);
1067 if (ret)
1068 continue;
1069
1070 if (strcmp(name, channel) == 0)
1071 return child;
1072 }
1073
1074 return NULL;
1075}
1076
/* Device-level rpmsg operations for glink-backed rpmsg devices */
static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
};
1080
/* Endpoint-level rpmsg operations for glink channels */
static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.trysend = qcom_glink_trysend,
};
1086
1087static void qcom_glink_rpdev_release(struct device *dev)
1088{
1089 struct rpmsg_device *rpdev = to_rpmsg_device(dev);
1090 struct glink_channel *channel = to_glink_channel(rpdev->ept);
1091
1092 channel->rpdev = NULL;
1093 kfree(rpdev);
1094}
1095
1096static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
1097 char *name)
1098{
1099 struct glink_channel *channel;
1100 struct rpmsg_device *rpdev;
1101 bool create_device = false;
1102 struct device_node *node;
1103 int lcid;
1104 int ret;
Sricharan R44f6df92017-08-24 12:51:33 +05301105 unsigned long flags;
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301106
Sricharan R44f6df92017-08-24 12:51:33 +05301107 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301108 idr_for_each_entry(&glink->lcids, channel, lcid) {
1109 if (!strcmp(channel->name, name))
1110 break;
1111 }
Sricharan R44f6df92017-08-24 12:51:33 +05301112 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301113
1114 if (!channel) {
1115 channel = qcom_glink_alloc_channel(glink, name);
1116 if (IS_ERR(channel))
1117 return PTR_ERR(channel);
1118
1119 /* The opening dance was initiated by the remote */
1120 create_device = true;
1121 }
1122
Sricharan R44f6df92017-08-24 12:51:33 +05301123 spin_lock_irqsave(&glink->idr_lock, flags);
1124 ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_ATOMIC);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301125 if (ret < 0) {
1126 dev_err(glink->dev, "Unable to insert channel into rcid list\n");
Sricharan R44f6df92017-08-24 12:51:33 +05301127 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301128 goto free_channel;
1129 }
1130 channel->rcid = ret;
Sricharan R44f6df92017-08-24 12:51:33 +05301131 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301132
1133 complete(&channel->open_req);
1134
1135 if (create_device) {
1136 rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
1137 if (!rpdev) {
1138 ret = -ENOMEM;
1139 goto rcid_remove;
1140 }
1141
1142 rpdev->ept = &channel->ept;
1143 strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE);
1144 rpdev->src = RPMSG_ADDR_ANY;
1145 rpdev->dst = RPMSG_ADDR_ANY;
1146 rpdev->ops = &glink_device_ops;
1147
1148 node = qcom_glink_match_channel(glink->dev->of_node, name);
1149 rpdev->dev.of_node = node;
1150 rpdev->dev.parent = glink->dev;
1151 rpdev->dev.release = qcom_glink_rpdev_release;
1152
1153 ret = rpmsg_register_device(rpdev);
1154 if (ret)
1155 goto free_rpdev;
1156
1157 channel->rpdev = rpdev;
1158 }
1159
1160 return 0;
1161
1162free_rpdev:
1163 kfree(rpdev);
1164rcid_remove:
Sricharan R44f6df92017-08-24 12:51:33 +05301165 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301166 idr_remove(&glink->rcids, channel->rcid);
1167 channel->rcid = 0;
Sricharan R44f6df92017-08-24 12:51:33 +05301168 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301169free_channel:
1170 /* Release the reference, iff we took it */
1171 if (create_device)
1172 kref_put(&channel->refcount, qcom_glink_channel_release);
1173
1174 return ret;
1175}
1176
1177static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
1178{
1179 struct rpmsg_channel_info chinfo;
1180 struct glink_channel *channel;
Sricharan R44f6df92017-08-24 12:51:33 +05301181 unsigned long flags;
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301182
Sricharan R44f6df92017-08-24 12:51:33 +05301183 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301184 channel = idr_find(&glink->rcids, rcid);
Sricharan R44f6df92017-08-24 12:51:33 +05301185 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301186 if (WARN(!channel, "close request on unknown channel\n"))
1187 return;
1188
Sricharan R1d2ea362017-08-24 12:51:37 +05301189 /* cancel pending rx_done work */
1190 cancel_work_sync(&channel->intent_work);
1191
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301192 if (channel->rpdev) {
1193 strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
1194 chinfo.src = RPMSG_ADDR_ANY;
1195 chinfo.dst = RPMSG_ADDR_ANY;
1196
1197 rpmsg_unregister_device(glink->dev, &chinfo);
1198 }
1199
1200 qcom_glink_send_close_ack(glink, channel->rcid);
1201
Sricharan R44f6df92017-08-24 12:51:33 +05301202 spin_lock_irqsave(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301203 idr_remove(&glink->rcids, channel->rcid);
1204 channel->rcid = 0;
Sricharan R44f6df92017-08-24 12:51:33 +05301205 spin_unlock_irqrestore(&glink->idr_lock, flags);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301206
1207 kref_put(&channel->refcount, qcom_glink_channel_release);
1208}
1209
/*
 * Handle the remote's CLOSE_ACK for local channel id @lcid: the close
 * handshake is complete, so drop the channel from the lcid idr and
 * release the reference held for the open channel.
 */
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;
	unsigned long flags;

	/* Lookup and removal are done under one hold of idr_lock */
	spin_lock_irqsave(&glink->idr_lock, flags);
	channel = idr_find(&glink->lcids, lcid);
	if (WARN(!channel, "close ack on unknown channel\n")) {
		spin_unlock_irqrestore(&glink->idr_lock, flags);
		return;
	}

	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	spin_unlock_irqrestore(&glink->idr_lock, flags);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}
1228
1229static void qcom_glink_work(struct work_struct *work)
1230{
1231 struct qcom_glink *glink = container_of(work, struct qcom_glink,
1232 rx_work);
1233 struct glink_defer_cmd *dcmd;
1234 struct glink_msg *msg;
1235 unsigned long flags;
1236 unsigned int param1;
1237 unsigned int param2;
1238 unsigned int cmd;
1239
1240 for (;;) {
1241 spin_lock_irqsave(&glink->rx_lock, flags);
1242 if (list_empty(&glink->rx_queue)) {
1243 spin_unlock_irqrestore(&glink->rx_lock, flags);
1244 break;
1245 }
1246 dcmd = list_first_entry(&glink->rx_queue,
1247 struct glink_defer_cmd, node);
1248 list_del(&dcmd->node);
1249 spin_unlock_irqrestore(&glink->rx_lock, flags);
1250
1251 msg = &dcmd->msg;
1252 cmd = le16_to_cpu(msg->cmd);
1253 param1 = le16_to_cpu(msg->param1);
1254 param2 = le32_to_cpu(msg->param2);
1255
1256 switch (cmd) {
1257 case RPM_CMD_VERSION:
Sricharan Rd31ad612017-08-24 12:51:32 +05301258 qcom_glink_receive_version(glink, param1, param2);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301259 break;
1260 case RPM_CMD_VERSION_ACK:
Sricharan Rd31ad612017-08-24 12:51:32 +05301261 qcom_glink_receive_version_ack(glink, param1, param2);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301262 break;
1263 case RPM_CMD_OPEN:
1264 qcom_glink_rx_open(glink, param1, msg->data);
1265 break;
1266 case RPM_CMD_CLOSE:
1267 qcom_glink_rx_close(glink, param1);
1268 break;
1269 case RPM_CMD_CLOSE_ACK:
1270 qcom_glink_rx_close_ack(glink, param1);
1271 break;
Sricharan R933b45d2017-08-24 12:51:34 +05301272 case RPM_CMD_RX_INTENT_REQ:
1273 qcom_glink_handle_intent_req(glink, param1, param2);
1274 break;
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301275 default:
1276 WARN(1, "Unknown defer object %d\n", cmd);
1277 break;
1278 }
1279
1280 kfree(dcmd);
1281 }
1282}
1283
1284struct qcom_glink *qcom_glink_native_probe(struct device *dev,
Sricharan Rd31ad612017-08-24 12:51:32 +05301285 unsigned long features,
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301286 struct qcom_glink_pipe *rx,
Sricharan R933b45d2017-08-24 12:51:34 +05301287 struct qcom_glink_pipe *tx,
1288 bool intentless)
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301289{
1290 int irq;
1291 int ret;
1292 struct qcom_glink *glink;
1293
1294 glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
1295 if (!glink)
1296 return ERR_PTR(-ENOMEM);
1297
1298 glink->dev = dev;
1299 glink->tx_pipe = tx;
1300 glink->rx_pipe = rx;
1301
Sricharan Rd31ad612017-08-24 12:51:32 +05301302 glink->features = features;
Sricharan R933b45d2017-08-24 12:51:34 +05301303 glink->intentless = intentless;
Sricharan Rd31ad612017-08-24 12:51:32 +05301304
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301305 mutex_init(&glink->tx_lock);
1306 spin_lock_init(&glink->rx_lock);
1307 INIT_LIST_HEAD(&glink->rx_queue);
1308 INIT_WORK(&glink->rx_work, qcom_glink_work);
1309
Sricharan R44f6df92017-08-24 12:51:33 +05301310 spin_lock_init(&glink->idr_lock);
Bjorn Andersson835764dd2017-08-24 12:51:26 +05301311 idr_init(&glink->lcids);
1312 idr_init(&glink->rcids);
1313
1314 glink->mbox_client.dev = dev;
1315 glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
1316 if (IS_ERR(glink->mbox_chan)) {
1317 if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
1318 dev_err(dev, "failed to acquire IPC channel\n");
1319 return ERR_CAST(glink->mbox_chan);
1320 }
1321
1322 irq = of_irq_get(dev->of_node, 0);
1323 ret = devm_request_irq(dev, irq,
1324 qcom_glink_native_intr,
1325 IRQF_NO_SUSPEND | IRQF_SHARED,
1326 "glink-native", glink);
1327 if (ret) {
1328 dev_err(dev, "failed to request IRQ\n");
1329 return ERR_PTR(ret);
1330 }
1331
1332 glink->irq = irq;
1333
1334 ret = qcom_glink_send_version(glink);
1335 if (ret)
1336 return ERR_PTR(ret);
1337
1338 return glink;
1339}
1340
/* device_for_each_child() callback: unregister one child rpmsg device */
static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}
1347
/*
 * Tear down a glink instance: silence the interrupt, flush deferred rx
 * work, unregister all child rpmsg devices, release remaining channel
 * references and return the mailbox channel.
 */
void qcom_glink_native_remove(struct qcom_glink *glink)
{
	struct glink_channel *channel;
	int cid;
	int ret;
	unsigned long flags;

	/* Stop new rx processing before flushing the deferred work */
	disable_irq(glink->irq);
	cancel_work_sync(&glink->rx_work);

	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
	if (ret)
		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);

	spin_lock_irqsave(&glink->idr_lock, flags);
	/* Release any defunct local channels, waiting for close-ack */
	idr_for_each_entry(&glink->lcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	idr_destroy(&glink->lcids);
	idr_destroy(&glink->rcids);
	spin_unlock_irqrestore(&glink->idr_lock, flags);
	mbox_free_channel(glink->mbox_chan);
}
Bjorn Anderssoncaf989c2017-08-24 12:51:30 +05301372
/* Drop the transport device itself, after qcom_glink_native_remove() */
void qcom_glink_native_unregister(struct qcom_glink *glink)
{
	device_unregister(glink->dev);
}