// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/rpmsg/qcom_smd.h>

#include "rpmsg_internal.h"

/*
 * The Qualcomm Shared Memory communication solution provides point-to-point
 * channels for clients to send and receive streaming or packet based data.
 *
 * Each channel consists of a control item (channel info) and a ring buffer
 * pair. The channel info carries information related to channel state, flow
 * control and the offsets within the ring buffer.
 *
 * All allocated channels are listed in an allocation table, identifying the
 * pair of items by name, type and remote processor.
 *
 * Upon creating a new channel the remote processor allocates channel info and
 * ring buffer items from the smem heap and populates the allocation table. An
 * interrupt is sent to the other end of the channel and a scan for new
 * channels should be done. A channel never goes away, it will only change
 * state.
 *
 * The remote processor signals its intent to bring up the communication
 * channel by setting the state of its end of the channel to "opening" and
 * sending out an interrupt. We detect this change and register a smd device to
 * consume the channel. Upon finding a consumer we finish the handshake and the
 * channel is up.
 *
 * Upon closing a channel, the remote processor will update the state of its
 * end of the channel and signal us; we will then unregister any attached
 * device and close our end of the channel.
 *
 * Devices attached to a channel can use the qcom_smd_send function to push
 * data to the channel. This is done by copying the data into the tx ring
 * buffer, updating the pointers in the channel info and signaling the remote
 * processor.
 *
 * The remote processor does the equivalent when it transfers data and, upon
 * receiving the interrupt, we check the channel info for new data and deliver
 * it to the attached device. If the device is not ready to receive the data
 * we leave it in the ring buffer for now.
 */
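
/*
 * The local end of a channel thus walks CLOSED -> OPENING -> OPENED during
 * the open handshake (see qcom_smd_channel_open()) and back to CLOSED on
 * close; the remote end's state is tracked separately in remote_state of
 * struct qcom_smd_channel.
 */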

struct smd_channel_info;
struct smd_channel_info_pair;
struct smd_channel_info_word;
struct smd_channel_info_word_pair;

static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops;

#define SMD_ALLOC_TBL_COUNT	2
#define SMD_ALLOC_TBL_SIZE	64

/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 */
static const struct {
	unsigned alloc_tbl_id;
	unsigned info_base_id;
	unsigned fifo_base_id;
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338
	},
	{
		.alloc_tbl_id = 266,
		.info_base_id = 138,
		.fifo_base_id = 202,
	},
};

/**
 * struct qcom_smd_edge - representing a remote processor
 * @dev: device associated with this edge
 * @name: name of this edge, from the label property or the node name
 * @of_node: of_node handle for information related to this edge
 * @edge_id: identifier of this edge
 * @remote_pid: identifier of remote processor
 * @irq: interrupt for signals on this edge
 * @ipc_regmap: regmap handle holding the outgoing ipc register
 * @ipc_offset: offset within @ipc_regmap of the register for ipc
 * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap
 * @mbox_client: mailbox client handle
 * @mbox_chan: apcs ipc mailbox channel handle
 * @channels: list of all channels detected on this edge
 * @channels_lock: guard for modifications of @channels
 * @allocated: array of bitmaps representing already allocated channels
 * @smem_available: last available amount of smem triggering a channel scan
 * @new_channel_event: wait queue for signaling newly found channels
 * @scan_work: work item for discovering new channels
 * @state_work: work item for edge state changes
 */
struct qcom_smd_edge {
	struct device dev;

	const char *name;

	struct device_node *of_node;
	unsigned edge_id;
	unsigned remote_pid;

	int irq;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head channels;
	spinlock_t channels_lock;

	DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);

	unsigned smem_available;

	wait_queue_head_t new_channel_event;

	struct work_struct scan_work;
	struct work_struct state_work;
};

/*
 * SMD channel states.
 */
enum smd_channel_state {
	SMD_CHANNEL_CLOSED,
	SMD_CHANNEL_OPENING,
	SMD_CHANNEL_OPENED,
	SMD_CHANNEL_FLUSHING,
	SMD_CHANNEL_CLOSING,
	SMD_CHANNEL_RESET,
	SMD_CHANNEL_RESET_OPENING
};

struct qcom_smd_device {
	struct rpmsg_device rpdev;

	struct qcom_smd_edge *edge;
};

struct qcom_smd_endpoint {
	struct rpmsg_endpoint ept;

	struct qcom_smd_channel *qsch;
};

#define to_smd_device(r)	container_of(r, struct qcom_smd_device, rpdev)
#define to_smd_edge(d)		container_of(d, struct qcom_smd_edge, dev)
#define to_smd_endpoint(e)	container_of(e, struct qcom_smd_endpoint, ept)

/**
 * struct qcom_smd_channel - smd channel struct
 * @edge: qcom_smd_edge this channel is living on
 * @qsept: reference to the associated smd endpoint
 * @registered: whether a client device has been registered for this channel
 * @name: name of the channel
 * @state: local state of the channel
 * @remote_state: remote state of the channel
 * @state_change_event: wakeup event tied to changes of @remote_state
 * @info: byte aligned outgoing/incoming channel info
 * @info_word: word aligned outgoing/incoming channel info
 * @tx_lock: lock to make writes to the channel mutually exclusive
 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
 * @tx_fifo: pointer to the outgoing ring buffer
 * @rx_fifo: pointer to the incoming ring buffer
 * @fifo_size: size of each ring buffer
 * @bounce_buffer: bounce buffer for reading wrapped packets
 * @recv_lock: guard for rx info modifications and the rpmsg callback pointer
 * @pkt_size: size of the currently handled packet
 * @drvdata: driver private data
 * @list: list entry for @channels in qcom_smd_edge
 */
struct qcom_smd_channel {
	struct qcom_smd_edge *edge;

	struct qcom_smd_endpoint *qsept;
	bool registered;

	char *name;
	enum smd_channel_state state;
	enum smd_channel_state remote_state;
	wait_queue_head_t state_change_event;

	struct smd_channel_info_pair *info;
	struct smd_channel_info_word_pair *info_word;

	spinlock_t tx_lock;
	wait_queue_head_t fblockread_event;

	void *tx_fifo;
	void *rx_fifo;
	int fifo_size;

	void *bounce_buffer;

	spinlock_t recv_lock;

	int pkt_size;

	void *drvdata;

	struct list_head list;
};

/*
 * Format of the smd_info smem items, for byte aligned channels.
 */
struct smd_channel_info {
	__le32 state;
	u8 fDSR;
	u8 fCTS;
	u8 fCD;
	u8 fRI;
	u8 fHEAD;
	u8 fTAIL;
	u8 fSTATE;
	u8 fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_pair {
	struct smd_channel_info tx;
	struct smd_channel_info rx;
};

/*
 * Format of the smd_info smem items, for word aligned channels.
 */
struct smd_channel_info_word {
	__le32 state;
	__le32 fDSR;
	__le32 fCTS;
	__le32 fCD;
	__le32 fRI;
	__le32 fHEAD;
	__le32 fTAIL;
	__le32 fSTATE;
	__le32 fBLOCKREADINTR;
	__le32 tail;
	__le32 head;
};

struct smd_channel_info_word_pair {
	struct smd_channel_info_word tx;
	struct smd_channel_info_word rx;
};

#define GET_RX_CHANNEL_FLAG(channel, param)				     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		channel->info_word ?					     \
			le32_to_cpu(channel->info_word->rx.param) :	     \
			channel->info->rx.param;			     \
	})

#define GET_RX_CHANNEL_INFO(channel, param)				      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ?			      \
			channel->info_word->rx.param :			      \
			channel->info->rx.param);			      \
	})

#define SET_RX_CHANNEL_FLAG(channel, param, value)			     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
		if (channel->info_word)					     \
			channel->info_word->rx.param = cpu_to_le32(value);   \
		else							     \
			channel->info->rx.param = value;		     \
	})

#define SET_RX_CHANNEL_INFO(channel, param, value)			      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
		if (channel->info_word)					      \
			channel->info_word->rx.param = cpu_to_le32(value);    \
		else							      \
			channel->info->rx.param = cpu_to_le32(value);	      \
	})

#define GET_TX_CHANNEL_FLAG(channel, param)				     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		channel->info_word ?					     \
			le32_to_cpu(channel->info_word->tx.param) :	     \
			channel->info->tx.param;			     \
	})

#define GET_TX_CHANNEL_INFO(channel, param)				      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		le32_to_cpu(channel->info_word ?			      \
			channel->info_word->tx.param :			      \
			channel->info->tx.param);			      \
	})

#define SET_TX_CHANNEL_FLAG(channel, param, value)			     \
	({								     \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
		if (channel->info_word)					     \
			channel->info_word->tx.param = cpu_to_le32(value);   \
		else							     \
			channel->info->tx.param = value;		     \
	})

#define SET_TX_CHANNEL_INFO(channel, param, value)			      \
	({								      \
		BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
		if (channel->info_word)					      \
			channel->info_word->tx.param = cpu_to_le32(value);    \
		else							      \
			channel->info->tx.param = cpu_to_le32(value);	      \
	})
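
/*
 * Accessor usage example (as in the handlers below): read the remote state
 * and ack the state-change flag:
 *
 *	remote_state = GET_RX_CHANNEL_INFO(channel, state);
 *	SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
 *
 * The BUILD_BUG_ON()s ensure the *_FLAG accessors are only used on the u8
 * flag fields and the *_INFO accessors only on the __le32 fields of the byte
 * aligned layout.
 */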

/**
 * struct qcom_smd_alloc_entry - channel allocation entry
 * @name: channel name
 * @cid: channel index
 * @flags: channel flags and edge id
 * @ref_count: reference count of the channel
 */
struct qcom_smd_alloc_entry {
	u8 name[20];
	__le32 cid;
	__le32 flags;
	__le32 ref_count;
} __packed;

#define SMD_CHANNEL_FLAGS_EDGE_MASK	0xff
#define SMD_CHANNEL_FLAGS_STREAM	BIT(8)
#define SMD_CHANNEL_FLAGS_PACKET	BIT(9)

/*
 * Each smd packet contains a 20 byte header, with the first 4 being the length
 * of the packet.
 */
#define SMD_PACKET_HEADER_LEN	20
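/* Of the 20 header bytes this driver uses only the leading length word;
 * __qcom_smd_send() below fills the remaining four words with zeroes.
 */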

/*
 * Signal the remote processor associated with 'channel'.
 */
static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
	struct qcom_smd_edge *edge = channel->edge;

	if (edge->mbox_chan) {
		/*
		 * We can ignore a failing mbox_send_message() as the only
		 * possible cause is that the FIFO in the framework is full of
		 * other writes to the same bit.
		 */
		mbox_send_message(edge->mbox_chan, NULL);
		mbox_client_txdone(edge->mbox_chan, 0);
	} else {
		regmap_write(edge->ipc_regmap, edge->ipc_offset, BIT(edge->ipc_bit));
	}
}

/*
 * Initialize the tx channel info
 */
static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
{
	SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
	SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
	SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
	SET_TX_CHANNEL_FLAG(channel, fCD, 0);
	SET_TX_CHANNEL_FLAG(channel, fRI, 0);
	SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
	SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	SET_TX_CHANNEL_INFO(channel, head, 0);
	SET_RX_CHANNEL_INFO(channel, tail, 0);

	qcom_smd_signal_channel(channel);

	channel->state = SMD_CHANNEL_CLOSED;
	channel->pkt_size = 0;
}

/*
 * Set the callback for a channel, with appropriate locking
 */
static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
					  rpmsg_rx_cb_t cb)
{
	struct rpmsg_endpoint *ept = &channel->qsept->ept;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	ept->cb = cb;
	spin_unlock_irqrestore(&channel->recv_lock, flags);
}

/*
 * Calculate the amount of data available in the rx fifo
 */
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;

	head = GET_RX_CHANNEL_INFO(channel, head);
	tail = GET_RX_CHANNEL_INFO(channel, tail);

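	/*
	 * fifo_size is a power of two, so masking the unsigned difference
	 * handles head wrapping past tail; e.g. head = 4, tail = 1020 in a
	 * 1024 byte fifo gives (4 - 1020) & 1023 = 8 bytes available.
	 */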
	return (head - tail) & (channel->fifo_size - 1);
}

/*
 * Set tx channel state and inform the remote processor
 */
static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
				       int state)
{
	struct qcom_smd_edge *edge = channel->edge;
	bool is_open = state == SMD_CHANNEL_OPENED;

	if (channel->state == state)
		return;

	dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state);

	SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
	SET_TX_CHANNEL_FLAG(channel, fCD, is_open);

	SET_TX_CHANNEL_INFO(channel, state, state);
	SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);

	channel->state = state;
	qcom_smd_signal_channel(channel);
}

/*
 * Copy count bytes of data using 32bit accesses, if that's required.
 */
static void smd_copy_to_fifo(void __iomem *dst,
			     const void *src,
			     size_t count,
			     bool word_aligned)
{
	if (word_aligned) {
		__iowrite32_copy(dst, src, count / sizeof(u32));
	} else {
		memcpy_toio(dst, src, count);
	}
}

/*
 * Copy count bytes of data using 32bit accesses, if that is required.
 */
static void smd_copy_from_fifo(void *dst,
			       const void __iomem *src,
			       size_t count,
			       bool word_aligned)
{
	if (word_aligned) {
		__ioread32_copy(dst, src, count / sizeof(u32));
	} else {
		memcpy_fromio(dst, src, count);
	}
}

/*
 * Read count bytes of data from the rx fifo into buf, but don't advance the
 * tail.
 */
static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
				    void *buf, size_t count)
{
	bool word_aligned;
	unsigned tail;
	size_t len;

	word_aligned = channel->info_word;
	tail = GET_RX_CHANNEL_INFO(channel, tail);

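	/*
	 * Copy in up to two chunks: from tail to the end of the fifo, then
	 * the wrapped remainder from the start of the fifo.
	 */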
	len = min_t(size_t, count, channel->fifo_size - tail);
	if (len) {
		smd_copy_from_fifo(buf,
				   channel->rx_fifo + tail,
				   len,
				   word_aligned);
	}

	if (len != count) {
		smd_copy_from_fifo(buf + len,
				   channel->rx_fifo,
				   count - len,
				   word_aligned);
	}

	return count;
}

/*
 * Advance the rx tail by count bytes.
 */
static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
				     size_t count)
{
	unsigned tail;

	tail = GET_RX_CHANNEL_INFO(channel, tail);
	tail += count;
	tail &= (channel->fifo_size - 1);
	SET_RX_CHANNEL_INFO(channel, tail, tail);
}

/*
 * Read out a single packet from the rx fifo and deliver it to the device
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct rpmsg_endpoint *ept = &channel->qsept->ept;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}

/*
 * Per channel interrupt handling
 */
static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{
	bool need_state_scan = false;
	int remote_state;
	__le32 pktlen;
	int avail;
	int ret;

	/* Handle state changes */
	remote_state = GET_RX_CHANNEL_INFO(channel, state);
	if (remote_state != channel->remote_state) {
		channel->remote_state = remote_state;
		need_state_scan = true;

		wake_up_interruptible_all(&channel->state_change_event);
	}
	/* Indicate that we have seen any state change */
	SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);

	/* Signal waiting qcom_smd_send() about the interrupt */
	if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
		wake_up_interruptible_all(&channel->fblockread_event);

	/* Don't consume any data until we've opened the channel */
	if (channel->state != SMD_CHANNEL_OPENED)
		goto out;

	/* Indicate that we've seen the new data */
	SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);

	/* Consume data */
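	/*
	 * A packet arrives in two steps: a 20 byte header carrying the
	 * payload length, then the payload itself once it is fully
	 * available; pkt_size == 0 means we are still waiting for a header.
	 */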
	for (;;) {
		avail = qcom_smd_channel_get_rx_avail(channel);

		if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
			qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
			qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
			channel->pkt_size = le32_to_cpu(pktlen);
		} else if (channel->pkt_size && avail >= channel->pkt_size) {
			ret = qcom_smd_channel_recv_single(channel);
			if (ret)
				break;
		} else {
			break;
		}
	}

	/* Indicate that we have seen and updated tail */
	SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);

	/* Signal the remote that we've consumed the data (if requested) */
	if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
		/* Ensure ordering of channel info updates */
		wmb();

		qcom_smd_signal_channel(channel);
	}

out:
	return need_state_scan;
}

/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_scanner = false;
	bool kick_state = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_state |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem has changed
	 * since the last scan.
	 */
	available = qcom_smem_get_free_space(edge->remote_pid);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		kick_scanner = true;
	}

	if (kick_scanner)
		schedule_work(&edge->scan_work);
	if (kick_state)
		schedule_work(&edge->state_work);

	return IRQ_HANDLED;
}

/*
 * Calculate how much space is available in the tx fifo.
 */
static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
{
	unsigned head;
	unsigned tail;
	unsigned mask = channel->fifo_size - 1;

	head = GET_TX_CHANNEL_INFO(channel, head);
	tail = GET_TX_CHANNEL_INFO(channel, tail);

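	/*
	 * Report one byte less than the raw free space so that a completely
	 * full fifo (head == tail) is never mistaken for an empty one.
	 */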
	return mask - ((head - tail) & mask);
}

/*
 * Write count bytes of data into channel, possibly wrapping in the ring buffer
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->info_word;
	head = GET_TX_CHANNEL_INFO(channel, head);

	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}

/**
 * __qcom_smd_send - write data to smd channel
 * @channel: channel handle
 * @data: buffer of data to write
 * @len: number of bytes to write
 * @wait: wait for space in the fifo if it is currently full
 *
 * This is a blocking write of len bytes into the channel's tx ring buffer,
 * followed by a signal to the remote end. It will sleep until there is enough
 * space available in the tx buffer, utilizing the fBLOCKREADINTR signaling
 * mechanism to avoid polling. If @wait is false, -EAGAIN is returned instead
 * of sleeping.
 */
static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
			   int len, bool wait)
{
	__le32 hdr[5] = { cpu_to_le32(len), };
	int tlen = sizeof(hdr) + len;
	unsigned long flags;
	int ret = 0;

	/* Word aligned channels only accept word size aligned data */
	if (channel->info_word && len % 4)
		return -EINVAL;

	/* Reject packets that are too big */
	if (tlen >= channel->fifo_size)
		return -EINVAL;

	/* Highlight the fact that if we enter the loop below we might sleep */
	if (wait)
		might_sleep();

	spin_lock_irqsave(&channel->tx_lock, flags);

	while (qcom_smd_get_tx_avail(channel) < tlen &&
	       channel->state == SMD_CHANNEL_OPENED) {
		if (!wait) {
			ret = -EAGAIN;
			goto out_unlock;
		}

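		/*
		 * Clearing fBLOCKREADINTR asks the remote side to interrupt
		 * us when it has consumed data and freed up space; it is set
		 * again once we have been woken up.
		 */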
		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);

		/* Wait without holding the tx_lock */
		spin_unlock_irqrestore(&channel->tx_lock, flags);

		ret = wait_event_interruptible(channel->fblockread_event,
				       qcom_smd_get_tx_avail(channel) >= tlen ||
				       channel->state != SMD_CHANNEL_OPENED);
		if (ret)
			return ret;

		spin_lock_irqsave(&channel->tx_lock, flags);

		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
	}

	/* Fail if the channel was closed */
	if (channel->state != SMD_CHANNEL_OPENED) {
		ret = -EPIPE;
		goto out_unlock;
	}

	SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);

	qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
	qcom_smd_write_fifo(channel, data, len);

	SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);

	/* Ensure ordering of channel info updates */
	wmb();

	qcom_smd_signal_channel(channel);

out_unlock:
	spin_unlock_irqrestore(&channel->tx_lock, flags);

	return ret;
}

/*
 * Helper for opening a channel
 */
static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
				 rpmsg_rx_cb_t cb)
{
	struct qcom_smd_edge *edge = channel->edge;
	size_t bb_size;
	int ret;

	/*
	 * Packets are maximum 4k, but reduce if the fifo is smaller
	 */
	bb_size = min(channel->fifo_size, SZ_4K);
	channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL);
	if (!channel->bounce_buffer)
		return -ENOMEM;

	qcom_smd_channel_set_callback(channel, cb);
	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);

	/* Wait for remote to enter opening or opened */
	ret = wait_event_interruptible_timeout(channel->state_change_event,
			channel->remote_state == SMD_CHANNEL_OPENING ||
			channel->remote_state == SMD_CHANNEL_OPENED,
			HZ);
	if (!ret) {
		dev_err(&edge->dev, "remote side did not enter opening state\n");
		goto out_close_timeout;
	}

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);

	/* Wait for remote to enter opened */
	ret = wait_event_interruptible_timeout(channel->state_change_event,
			channel->remote_state == SMD_CHANNEL_OPENED,
			HZ);
	if (!ret) {
		dev_err(&edge->dev, "remote side did not enter open state\n");
		goto out_close_timeout;
	}

	return 0;

out_close_timeout:
	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	return -ETIMEDOUT;
}

/*
 * Helper for closing and resetting a channel
 */
static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
{
	qcom_smd_channel_set_callback(channel, NULL);

	kfree(channel->bounce_buffer);
	channel->bounce_buffer = NULL;

	qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
	qcom_smd_channel_reset(channel);
}

static struct qcom_smd_channel *
qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_channel *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edge->channels_lock, flags);
	list_for_each_entry(channel, &edge->channels, list) {
		if (!strcmp(channel->name, name)) {
			ret = channel;
			break;
		}
	}
	spin_unlock_irqrestore(&edge->channels_lock, flags);

	return ret;
}

static void __ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
						  refcount);
	kfree(to_smd_endpoint(ept));
}

static struct rpmsg_endpoint *qcom_smd_create_ept(struct rpmsg_device *rpdev,
						  rpmsg_rx_cb_t cb, void *priv,
						  struct rpmsg_channel_info chinfo)
{
	struct qcom_smd_endpoint *qsept;
	struct qcom_smd_channel *channel;
	struct qcom_smd_device *qsdev = to_smd_device(rpdev);
	struct qcom_smd_edge *edge = qsdev->edge;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int ret;

	/* Wait up to HZ for the channel to appear */
	ret = wait_event_interruptible_timeout(edge->new_channel_event,
			(channel = qcom_smd_find_channel(edge, name)) != NULL,
			HZ);
	if (!ret)
		return NULL;

	if (channel->state != SMD_CHANNEL_CLOSED) {
		dev_err(&rpdev->dev, "channel %s is busy\n", channel->name);
		return NULL;
	}

	qsept = kzalloc(sizeof(*qsept), GFP_KERNEL);
	if (!qsept)
		return NULL;

	ept = &qsept->ept;

	kref_init(&ept->refcount);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &qcom_smd_endpoint_ops;

	channel->qsept = qsept;
	qsept->qsch = channel;

	ret = qcom_smd_channel_open(channel, cb);
	if (ret)
		goto free_ept;

	return ept;

free_ept:
	channel->qsept = NULL;
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}

static void qcom_smd_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
	struct qcom_smd_channel *ch = qsept->qsch;

	qcom_smd_channel_close(ch);
	ch->qsept = NULL;
	kref_put(&ept->refcount, __ept_release);
}

static int qcom_smd_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, true);
}

static int qcom_smd_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);

	return __qcom_smd_send(qsept->qsch, data, len, false);
}

static __poll_t qcom_smd_poll(struct rpmsg_endpoint *ept,
			      struct file *filp, poll_table *wait)
{
	struct qcom_smd_endpoint *qsept = to_smd_endpoint(ept);
	struct qcom_smd_channel *channel = qsept->qsch;
	__poll_t mask = 0;

	poll_wait(filp, &channel->fblockread_event, wait);

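	/* 20 presumably matches SMD_PACKET_HEADER_LEN: report writable only
	 * when more than a packet header's worth of space is free.
	 */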
	if (qcom_smd_get_tx_avail(channel) > 20)
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

/*
 * Finds the device_node for the smd child interested in this channel.
 */
static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
						  const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(edge_node, child) {
		key = "qcom,smd-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

static int qcom_smd_announce_create(struct rpmsg_device *rpdev)
{
	struct qcom_smd_endpoint *qept = to_smd_endpoint(rpdev->ept);
	struct qcom_smd_channel *channel = qept->qsch;
	unsigned long flags;
	bool kick_state;

	spin_lock_irqsave(&channel->recv_lock, flags);
	kick_state = qcom_smd_channel_intr(channel);
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	if (kick_state)
		schedule_work(&channel->edge->state_work);

	return 0;
}

static const struct rpmsg_device_ops qcom_smd_device_ops = {
	.create_ept = qcom_smd_create_ept,
	.announce_create = qcom_smd_announce_create,
};

static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
	.destroy_ept = qcom_smd_destroy_ept,
	.send = qcom_smd_send,
	.trysend = qcom_smd_trysend,
	.poll = qcom_smd_poll,
};

static void qcom_smd_release_device(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct qcom_smd_device *qsdev = to_smd_device(rpdev);

	kfree(qsdev);
}

/*
 * Create a smd client device for channel that is being opened.
 */
static int qcom_smd_create_device(struct qcom_smd_channel *channel)
{
	struct qcom_smd_device *qsdev;
	struct rpmsg_device *rpdev;
	struct qcom_smd_edge *edge = channel->edge;

	dev_dbg(&edge->dev, "registering '%s'\n", channel->name);

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	/* Link qsdev to our SMD edge */
	qsdev->edge = edge;

	/* Assign callbacks for rpmsg_device */
	qsdev->rpdev.ops = &qcom_smd_device_ops;

	/* Assign public information to the rpmsg_device */
	rpdev = &qsdev->rpdev;
	strncpy(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
	rpdev->src = RPMSG_ADDR_ANY;
	rpdev->dst = RPMSG_ADDR_ANY;

	rpdev->dev.of_node = qcom_smd_match_channel(edge->of_node, channel->name);
	rpdev->dev.parent = &edge->dev;
	rpdev->dev.release = qcom_smd_release_device;

	return rpmsg_register_device(rpdev);
}

static int qcom_smd_create_chrdev(struct qcom_smd_edge *edge)
{
	struct qcom_smd_device *qsdev;

	qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
	if (!qsdev)
		return -ENOMEM;

	qsdev->edge = edge;
	qsdev->rpdev.ops = &qcom_smd_device_ops;
	qsdev->rpdev.dev.parent = &edge->dev;
	qsdev->rpdev.dev.release = qcom_smd_release_device;

	return rpmsg_chrdev_register_device(&qsdev->rpdev);
}

/*
 * Allocate the qcom_smd_channel object for a newly found smd channel,
 * retrieving and validating the smem items involved.
 */
static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
							unsigned smem_info_item,
							unsigned smem_fifo_item,
							char *name)
{
	struct qcom_smd_channel *channel;
	size_t fifo_size;
	size_t info_size;
	void *fifo_base;
	void *info;
	int ret;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	channel->edge = edge;
	channel->name = kstrdup(name, GFP_KERNEL);
	if (!channel->name) {
		/* Don't leak the channel allocation on kstrdup() failure */
		kfree(channel);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&channel->tx_lock);
	spin_lock_init(&channel->recv_lock);
	init_waitqueue_head(&channel->fblockread_event);
	init_waitqueue_head(&channel->state_change_event);

	info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto free_name_and_channel;
	}

	/*
	 * Use the size of the item to figure out which channel info struct to
	 * use.
	 */
	if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
		channel->info_word = info;
	} else if (info_size == 2 * sizeof(struct smd_channel_info)) {
		channel->info = info;
	} else {
		dev_err(&edge->dev,
			"channel info of size %zu not supported\n", info_size);
		ret = -EINVAL;
		goto free_name_and_channel;
	}

	fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
	if (IS_ERR(fifo_base)) {
		ret = PTR_ERR(fifo_base);
		goto free_name_and_channel;
	}

	/* The channel consists of a rx and tx fifo of equal size */
	fifo_size /= 2;
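
	/*
	 * Note that the head/tail arithmetic throughout this driver masks
	 * with fifo_size - 1, which presumes fifo_size is a power of two.
	 */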

	dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
		name, info_size, fifo_size);

	channel->tx_fifo = fifo_base;
	channel->rx_fifo = fifo_base + fifo_size;
	channel->fifo_size = fifo_size;

	qcom_smd_channel_reset(channel);

	return channel;

free_name_and_channel:
	kfree(channel->name);
	kfree(channel);

	return ERR_PTR(ret);
}

/*
 * Scans the allocation table for any newly allocated channels, calls
 * qcom_smd_create_channel() to create representations of these and add
 * them to the edge's list of channels.
 */
static void qcom_channel_scan_worker(struct work_struct *work)
{
	struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
	struct qcom_smd_alloc_entry *alloc_tbl;
	struct qcom_smd_alloc_entry *entry;
	struct qcom_smd_channel *channel;
	unsigned long flags;
	unsigned fifo_id;
	unsigned info_id;
	int tbl;
	int i;
	u32 eflags, cid;

	for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
		alloc_tbl = qcom_smem_get(edge->remote_pid,
					  smem_items[tbl].alloc_tbl_id, NULL);
		if (IS_ERR(alloc_tbl))
			continue;

		for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
			entry = &alloc_tbl[i];
			eflags = le32_to_cpu(entry->flags);
			if (test_bit(i, edge->allocated[tbl]))
				continue;

			if (entry->ref_count == 0)
				continue;

			if (!entry->name[0])
				continue;

			if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
				continue;

			if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
				continue;

			cid = le32_to_cpu(entry->cid);
			info_id = smem_items[tbl].info_base_id + cid;
			fifo_id = smem_items[tbl].fifo_base_id + cid;

			channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
			if (IS_ERR(channel))
				continue;

			spin_lock_irqsave(&edge->channels_lock, flags);
			list_add(&channel->list, &edge->channels);
			spin_unlock_irqrestore(&edge->channels_lock, flags);

			dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name);
			set_bit(i, edge->allocated[tbl]);

			wake_up_interruptible_all(&edge->new_channel_event);
		}
	}

	schedule_work(&edge->state_work);
}

/*
 * This per edge worker scans smem for any new channels and registers these.
 * It then scans all registered channels for state changes that should be
 * handled by creating or destroying smd client devices for the registered
 * channels.
 *
 * LOCKING: edge->channels_lock only needs to cover the list operations, as the
 * worker is killed before any channels are deallocated
 */
static void qcom_channel_state_worker(struct work_struct *work)
{
	struct qcom_smd_channel *channel;
	struct qcom_smd_edge *edge = container_of(work,
						  struct qcom_smd_edge,
						  state_work);
	struct rpmsg_channel_info chinfo;
	unsigned remote_state;
	unsigned long flags;

	/*
	 * Register a device for any closed channel where the remote processor
	 * is showing interest in opening the channel.
	 */
	spin_lock_irqsave(&edge->channels_lock, flags);
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_CLOSED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state != SMD_CHANNEL_OPENING &&
		    remote_state != SMD_CHANNEL_OPENED)
			continue;

		if (channel->registered)
			continue;

		spin_unlock_irqrestore(&edge->channels_lock, flags);
		qcom_smd_create_device(channel);
		channel->registered = true;
		spin_lock_irqsave(&edge->channels_lock, flags);
	}

	/*
	 * Unregister the device for any channel that is opened where the
	 * remote processor is closing the channel.
	 */
	list_for_each_entry(channel, &edge->channels, list) {
		if (channel->state != SMD_CHANNEL_OPENING &&
		    channel->state != SMD_CHANNEL_OPENED)
			continue;

		remote_state = GET_RX_CHANNEL_INFO(channel, state);
		if (remote_state == SMD_CHANNEL_OPENING ||
		    remote_state == SMD_CHANNEL_OPENED)
			continue;

		spin_unlock_irqrestore(&edge->channels_lock, flags);

		strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;
		rpmsg_unregister_device(&edge->dev, &chinfo);
		channel->registered = false;
		spin_lock_irqsave(&edge->channels_lock, flags);
	}
	spin_unlock_irqrestore(&edge->channels_lock, flags);
}

/*
 * Parses an of_node describing an edge.
 */
static int qcom_smd_parse_edge(struct device *dev,
			       struct device_node *node,
			       struct qcom_smd_edge *edge)
{
	struct device_node *syscon_np;
	const char *key;
	int irq;
	int ret;

	INIT_LIST_HEAD(&edge->channels);
	spin_lock_init(&edge->channels_lock);

	INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
	INIT_WORK(&edge->state_work, qcom_channel_state_worker);

	edge->of_node = of_node_get(node);

	key = "qcom,smd-edge";
	ret = of_property_read_u32(node, key, &edge->edge_id);
	if (ret) {
		dev_err(dev, "edge missing %s property\n", key);
		return -EINVAL;
	}

	edge->remote_pid = QCOM_SMEM_HOST_ANY;
	key = "qcom,remote-pid";
	of_property_read_u32(node, key, &edge->remote_pid);

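	/*
	 * Prefer an outgoing doorbell described as a mailbox; fall back to
	 * the legacy qcom,ipc syscon description when no mailbox is
	 * provided (-ENODEV).
	 */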
	edge->mbox_client.dev = dev;
	edge->mbox_client.knows_txdone = true;
	edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0);
	if (IS_ERR(edge->mbox_chan)) {
		if (PTR_ERR(edge->mbox_chan) != -ENODEV)
			return PTR_ERR(edge->mbox_chan);

		edge->mbox_chan = NULL;

		syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
		if (!syscon_np) {
			dev_err(dev, "no qcom,ipc node\n");
			return -ENODEV;
		}

		edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
		if (IS_ERR(edge->ipc_regmap))
			return PTR_ERR(edge->ipc_regmap);

		key = "qcom,ipc";
		ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset);
		if (ret < 0) {
			dev_err(dev, "no offset in %s\n", key);
			return -EINVAL;
		}

		ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit);
		if (ret < 0) {
			dev_err(dev, "no bit in %s\n", key);
			return -EINVAL;
		}
	}

	ret = of_property_read_string(node, "label", &edge->name);
	if (ret < 0)
		edge->name = node->name;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		/* irq_of_parse_and_map() returns 0 on failure, never < 0 */
		dev_err(dev, "required smd interrupt missing\n");
		return -EINVAL;
	}

	ret = devm_request_irq(dev, irq,
			       qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
			       node->name, edge);
	if (ret) {
		dev_err(dev, "failed to request smd irq\n");
		return ret;
	}

	edge->irq = irq;

	return 0;
}

/*
 * Release function for an edge.
 * Reset the state of each associated channel and free the edge context.
 */
static void qcom_smd_edge_release(struct device *dev)
{
	struct qcom_smd_channel *channel, *tmp;
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	list_for_each_entry_safe(channel, tmp, &edge->channels, list) {
		list_del(&channel->list);
		kfree(channel->name);
		kfree(channel);
	}

	kfree(edge);
}

static ssize_t rpmsg_name_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	return sprintf(buf, "%s\n", edge->name);
}
static DEVICE_ATTR_RO(rpmsg_name);

static struct attribute *qcom_smd_edge_attrs[] = {
	&dev_attr_rpmsg_name.attr,
	NULL
};
ATTRIBUTE_GROUPS(qcom_smd_edge);

/**
 * qcom_smd_register_edge() - register an edge based on a device_node
 * @parent: parent device for the edge
 * @node: device_node describing the edge
 *
 * Returns an edge reference, or negative ERR_PTR() on failure.
 */
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
					     struct device_node *node)
{
	struct qcom_smd_edge *edge;
	int ret;

	edge = kzalloc(sizeof(*edge), GFP_KERNEL);
	if (!edge)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&edge->new_channel_event);

	edge->dev.parent = parent;
	edge->dev.release = qcom_smd_edge_release;
	edge->dev.of_node = node;
	edge->dev.groups = qcom_smd_edge_groups;
	dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name);
	ret = device_register(&edge->dev);
	if (ret) {
		pr_err("failed to register smd edge\n");
		put_device(&edge->dev);
		return ERR_PTR(ret);
	}

	ret = qcom_smd_parse_edge(&edge->dev, node, edge);
	if (ret) {
		dev_err(&edge->dev, "failed to parse smd edge\n");
		goto unregister_dev;
	}

	ret = qcom_smd_create_chrdev(edge);
	if (ret) {
		dev_err(&edge->dev, "failed to register chrdev for edge\n");
		goto unregister_dev;
	}

	schedule_work(&edge->scan_work);

	return edge;

unregister_dev:
	if (!IS_ERR_OR_NULL(edge->mbox_chan))
		mbox_free_channel(edge->mbox_chan);

	device_unregister(&edge->dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(qcom_smd_register_edge);

static int qcom_smd_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

/**
 * qcom_smd_unregister_edge() - release an edge and its children
 * @edge: edge reference acquired from qcom_smd_register_edge
 */
int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{
	int ret;

	disable_irq(edge->irq);
	cancel_work_sync(&edge->scan_work);
	cancel_work_sync(&edge->state_work);

	ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device);
	if (ret)
		dev_warn(&edge->dev, "can't remove smd device: %d\n", ret);

	mbox_free_channel(edge->mbox_chan);
	device_unregister(&edge->dev);

	return 0;
}
EXPORT_SYMBOL(qcom_smd_unregister_edge);

static int qcom_smd_probe(struct platform_device *pdev)
{
	struct device_node *node;
	void *p;

	/* Wait for smem */
	p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
	if (PTR_ERR(p) == -EPROBE_DEFER)
		return PTR_ERR(p);

	for_each_available_child_of_node(pdev->dev.of_node, node)
		qcom_smd_register_edge(&pdev->dev, node);

	return 0;
}

static int qcom_smd_remove_edge(struct device *dev, void *data)
{
	struct qcom_smd_edge *edge = to_smd_edge(dev);

	return qcom_smd_unregister_edge(edge);
}

/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static int qcom_smd_remove(struct platform_device *pdev)
{
	int ret;

	ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
	if (ret)
		dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret);

	return ret;
}

static const struct of_device_id qcom_smd_of_match[] = {
	{ .compatible = "qcom,smd" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_of_match);

static struct platform_driver qcom_smd_driver = {
	.probe = qcom_smd_probe,
	.remove = qcom_smd_remove,
	.driver = {
		.name = "qcom-smd",
		.of_match_table = qcom_smd_of_match,
	},
};

static int __init qcom_smd_init(void)
{
	return platform_driver_register(&qcom_smd_driver);
}
subsys_initcall(qcom_smd_init);

static void __exit qcom_smd_exit(void)
{
	platform_driver_unregister(&qcom_smd_driver);
}
module_exit(qcom_smd_exit);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
MODULE_LICENSE("GPL v2");