// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core. The modem uses the GSI layer as well.
 *
 * --------             ---------
 * |      |             |       |
 * |  AP  +<---.   .----+ Modem |
 * |      +--. |   | .->+       |
 * |      |  | |   | |  |       |
 * --------  | |   | |  ---------
 *           v |   v |
 *         --+-+---+-+--
 *         |    GSI    |
 *         |-----------|
 *         |           |
 *         |    IPA    |
 *         |           |
 *         -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA. A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA. A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM. After writing one
 * or more TREs to a channel, the writer (either the IPA or an EE) writes a
 * doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it. An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE. The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted. Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags. One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring. Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order. Completion of one entry implies the completion of all preceding
 * entries. A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses. The accessor functions perform byte
 * swapping if needed (i.e., for a big-endian CPU).
 */
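
/* To make the TRE flow concrete, here is an illustrative sketch (not part
 * of this driver; the structure and names below are simplified assumptions)
 * of how an EE queues one transfer and learns of its completion:
 *
 *	struct tre { __le64 addr; __le16 len; __le16 flags; } *tre;
 *
 *	tre = &tre_ring[index++ % count];	// next free ring element
 *	tre->addr = cpu_to_le64(buf_dma);	// data block in DRAM
 *	tre->len = cpu_to_le16(buf_len);	// transfer length
 *	tre->flags = cpu_to_le16(IEOT_FLAG);	// interrupt on completion
 *	iowrite32(db_val, db_reg);		// tell hardware TREs were added
 *
 * When the transfer completes, the GSI posts an event whose xfer_ptr holds
 * the DMA address of the completed TRE (see struct gsi_event below), rings
 * the event ring doorbell, and the target EE takes an interrupt.
 */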

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT		(32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT			5	/* seconds */

#define GSI_CHANNEL_STOP_RX_RETRIES	10

#define GSI_MHI_EVENT_ID_START		10	/* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END		16	/* Last reserved event id */

#define GSI_ISR_MAX_ITER		50	/* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
	__le64 xfer_ptr;
	__le16 len;
	u8 reserved1;
	u8 code;
	__le16 reserved2;
	u8 type;
	u8 chid;
};

/* Hardware values from the error log register error code field */
enum gsi_err_code {
	GSI_INVALID_TRE_ERR			= 0x1,
	GSI_OUT_OF_BUFFERS_ERR			= 0x2,
	GSI_OUT_OF_RESOURCES_ERR		= 0x3,
	GSI_UNSUPPORTED_INTER_EE_OP_ERR		= 0x4,
	GSI_EVT_RING_EMPTY_ERR			= 0x5,
	GSI_NON_ALLOCATED_EVT_ACCESS_ERR	= 0x6,
	GSI_HWO_1_ERR				= 0x8,
};

/* Hardware values from the error log register error type field */
enum gsi_err_type {
	GSI_ERR_TYPE_GLOB	= 0x1,
	GSI_ERR_TYPE_CHAN	= 0x2,
	GSI_ERR_TYPE_EVT	= 0x3,
};

/* Hardware values used when programming an event ring */
enum gsi_evt_chtype {
	GSI_EVT_CHTYPE_MHI_EV	= 0x0,
	GSI_EVT_CHTYPE_XHCI_EV	= 0x1,
	GSI_EVT_CHTYPE_GPI_EV	= 0x2,
	GSI_EVT_CHTYPE_XDCI_EV	= 0x3,
};

/* Hardware values used when programming a channel */
enum gsi_channel_protocol {
	GSI_CHANNEL_PROTOCOL_MHI	= 0x0,
	GSI_CHANNEL_PROTOCOL_XHCI	= 0x1,
	GSI_CHANNEL_PROTOCOL_GPI	= 0x2,
	GSI_CHANNEL_PROTOCOL_XDCI	= 0x3,
};

/* Hardware values representing an event ring immediate command opcode */
enum gsi_evt_cmd_opcode {
	GSI_EVT_ALLOCATE	= 0x0,
	GSI_EVT_RESET		= 0x9,
	GSI_EVT_DE_ALLOC	= 0xa,
};

/* Hardware values representing a generic immediate command opcode */
enum gsi_generic_cmd_opcode {
	GSI_GENERIC_HALT_CHANNEL	= 0x1,
	GSI_GENERIC_ALLOCATE_CHANNEL	= 0x2,
};

/* Hardware values representing a channel immediate command opcode */
enum gsi_ch_cmd_opcode {
	GSI_CH_ALLOCATE	= 0x0,
	GSI_CH_START	= 0x1,
	GSI_CH_STOP	= 0x2,
	GSI_CH_RESET	= 0x9,
	GSI_CH_DE_ALLOC	= 0xa,
};

/**
 * struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *	Defines the maximum number of TREs allowed in a single transaction
 *	on a channel (in bytes). This determines the amount of prefetch
 *	performed by the hardware. We configure this to equal the size of
 *	the TLV FIFO for the channel.
 * @outstanding_threshold:
 *	Defines the threshold (in bytes) determining when the sequencer
 *	should update the channel doorbell. We configure this to equal
 *	the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
	u64 reserved1;
	u16 reserved2;
	u16 max_outstanding_tre;
	u16 reserved3;
	u16 outstanding_threshold;
};

/**
 * union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
	struct gsi_channel_scratch_gpi gpi;
	struct {
		u32 word1;
		u32 word2;
		u32 word3;
		u32 word4;
	} data;
};

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
	/* This is used as a divisor */
	BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

	/* Code assumes the size of channel and event ring element are
	 * the same (and fixed). Make sure the size of an event ring
	 * element is what's expected.
	 */
	BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

	/* Hardware requires a 2^n ring size. We ensure the number of
	 * elements in an event ring is a power of 2 elsewhere; this
	 * ensures the elements themselves meet the requirement.
	 */
	BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

	/* The channel element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

	/* The event ring element size must fit in this field */
	BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
	return channel - &channel->gsi->channel[0];
}

static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap |= BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_isr_ieob_clear(struct gsi *gsi, u32 mask)
{
	iowrite32(mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
	val = gsi->event_enable_bitmap;
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
	u32 val;

	/* We don't use inter-EE channel or event interrupts */
	val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
	val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
	val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->channel_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);

	val = GENMASK(gsi->evt_ring_count - 1, 0);
	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);

	/* Each IEOB interrupt is enabled (later) as needed by channels */
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

	val = GSI_CNTXT_GLOB_IRQ_ALL;
	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);

	/* Never enable GSI_BREAK_POINT */
	val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
	iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
	/* Note: index *must* be used modulo the ring count here */
	return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
	return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
	return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
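
/* A worked example of the ring arithmetic above, for a hypothetical ring
 * of count = 8 elements at DMA address 0x1000 (elements are 16 bytes):
 *
 *	gsi_ring_addr(ring, 3)		is 0x1000 + 3 * 16 = 0x1030
 *	gsi_ring_index(ring, 0x1030)	is (0x1030 - 0x1000) / 16 = 3
 *	gsi_ring_virt(ring, 11)		is ring->virt + (11 % 8) * 16
 *
 * Ring indexes increase without bound and are reduced modulo the ring
 * count only when converted to an address, which is why gsi_ring_virt()
 * must apply the modulus itself.
 */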

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled. Returns true if the command completes
 * or false if it times out.
 */
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
	reinit_completion(completion);

	iowrite32(val, gsi->virt + reg);

	return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
}
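
/* The completion passed to gsi_command() is signaled from the interrupt
 * handlers further below. As a sketch of the pairing (this is how the
 * existing code fits together, not additional driver logic): a channel
 * command written here eventually causes the channel control interrupt
 * handler to run, which calls
 *
 *	complete(&channel->completion);
 *
 * waking the waiter above. gsi_command() is thus a write-register and
 * wait-for-IRQ pair, bounded by GSI_CMD_TIMEOUT.
 */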

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
			    enum gsi_evt_cmd_opcode opcode)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	struct completion *completion = &evt_ring->completion;
	u32 val;

	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(gsi->dev, "GSI command %u to event ring %u timed out (state is %u)\n",
		opcode, evt_ring_id, evt_ring->state);

	return -ETIMEDOUT;
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	/* Get initial event ring state */
	evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

	if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		return -EINVAL;

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
			evt_ring->state);
		ret = -EIO;
	}

	return ret;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	enum gsi_evt_ring_state state = evt_ring->state;
	int ret;

	if (state != GSI_EVT_RING_STATE_ALLOCATED &&
	    state != GSI_EVT_RING_STATE_ERROR) {
		dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
			evt_ring->state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	int ret;

	if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
			evt_ring->state);
		return;
	}

	ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
	if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
			evt_ring->state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
	u32 channel_id = gsi_channel_id(channel);
	void *virt = channel->gsi->virt;
	u32 val;

	val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static int
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
	struct completion *completion = &channel->completion;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);

	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(gsi->dev,
		"GSI command %u to channel %u timed out (state is %u)\n",
		opcode, channel_id, gsi_channel_state(channel));

	return -ETIMEDOUT;
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	enum gsi_channel_state state;
	int ret;

	/* Get initial channel state */
	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		return -EINVAL;

	ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
			state);
		ret = -EIO;
	}

	return ret;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED &&
	    state != GSI_CHANNEL_STATE_STOPPED)
		return -EINVAL;

	ret = gsi_channel_command(channel, GSI_CH_START);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
		dev_err(channel->gsi->dev,
			"bad channel state (%u) after start\n", state);
		ret = -EIO;
	}

	return ret;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STARTED &&
	    state != GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EINVAL;

	ret = gsi_channel_command(channel, GSI_CH_STOP);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (ret || state == GSI_CHANNEL_STATE_STOPPED)
		return ret;

	/* We may have to try again if stop is in progress */
	if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
		return -EAGAIN;

	dev_err(channel->gsi->dev,
		"bad channel state (%u) after stop\n", state);

	return -EIO;
}

/* Reset a GSI channel in STOPPED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
	enum gsi_channel_state state;
	int ret;

	msleep(1);	/* A short delay is required before a RESET command */

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_STOPPED &&
	    state != GSI_CHANNEL_STATE_ERROR) {
		dev_err(channel->gsi->dev,
			"bad channel state (%u) before reset\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_RESET);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
		dev_err(channel->gsi->dev,
			"bad channel state (%u) after reset\n", state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	enum gsi_channel_state state;
	int ret;

	state = gsi_channel_state(channel);
	if (state != GSI_CHANNEL_STATE_ALLOCATED) {
		dev_err(gsi->dev,
			"bad channel state (%u) before dealloc\n", state);
		return;
	}

	ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);

	/* Channel state will normally have been updated */
	state = gsi_channel_state(channel);
	if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
		dev_err(gsi->dev,
			"bad channel state (%u) after dealloc\n", state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell. Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
	struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
	u32 val;

	ring->index = index;	/* Next unused entry */

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(ring, (index - 1) % ring->count);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
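
/* For example (illustrative numbers): if the AP has processed events 0..4
 * of a 16-element event ring, index 5 is the first unfilled entry, and the
 * doorbell value written is the DMA address of entry 4, gsi_ring_addr(ring, 4).
 * The "- 1" also depends on the modulus: with index 0, (0 - 1) % 16 wraps
 * (as an unsigned value) to the ring's last entry, 15.
 */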

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
	size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
	u32 val;

	val = u32_encode_bits(GSI_EVT_CHTYPE_GPI_EV, EV_CHTYPE_FMASK);
	val |= EV_INTYPE_FMASK;
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

	val = u32_encode_bits(size, EV_R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the event ring,
	 * respectively.
	 */
	val = evt_ring->ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));

	val = evt_ring->ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

	/* Enable interrupt moderation by setting the moderation delay */
	val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
	val |= u32_encode_bits(1, MODC_FMASK);	/* comes from channel */
	iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

	/* No MSI write data, and MSI address high and low address is 0 */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

	/* We don't need to get event read pointer updates */
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
	iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

	/* Finally, tell the hardware we've completed event 0 (arbitrary) */
	gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Return the last (most recent) transaction completed on a channel. */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct gsi_trans *trans;

	spin_lock_bh(&trans_info->spinlock);

	if (!list_empty(&trans_info->complete))
		trans = list_last_entry(&trans_info->complete,
					struct gsi_trans, links);
	else if (!list_empty(&trans_info->polled))
		trans = list_last_entry(&trans_info->polled,
					struct gsi_trans, links);
	else
		trans = NULL;

	/* Caller will wait for this, so take a reference */
	if (trans)
		refcount_inc(&trans->refcount);

	spin_unlock_bh(&trans_info->spinlock);

	return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the last transaction, and wait for it to complete */
	trans = gsi_channel_trans_last(channel);
	if (trans) {
		wait_for_completion(&trans->completion);
		gsi_trans_free(trans);
	}
}

/* Stop channel activity. Transactions may not be allocated until thawed. */
static void gsi_channel_freeze(struct gsi_channel *channel)
{
	gsi_channel_trans_quiesce(channel);

	napi_disable(&channel->napi);

	gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
}

/* Allow transactions to be used on the channel again. */
static void gsi_channel_thaw(struct gsi_channel *channel)
{
	gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);

	napi_enable(&channel->napi);
}

/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
	size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
	u32 channel_id = gsi_channel_id(channel);
	union gsi_channel_scratch scr = { };
	struct gsi_channel_scratch_gpi *gpi;
	struct gsi *gsi = channel->gsi;
	u32 wrr_weight = 0;
	u32 val;

	/* Arbitrarily pick TRE 0 as the first channel element to use */
	channel->tre_ring.index = 0;

	/* We program all channels to use GPI protocol */
	val = u32_encode_bits(GSI_CHANNEL_PROTOCOL_GPI, CHTYPE_PROTOCOL_FMASK);
	if (channel->toward_ipa)
		val |= CHTYPE_DIR_FMASK;
	val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
	val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

	val = u32_encode_bits(size, R_LENGTH_FMASK);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

	/* The context 2 and 3 registers store the low-order and
	 * high-order 32 bits of the address of the channel ring,
	 * respectively.
	 */
	val = channel->tre_ring.addr & GENMASK(31, 0);
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));

	val = channel->tre_ring.addr >> 32;
	iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

	/* Command channel gets low weighted round-robin priority */
	if (channel->command)
		wrr_weight = field_max(WRR_WEIGHT_FMASK);
	val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

	/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

	/* Enable the doorbell engine if requested */
	if (doorbell)
		val |= USE_DB_ENG_FMASK;

	if (!channel->use_prefetch)
		val |= USE_ESCAPE_BUF_ONLY_FMASK;

	iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

	/* Now update the scratch registers for GPI protocol */
	gpi = &scr.gpi;
	gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
		GSI_RING_ELEMENT_SIZE;
	gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

	val = scr.data.word1;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

	val = scr.data.word2;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

	val = scr.data.word3;
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

	/* The lower 16 bits of the last scratch register are owned by the
	 * hardware and must be preserved. The sequence below assumes those
	 * bits remain unchanged between the read and the write.
	 */
	val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
	val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
	iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

	/* All done! */
}
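
/* To illustrate the scratch 3 merge above with invented numbers: if the
 * hardware register reads 0x00001234 and scr.data.word4 is 0x00200000
 * (an outstanding_threshold of 0x0020 in its upper half), the value
 * written back is (0x00200000 & 0xffff0000) | (0x00001234 & 0x0000ffff),
 * which is 0x00201234: our threshold installed, hardware bits untouched.
 */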

static void gsi_channel_deprogram(struct gsi_channel *channel)
{
	/* Nothing to do */
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	mutex_lock(&gsi->mutex);

	ret = gsi_channel_start_command(channel);

	mutex_unlock(&gsi->mutex);

	/* Clear the channel's event ring interrupt in case it's pending */
	gsi_isr_ieob_clear(gsi, BIT(evt_ring_id));

	gsi_channel_thaw(channel);

	return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	enum gsi_channel_state state;
	u32 retries;
	int ret;

	gsi_channel_freeze(channel);

	/* Channel could have entered STOPPED state since last call if the
	 * STOP command timed out. We won't stop a channel if stopping it
	 * was successful previously (so we still want the freeze above).
	 */
	state = gsi_channel_state(channel);
	if (state == GSI_CHANNEL_STATE_STOPPED)
		return 0;

	/* RX channels might require a little time to enter STOPPED state */
	retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_stop_command(channel);
		if (ret != -EAGAIN)
			break;
		msleep(1);
	} while (retries--);

	mutex_unlock(&gsi->mutex);

	/* Thaw the channel if we need to retry (or on error) */
	if (ret)
		gsi_channel_thaw(channel);

	return ret;
}

/* Reset and reconfigure a channel (possibly leaving doorbell disabled) */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool db_enable)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	mutex_lock(&gsi->mutex);

	/* Due to a hardware quirk we need to reset RX channels twice. */
	gsi_channel_reset_command(channel);
	if (!channel->toward_ipa)
		gsi_channel_reset_command(channel);

	gsi_channel_program(channel, db_enable);
	gsi_channel_trans_cancel_pending(channel);

	mutex_unlock(&gsi->mutex);
}

/* Stop a STARTED channel for suspend (using stop if requested) */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (stop)
		return gsi_channel_stop(gsi, channel_id);

	gsi_channel_freeze(channel);

	return 0;
}

/* Resume a suspended channel (starting will be requested if STOPPED) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	if (start)
		return gsi_channel_start(gsi, channel_id);

	gsi_channel_thaw(channel);

	return 0;
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel: Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call. This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent. We also track what those values are
 * each time this function is called. Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
	u32 trans_count;
	u32 byte_count;

	byte_count = channel->byte_count - channel->queued_byte_count;
	trans_count = channel->trans_count - channel->queued_trans_count;
	channel->queued_byte_count = channel->byte_count;
	channel->queued_trans_count = channel->trans_count;

	ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
				  trans_count, byte_count);
}
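
/* A worked example of the delta tracking above (numbers invented): if
 * byte_count is 6000 and queued_byte_count was 3500 at the previous
 * doorbell, this call reports 2500 newly queued bytes, then records 6000
 * as the new baseline. Because the counters are unsigned and only their
 * difference is reported, counter wraparound is harmless.
 */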

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel: Channel that has completed transmitting packets
 * @trans: Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed. Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
	u64 byte_count = trans->byte_count + trans->len;
	u64 trans_count = trans->trans_count + 1;

	byte_count -= channel->compl_byte_count;
	channel->compl_byte_count += byte_count;
	trans_count -= channel->compl_trans_count;
	channel->compl_trans_count += trans_count;

	ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
				     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
	u32 channel_mask;

	channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
	iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

	while (channel_mask) {
		u32 channel_id = __ffs(channel_mask);
		struct gsi_channel *channel;

		channel_mask ^= BIT(channel_id);

		channel = &gsi->channel[channel_id];

		complete(&channel->completion);
	}
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
	iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);
		struct gsi_evt_ring *evt_ring;

		event_mask ^= BIT(evt_ring_id);

		evt_ring = &gsi->evt_ring[evt_ring_id];
		evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);

		complete(&evt_ring->completion);
	}
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
		complete(&gsi->channel[channel_id].completion);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
		channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
	if (code == GSI_OUT_OF_RESOURCES_ERR) {
		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
		u32 channel_id = gsi_channel_id(evt_ring->channel);

		complete(&evt_ring->completion);
		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
			channel_id);
		return;
	}

	/* Report, but otherwise ignore all other error codes */
	dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
		evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
	enum gsi_err_type type;
	enum gsi_err_code code;
	u32 which;
	u32 val;
	u32 ee;

	/* Get the logged error, then reinitialize the log */
	val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

	ee = u32_get_bits(val, ERR_EE_FMASK);
	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
	type = u32_get_bits(val, ERR_TYPE_FMASK);
	code = u32_get_bits(val, ERR_CODE_FMASK);

	if (type == GSI_ERR_TYPE_CHAN)
		gsi_isr_glob_chan_err(gsi, ee, which, code);
	else if (type == GSI_ERR_TYPE_EVT)
		gsi_isr_glob_evt_err(gsi, ee, which, code);
	else	/* type GSI_ERR_TYPE_GLOB should be fatal */
		dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
	u32 result;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
	result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
	if (result != GENERIC_EE_SUCCESS_FVAL)
		dev_err(gsi->dev, "global INT1 generic result %u\n", result);

	complete(&gsi->completion);
}

/* Global EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (val & ERROR_INT_FMASK)
		gsi_isr_glob_err(gsi);

	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

	val &= ~ERROR_INT_FMASK;

	if (val & EN_GP_INT1_FMASK) {
		val ^= EN_GP_INT1_FMASK;
		gsi_isr_gp_int1(gsi);
	}

	if (val)
		dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
	u32 event_mask;

	event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
	gsi_isr_ieob_clear(gsi, event_mask);

	while (event_mask) {
		u32 evt_ring_id = __ffs(event_mask);

		event_mask ^= BIT(evt_ring_id);

		gsi_irq_ieob_disable(gsi, evt_ring_id);
		napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
	}
}

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
	struct device *dev = gsi->dev;
	u32 val;

	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

	if (val)
		dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq: Interrupt number (ignored)
 * @dev_id: GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
	struct gsi *gsi = dev_id;
	u32 intr_mask;
	u32 cnt = 0;

	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
		/* intr_mask contains bitmask of pending GSI interrupts */
		do {
			u32 gsi_intr = BIT(__ffs(intr_mask));

			intr_mask ^= gsi_intr;

			switch (gsi_intr) {
			case CH_CTRL_FMASK:
				gsi_isr_chan_ctrl(gsi);
				break;
			case EV_CTRL_FMASK:
				gsi_isr_evt_ctrl(gsi);
				break;
			case GLOB_EE_FMASK:
				gsi_isr_glob_ee(gsi);
				break;
			case IEOB_FMASK:
				gsi_isr_ieob(gsi);
				break;
			case GENERAL_FMASK:
				gsi_isr_general(gsi);
				break;
			default:
				dev_err(gsi->dev,
					"%s: unrecognized type 0x%08x\n",
					__func__, gsi_intr);
				break;
			}
		} while (intr_mask);

		if (++cnt > GSI_ISR_MAX_ITER) {
			dev_err(gsi->dev, "interrupt flood\n");
			break;
		}
	}

	return IRQ_HANDLED;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
{
	u32 tre_offset;
	u32 tre_index;

	/* Event xfer_ptr records the TRE it's associated with */
	tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0);
	tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

	return gsi_channel_trans_mapped(channel, tre_index);
}
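
/* For example (invented addresses): if the TRE ring starts at DMA address
 * 0x8000 and an event's xfer_ptr is 0x0000000100008040, masking to the low
 * 32 bits gives offset 0x8040, and (0x8040 - 0x8000) / 16 yields TRE index
 * 4, which is then used to look up the transaction mapped to that TRE.
 */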

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring: Event ring associated with channel that received packets
 * @index: Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer. Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked. The ring's index field tells
 * the first entry in need of processing. The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
	struct gsi_channel *channel = evt_ring->channel;
	struct gsi_ring *ring = &evt_ring->ring;
	struct gsi_trans_info *trans_info;
	struct gsi_event *event_done;
	struct gsi_event *event;
	struct gsi_trans *trans;
	u32 byte_count = 0;
	u32 old_index;
	u32 event_avail;

	trans_info = &channel->trans_info;

	/* We'll start with the oldest un-processed event. RX channels
	 * replenish receive buffers in single-TRE transactions, so we
	 * can just map that event to its transaction. Transactions
	 * associated with completion events are consecutive.
	 */
	old_index = ring->index;
	event = gsi_ring_virt(ring, old_index);
	trans = gsi_event_trans(channel, event);

	/* Compute the number of events to process before we wrap,
	 * and determine when we'll be done processing events.
	 */
	event_avail = ring->count - old_index % ring->count;
	event_done = gsi_ring_virt(ring, index);
	do {
		trans->len = __le16_to_cpu(event->len);
		byte_count += trans->len;

		/* Move on to the next event and transaction */
		if (--event_avail)
			event++;
		else
			event = gsi_ring_virt(ring, 0);
		trans = gsi_trans_pool_next(&trans_info->pool, trans);
	} while (event != event_done);

	/* We record RX bytes when they are received */
	channel->byte_count += byte_count;
	channel->trans_count++;
}
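
/* A wraparound sketch with invented numbers: for a 16-entry event ring
 * with ring->index 14 and a hardware-reported index of 2, event_avail is
 * 16 - 14 = 2, so entries 14 and 15 are processed by advancing the event
 * pointer; the pointer is then rewound to entry 0 and processing continues
 * through entry 1, stopping when it reaches entry 2, the first unfilled
 * event.
 */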

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
	size_t size = count * GSI_RING_ELEMENT_SIZE;
	struct device *dev = gsi->dev;
	dma_addr_t addr;

	/* Hardware requires a 2^n ring size, with alignment equal to size */
	ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
	if (ring->virt && addr % size) {
		/* Free using the address just returned; ring->addr is not
		 * yet valid at this point.
		 */
		dma_free_coherent(dev, size, ring->virt, addr);
		dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
			size);
		return -EINVAL;	/* Not a good error value, but distinct */
	} else if (!ring->virt) {
		return -ENOMEM;
	}
	ring->addr = addr;
	ring->count = count;

	return 0;
}
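
/* For example, a hypothetical ring with count = 512 occupies
 * 512 * 16 = 8192 bytes, so the hardware requires its DMA address to be
 * 8192-byte aligned. Coherent allocations of power-of-2 size are normally
 * aligned at least that strictly, but the check above guards against an
 * allocator that is not.
 */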

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
	size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

	dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
	u32 evt_ring_id;

	if (gsi->event_bitmap == ~0U) {
		dev_err(gsi->dev, "event rings exhausted\n");
		return -ENOSPC;
	}

	evt_ring_id = ffz(gsi->event_bitmap);
	gsi->event_bitmap |= BIT(evt_ring_id);

	return (int)evt_ring_id;
}

/* Free a previously-allocated event ring id */
static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
{
	gsi->event_bitmap &= ~BIT(evt_ring_id);
}

/* Ring a channel doorbell, reporting the first un-filled entry */
void gsi_channel_doorbell(struct gsi_channel *channel)
{
	struct gsi_ring *tre_ring = &channel->tre_ring;
	u32 channel_id = gsi_channel_id(channel);
	struct gsi *gsi = channel->gsi;
	u32 val;

	/* Note: index *must* be used modulo the ring count here */
	val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
	iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}

/* Consult hardware, move any newly completed transactions to completed list */
static void gsi_channel_update(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	struct gsi_trans *trans;
	struct gsi_ring *ring;
	u32 offset;
	u32 index;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	ring = &evt_ring->ring;

	/* See if there's anything new to process; if not, we're done. Note
	 * that index always refers to an entry *within* the event ring.
	 */
	offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
	index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
	if (index == ring->index % ring->count)
		return;

	/* Get the transaction for the latest completed event. Take a
	 * reference to keep it from completing before we give the events
	 * for this and previous transactions back to the hardware.
	 */
	trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
	refcount_inc(&trans->refcount);

	/* For RX channels, update each completed transaction with the number
	 * of bytes that were actually received. For TX channels, report
	 * the number of transactions and bytes this completion represents
	 * up the network stack.
	 */
	if (channel->toward_ipa)
		gsi_channel_tx_update(channel, trans);
	else
		gsi_evt_ring_rx_update(evt_ring, index);

	gsi_trans_move_complete(trans);

	/* Tell the hardware we've handled these events */
	gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);

	gsi_trans_free(trans);
}

/**
 * gsi_channel_poll_one() - Return a single completed transaction on a channel
 * @channel: Channel to be polled
 *
 * Return: Transaction pointer, or null if none are available
 *
 * This function returns the first entry on a channel's completed transaction
 * list. If that list is empty, the hardware is consulted to determine
 * whether any new transactions have completed. If so, they're moved to the
 * completed list and the new first entry is returned. If there are no more
 * completed transactions, a null pointer is returned.
 */
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
	struct gsi_trans *trans;

	/* Get the first transaction from the completed list */
	trans = gsi_channel_trans_complete(channel);
	if (!trans) {
		/* List is empty; see if there's more to do */
		gsi_channel_update(channel);
		trans = gsi_channel_trans_complete(channel);
	}

	if (trans)
		gsi_trans_move_polled(trans);

	return trans;
}

/**
 * gsi_channel_poll() - NAPI poll function for a channel
 * @napi: NAPI structure for the channel
 * @budget: Budget supplied by NAPI core
 *
 * Return: Number of items polled (<= budget)
 *
 * Single transactions completed by hardware are polled until either
 * the budget is exhausted, or there are no more. Each transaction
 * polled is passed to gsi_trans_complete(), to perform remaining
 * completion processing and retire/free the transaction.
 */
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
	struct gsi_channel *channel;
	int count = 0;

	channel = container_of(napi, struct gsi_channel, napi);
	while (count < budget) {
		struct gsi_trans *trans;

		trans = gsi_channel_poll_one(channel);
		if (!trans)
			break;
		count++;	/* Count the transaction against the budget */
		gsi_trans_complete(trans);
	}

	if (count < budget) {
		napi_complete(&channel->napi);
		gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
	}

	return count;
}

/* The event bitmap represents which event ids are available for allocation.
 * Set bits are not available, clear bits can be used. This function
 * initializes the map so all events supported by the hardware are available,
 * then precludes any reserved events from being allocated.
 */
static u32 gsi_event_bitmap_init(u32 evt_ring_max)
{
	u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);

	event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);

	return event_bitmap;
}
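
/* A worked example: with evt_ring_max = 20, GENMASK(BITS_PER_LONG - 1, 20)
 * marks ids 20 and above as in use, and the MHI mask then reserves ids 10
 * through 16. That leaves ids 0-9 and 17-19 available, and
 * gsi_evt_ring_id_alloc() hands them out lowest-first using ffz().
 */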

/* Setup function for event rings */
static void gsi_evt_ring_setup(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Inverse of gsi_evt_ring_setup() */
static void gsi_evt_ring_teardown(struct gsi *gsi)
{
	/* Nothing to do */
}

/* Setup function for a single channel */
static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id,
				 bool db_enable)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;
	int ret;

	if (!channel->gsi)
		return 0;	/* Ignore uninitialized channels */

	ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
	if (ret)
		return ret;

	gsi_evt_ring_program(gsi, evt_ring_id);

	ret = gsi_channel_alloc_command(gsi, channel_id);
	if (ret)
		goto err_evt_ring_de_alloc;

	gsi_channel_program(channel, db_enable);

	if (channel->toward_ipa)
		netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
				  gsi_channel_poll, NAPI_POLL_WEIGHT);
	else
		netif_napi_add(&gsi->dummy_dev, &channel->napi,
			       gsi_channel_poll, NAPI_POLL_WEIGHT);

	return 0;

err_evt_ring_de_alloc:
	/* We've done nothing with the event ring yet so don't reset */
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_setup_one() */
static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];
	u32 evt_ring_id = channel->evt_ring_id;

	if (!channel->gsi)
		return;	/* Ignore uninitialized channels */

	netif_napi_del(&channel->napi);

	gsi_channel_deprogram(channel);
	gsi_channel_de_alloc_command(gsi, channel_id);
	gsi_evt_ring_reset_command(gsi, evt_ring_id);
	gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}

static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
{
	struct completion *completion = &gsi->completion;
	u32 val;

	val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

	if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
		return 0;	/* Success! */

	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
		opcode, channel_id);

	return -ETIMEDOUT;
}

static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
	return gsi_generic_command(gsi, channel_id,
				   GSI_GENERIC_ALLOCATE_CHANNEL);
}

static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
{
	int ret;

	ret = gsi_generic_command(gsi, channel_id, GSI_GENERIC_HALT_CHANNEL);
	if (ret)
		dev_err(gsi->dev, "error %d halting modem channel %u\n",
			ret, channel_id);
}

/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi, bool db_enable)
{
	u32 channel_id = 0;
	u32 mask;
	int ret;

	gsi_evt_ring_setup(gsi);
	gsi_irq_enable(gsi);

	mutex_lock(&gsi->mutex);

	do {
		ret = gsi_channel_setup_one(gsi, channel_id, db_enable);
		if (ret)
			goto err_unwind;
	} while (++channel_id < gsi->channel_count);

	/* Make sure no channels were defined that hardware does not support */
	while (channel_id < GSI_CHANNEL_COUNT_MAX) {
		struct gsi_channel *channel = &gsi->channel[channel_id++];

		if (!channel->gsi)
			continue;	/* Ignore uninitialized channels */

		dev_err(gsi->dev, "channel %u not supported by hardware\n",
			channel_id - 1);
		channel_id = gsi->channel_count;
		goto err_unwind;
	}

	/* Allocate modem channels if necessary */
	mask = gsi->modem_channel_bitmap;
	while (mask) {
		u32 modem_channel_id = __ffs(mask);

		ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
		if (ret)
			goto err_unwind_modem;

		/* Clear bit from mask only after success (for unwind) */
		mask ^= BIT(modem_channel_id);
	}

	mutex_unlock(&gsi->mutex);

	return 0;

err_unwind_modem:
	/* Compute which modem channels need to be deallocated */
	mask ^= gsi->modem_channel_bitmap;
	while (mask) {
		u32 channel_id = __fls(mask);

		mask ^= BIT(channel_id);

		gsi_modem_channel_halt(gsi, channel_id);
	}

err_unwind:
	while (channel_id--)
		gsi_channel_teardown_one(gsi, channel_id);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);

	return ret;
}
1617
1618/* Inverse of gsi_channel_setup() */
1619static void gsi_channel_teardown(struct gsi *gsi)
1620{
1621 u32 mask = gsi->modem_channel_bitmap;
1622 u32 channel_id;
1623
1624 mutex_lock(&gsi->mutex);
1625
1626 while (mask) {
1627 u32 channel_id = __fls(mask);
1628
1629 mask ^= BIT(channel_id);
1630
1631 gsi_modem_channel_halt(gsi, channel_id);
1632 }
1633
	channel_id = gsi->channel_count - 1;
	do
		gsi_channel_teardown_one(gsi, channel_id);
	while (channel_id--);

	mutex_unlock(&gsi->mutex);

	gsi_irq_disable(gsi);
	gsi_evt_ring_teardown(gsi);
}

/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi, bool db_enable)
{
	u32 val;

	/* Here is where we first touch the GSI hardware */
	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
	if (!(val & ENABLED_FMASK)) {
		dev_err(gsi->dev, "GSI has not been enabled\n");
		return -EIO;
	}

	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
	if (!gsi->channel_count) {
		dev_err(gsi->dev, "GSI reports zero channels supported\n");
		return -EINVAL;
	}
	if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
		dev_warn(gsi->dev,
			 "limiting to %u channels (hardware supports %u)\n",
			 GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
		gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
	}

	gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
	if (!gsi->evt_ring_count) {
		dev_err(gsi->dev, "GSI reports zero event rings supported\n");
		return -EINVAL;
	}
	if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
		dev_warn(gsi->dev,
			 "limiting to %u event rings (hardware supports %u)\n",
			 GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
		gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
	}

	/* Initialize the error log */
	iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);

	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

	return gsi_channel_setup(gsi, db_enable);
}

/* Inverse of gsi_setup() */
void gsi_teardown(struct gsi *gsi)
{
	gsi_channel_teardown(gsi);
}

/* Initialize a channel's event ring */
static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
{
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;
	int ret;

	ret = gsi_evt_ring_id_alloc(gsi);
	if (ret < 0)
		return ret;
	channel->evt_ring_id = ret;

	evt_ring = &gsi->evt_ring[channel->evt_ring_id];
	evt_ring->channel = channel;

	ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
	if (!ret)
		return 0;	/* Success! */

	dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
		ret, gsi_channel_id(channel));

	gsi_evt_ring_id_free(gsi, channel->evt_ring_id);

	return ret;
}

/* Inverse of gsi_channel_evt_ring_init() */
static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
{
	u32 evt_ring_id = channel->evt_ring_id;
	struct gsi *gsi = channel->gsi;
	struct gsi_evt_ring *evt_ring;

	evt_ring = &gsi->evt_ring[evt_ring_id];
	gsi_ring_free(gsi, &evt_ring->ring);
	gsi_evt_ring_id_free(gsi, evt_ring_id);
}

/* Init function for event rings */
static void gsi_evt_ring_init(struct gsi *gsi)
{
	u32 evt_ring_id = 0;

	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
	gsi->event_enable_bitmap = 0;
	do
		init_completion(&gsi->evt_ring[evt_ring_id].completion);
	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
}

/* Inverse of gsi_evt_ring_init() */
static void gsi_evt_ring_exit(struct gsi *gsi)
{
	/* Nothing to do */
}

static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

	/* Make sure channel ids are in the range the driver supports */
	if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
		dev_err(dev, "bad channel id %u (must be less than %u)\n",
			channel_id, GSI_CHANNEL_COUNT_MAX);
		return false;
	}

	if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
		dev_err(dev, "bad EE id %u (must be AP or modem)\n",
			data->ee_id);
		return false;
	}

	if (!data->channel.tlv_count ||
	    data->channel.tlv_count > GSI_TLV_MAX) {
		dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
			channel_id, data->channel.tlv_count, GSI_TLV_MAX);
		return false;
	}

	/* We have to allow at least one maximally-sized transaction to
	 * be outstanding (which would use tlv_count TREs). Given how
	 * gsi_channel_tre_max() is computed, tre_count has to be almost
	 * twice the TLV FIFO size to satisfy this requirement.
	 */
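	/* (E.g. with tlv_count = 16, tre_count must be at least
	 * 2 * 16 - 1 = 31; and since tre_count must also be a power
	 * of 2, the smallest workable value is 32.)
	 */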
	if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
		dev_err(dev, "channel %u TRE count %u too small for TLV count %u\n",
			channel_id, data->channel.tre_count,
			data->channel.tlv_count);
		return false;
	}

	if (!is_power_of_2(data->channel.tre_count)) {
		dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
			channel_id, data->channel.tre_count);
		return false;
	}

	if (!is_power_of_2(data->channel.event_count)) {
		dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}

/* Init function for a single channel */
static int gsi_channel_init_one(struct gsi *gsi,
				const struct ipa_gsi_endpoint_data *data,
				bool command, bool prefetch)
{
	struct gsi_channel *channel;
	u32 tre_count;
	int ret;

	if (!gsi_channel_data_valid(gsi, data))
		return -EINVAL;

	/* Worst case we need an event for every outstanding TRE */
	if (data->channel.tre_count > data->channel.event_count) {
		tre_count = data->channel.event_count;
		dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
			 data->channel_id, tre_count);
	} else {
		tre_count = data->channel.tre_count;
	}
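	/* (E.g. a channel defined with tre_count = 512 but event_count
	 * = 256 would be limited to 256 TREs, because each outstanding
	 * TRE may need an event ring slot to report its completion.)
	 */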

	channel = &gsi->channel[data->channel_id];
	memset(channel, 0, sizeof(*channel));

	channel->gsi = gsi;
	channel->toward_ipa = data->toward_ipa;
	channel->command = command;
	channel->use_prefetch = command && prefetch;
	channel->tlv_count = data->channel.tlv_count;
	channel->tre_count = tre_count;
	channel->event_count = data->channel.event_count;
	init_completion(&channel->completion);

	ret = gsi_channel_evt_ring_init(channel);
	if (ret)
		goto err_clear_gsi;

	ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
	if (ret) {
		dev_err(gsi->dev, "error %d allocating channel %u ring\n",
			ret, data->channel_id);
		goto err_channel_evt_ring_exit;
	}

	ret = gsi_channel_trans_init(gsi, data->channel_id);
	if (ret)
		goto err_ring_free;

	if (command) {
		u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);

		ret = ipa_cmd_pool_init(channel, tre_max);
	}
	if (!ret)
		return 0;	/* Success! */

	gsi_channel_trans_exit(channel);
err_ring_free:
	gsi_ring_free(gsi, &channel->tre_ring);
err_channel_evt_ring_exit:
	gsi_channel_evt_ring_exit(channel);
err_clear_gsi:
	channel->gsi = NULL;	/* Mark it not (fully) initialized */

	return ret;
}

/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
	if (!channel->gsi)
		return;		/* Ignore uninitialized channels */

	if (channel->command)
		ipa_cmd_pool_exit(channel);
	gsi_channel_trans_exit(channel);
	gsi_ring_free(channel->gsi, &channel->tre_ring);
	gsi_channel_evt_ring_exit(channel);
}

/* Init function for channels */
static int gsi_channel_init(struct gsi *gsi, bool prefetch, u32 count,
			    const struct ipa_gsi_endpoint_data *data,
			    bool modem_alloc)
{
	int ret = 0;
	u32 i;

	gsi_evt_ring_init(gsi);

	/* The endpoint data array is indexed by endpoint name */
	for (i = 0; i < count; i++) {
		bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;

		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;	/* Skip over empty slots */

		/* Mark modem channels to be allocated (hardware workaround) */
		if (data[i].ee_id == GSI_EE_MODEM) {
			if (modem_alloc)
				gsi->modem_channel_bitmap |=
						BIT(data[i].channel_id);
			continue;
		}

		ret = gsi_channel_init_one(gsi, &data[i], command, prefetch);
		if (ret)
			goto err_unwind;
	}

	return ret;

err_unwind:
	while (i--) {
		if (ipa_gsi_endpoint_data_empty(&data[i]))
			continue;
		if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
			gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
			continue;
		}
		gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
	}
	gsi_evt_ring_exit(gsi);

	return ret;
}

/* Inverse of gsi_channel_init() */
static void gsi_channel_exit(struct gsi *gsi)
{
	u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;

	do
		gsi_channel_exit_one(&gsi->channel[channel_id]);
	while (channel_id--);
	gsi->modem_channel_bitmap = 0;

	gsi_evt_ring_exit(gsi);
}

/* Init function for GSI. GSI hardware does not need to be "ready" */
int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
	     u32 count, const struct ipa_gsi_endpoint_data *data,
	     bool modem_alloc)
{
	struct resource *res;
	resource_size_t size;
	unsigned int irq;
	int ret;

	gsi_validate_build();

	gsi->dev = &pdev->dev;

	/* The GSI layer performs NAPI on all endpoints. NAPI requires a
	 * network device structure, but the GSI layer does not have one,
	 * so we must create a dummy network device for this purpose.
	 */
	init_dummy_netdev(&gsi->dummy_dev);

	/* Get the GSI IRQ and arrange for it to wake the system */
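	/* (platform_get_irq_byname() returns a negative errno on failure;
	 * a zero IRQ number is treated as invalid here as well.)
	 */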
	ret = platform_get_irq_byname(pdev, "gsi");
	if (ret <= 0) {
		dev_err(gsi->dev,
			"DT error %d getting \"gsi\" IRQ property\n", ret);
		return ret ? : -EINVAL;
	}
	irq = ret;

	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
	if (ret) {
		dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
		return ret;
	}
	gsi->irq = irq;

	ret = enable_irq_wake(gsi->irq);
	if (ret)
		dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
	gsi->irq_wake_enabled = !ret;

	/* Get GSI memory range and map it */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
	if (!res) {
		dev_err(gsi->dev,
			"DT error getting \"gsi\" memory property\n");
		ret = -ENODEV;
		goto err_disable_irq_wake;
	}

	size = resource_size(res);
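	/* Make sure the region fits entirely within 32 bits of address
	 * space (both its start and its end must be expressible as
	 * 32-bit values).
	 */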
	if (res->start > U32_MAX || size > U32_MAX - res->start) {
		dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
		ret = -EINVAL;
		goto err_disable_irq_wake;
	}

	gsi->virt = ioremap(res->start, size);
	if (!gsi->virt) {
		dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
		ret = -ENOMEM;
		goto err_disable_irq_wake;
	}

	ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
	if (ret)
		goto err_iounmap;

	mutex_init(&gsi->mutex);
	init_completion(&gsi->completion);

	return 0;

err_iounmap:
	iounmap(gsi->virt);
err_disable_irq_wake:
	if (gsi->irq_wake_enabled)
		(void)disable_irq_wake(gsi->irq);
	free_irq(gsi->irq, gsi);

	return ret;
}

/* Inverse of gsi_init() */
void gsi_exit(struct gsi *gsi)
{
	mutex_destroy(&gsi->mutex);
	gsi_channel_exit(gsi);
	if (gsi->irq_wake_enabled)
		(void)disable_irq_wake(gsi->irq);
	free_irq(gsi->irq, gsi);
	iounmap(gsi->virt);
}

/* The maximum number of outstanding TREs on a channel. This limits
 * a channel's maximum number of transactions outstanding (worst case
 * is one TRE per transaction).
 *
 * The absolute limit is the number of TREs in the channel's TRE ring,
 * and in theory we should be able to use all of them. But in practice,
 * doing that led to the hardware reporting exhaustion of event ring
 * slots for writing completion information. So the hardware limit
 * would be (tre_count - 1).
 *
 * We reduce it a bit further though. Transaction resource pools are
 * sized to be a little larger than this maximum, to allow resource
 * allocations to always be contiguous. The number of entries in a
 * TRE ring buffer is a power of 2, and the extra resources in a pool
 * tend to nearly double the memory allocated for it. Reducing the
 * maximum number of outstanding TREs allows the number of entries in
 * a pool to avoid crossing that power-of-2 boundary, and this can
 * substantially reduce pool memory requirements. The number we
 * reduce it by matches the number added in gsi_trans_pool_init().
 */
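/* (Worked example, with illustrative values: for tre_count = 256 and
 * tlv_count = 16, the hardware limit would be 255 outstanding TREs.
 * A pool sized for 255 plus the 15 extra entries added in
 * gsi_trans_pool_init() would need 270 slots, just over the 256
 * boundary; returning 256 - (16 - 1) = 241 keeps the pool at 256.)
 */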
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	/* Hardware limit is channel->tre_count - 1 */
	return channel->tre_count - (channel->tlv_count - 1);
}

/* Returns the maximum number of TREs in a single transaction for a channel */
u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
{
	struct gsi_channel *channel = &gsi->channel[channel_id];

	return channel->tlv_count;
}