// SPDX-License-Identifier: GPL-2.0-or-later
/*
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "endpoint.h"
#include "pcm.h"
#include "clock.h"
#include "quirks.h"

enum {
	EP_STATE_STOPPED,
	EP_STATE_RUNNING,
	EP_STATE_STOPPING,
};

/* interface refcounting */
struct snd_usb_iface_ref {
	unsigned char iface;
	bool need_setup;
	int opened;
	struct list_head list;
};

/*
 * snd_usb_endpoint is a model that abstracts everything related to a
 * USB endpoint and its streaming.
 *
 * There are functions to activate and deactivate the streaming URBs and
 * optional callbacks to let the pcm logic handle the actual content of the
 * packets for playback and record. Thus, the bus streaming and the audio
 * handlers are fully decoupled.
 *
 * There are two different types of endpoints in audio applications.
 *
 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
 * inbound and outbound traffic.
 *
 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
 * (3 or 4 bytes).
 *
 * Each endpoint has to be configured prior to being used by calling
 * snd_usb_endpoint_set_params().
 *
 * The model incorporates reference counting, so that multiple users
 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
 * only the first user will effectively start the URBs, and only the last
 * one to stop it will tear the URBs down again.
 */
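
/*
 * An illustrative lifecycle, assembled from the comments in this file
 * (a simplified sketch; locking and error handling omitted, and
 * prepare_cb, retire_cb and subs are placeholder names for the PCM-side
 * callbacks and substream):
 *
 *   hw_params:          ep = snd_usb_endpoint_open(chip, fp, params, false);
 *   hw_params/prepare:  snd_usb_endpoint_configure(chip, ep);
 *   trigger start:      snd_usb_endpoint_set_callback(ep, prepare_cb,
 *                                                     retire_cb, subs);
 *                       snd_usb_endpoint_start(ep);
 *   trigger stop:       snd_usb_endpoint_stop(), balanced with the start
 *   afterwards:         snd_usb_endpoint_close(chip, ep);
 */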

/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}

/*
 * release an urb's data buffer and the urb itself
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->buffer_size)
		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}

static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

static inline bool ep_state_running(struct snd_usb_endpoint *ep)
{
	return atomic_read(&ep->state) == EP_STATE_RUNNING;
}

static inline bool ep_state_update(struct snd_usb_endpoint *ep, int old, int new)
{
	return atomic_cmpxchg(&ep->state, old, new) == old;
}

/**
 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
 *
 * @ep: The snd_usb_endpoint
 *
 * Determine whether an endpoint is driven by an implicit feedback
 * data endpoint source.
 */
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
{
	return ep->implicit_fb_sync && usb_pipeout(ep->pipe);
}

/*
 * Return the number of samples to be sent in the next packet
 * for streaming based on information derived from sync endpoints
 *
 * This won't be used for implicit feedback which takes the packet size
 * returned from the sync source
 */
static int slave_next_packet_size(struct snd_usb_endpoint *ep,
				  unsigned int avail)
{
	unsigned long flags;
	unsigned int phase;
	int ret;

	if (ep->fill_max)
		return ep->maxframesize;

	spin_lock_irqsave(&ep->lock, flags);
	phase = (ep->phase & 0xffff) + (ep->freqm << ep->datainterval);
	ret = min(phase >> 16, ep->maxframesize);
	if (avail && ret >= avail)
		ret = -EAGAIN;
	else
		ep->phase = phase;
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}
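
/*
 * For illustration (assumed numbers): with a sync-feedback rate of about
 * 44.1 frames per packet (freqm ~ 44.1 in Q16.16) and datainterval = 0,
 * the fractional bits kept in ->phase make this return 44 for most packets
 * and 45 for the rest, so the long-term average follows the feedback rate.
 */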

/*
 * Return the number of samples to be sent in the next packet
 * for adaptive and synchronous endpoints
 */
static int next_packet_size(struct snd_usb_endpoint *ep, unsigned int avail)
{
	unsigned int sample_accum;
	int ret;

	if (ep->fill_max)
		return ep->maxframesize;

	sample_accum = ep->sample_accum + ep->sample_rem;
	if (sample_accum >= ep->pps) {
		sample_accum -= ep->pps;
		ret = ep->packsize[1];
	} else {
		ret = ep->packsize[0];
	}
	if (avail && ret >= avail)
		ret = -EAGAIN;
	else
		ep->sample_accum = sample_accum;

	return ret;
}
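
/*
 * For illustration (assumed numbers): at 44100 Hz on a high-speed bus with
 * datainterval = 0, snd_usb_endpoint_set_params() computes pps = 8000,
 * packsize[0] = 5, packsize[1] = 6 and sample_rem = 4100; 4100 of every
 * 8000 packets then carry 6 frames, giving 44100 / 8000 = 5.5125 frames
 * per packet on average.
 */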

/*
 * snd_usb_endpoint_next_packet_size: Return the number of samples to be sent
 * in the next packet
 *
 * If the size equals or exceeds @avail, don't proceed but return -EAGAIN
 * Exception: @avail = 0 for skipping the check.
 */
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
				      struct snd_urb_ctx *ctx, int idx,
				      unsigned int avail)
{
	unsigned int packet;

	packet = ctx->packet_size[idx];
	if (packet) {
		if (avail && packet >= avail)
			return -EAGAIN;
		return packet;
	}

	if (ep->sync_source)
		return slave_next_packet_size(ep, avail);
	else
		return next_packet_size(ep, avail);
}

static void call_retire_callback(struct snd_usb_endpoint *ep,
				 struct urb *urb)
{
	struct snd_usb_substream *data_subs;

	data_subs = READ_ONCE(ep->data_subs);
	if (data_subs && ep->retire_data_urb)
		ep->retire_data_urb(data_subs, urb);
}

static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	call_retire_callback(ep, urb_ctx->urb);
}

static void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
				    struct snd_usb_endpoint *sender,
				    const struct urb *urb);

static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;
	struct snd_usb_endpoint *sync_sink;

	if (unlikely(ep->skip_packets > 0)) {
		ep->skip_packets--;
		return;
	}

	sync_sink = READ_ONCE(ep->sync_sink);
	if (sync_sink)
		snd_usb_handle_sync_urb(sync_sink, ep, urb);

	call_retire_callback(ep, urb);
}

static inline bool has_tx_length_quirk(struct snd_usb_audio *chip)
{
	return chip->quirk_flags & QUIRK_FLAG_TX_LENGTH;
}

static void prepare_silent_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *ctx)
{
	struct urb *urb = ctx->urb;
	unsigned int offs = 0;
	unsigned int extra = 0;
	__le32 packet_length;
	int i;

	/* For tx_length_quirk, put packet length at start of packet */
	if (has_tx_length_quirk(ep->chip))
		extra = sizeof(packet_length);

	for (i = 0; i < ctx->packets; ++i) {
		unsigned int offset;
		unsigned int length;
		int counts;

		counts = snd_usb_endpoint_next_packet_size(ep, ctx, i, 0);
		length = counts * ep->stride; /* number of silent bytes */
		offset = offs * ep->stride + extra * i;
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = length + extra;
		if (extra) {
			packet_length = cpu_to_le32(length);
			memcpy(urb->transfer_buffer + offset,
			       &packet_length, sizeof(packet_length));
		}
		memset(urb->transfer_buffer + offset + extra,
		       ep->silence_value, length);
		offs += counts;
	}

	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * ep->stride + ctx->packets * extra;
	ctx->queued = 0;
}

/*
 * Prepare a PLAYBACK urb for submission to the bus.
 */
static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
				 struct snd_urb_ctx *ctx)
{
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;
	struct snd_usb_substream *data_subs;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		data_subs = READ_ONCE(ep->data_subs);
		if (data_subs && ep->prepare_data_urb)
			ep->prepare_data_urb(data_subs, urb);
		else /* no data provider, so send silence */
			prepare_silent_urb(ep, ctx);
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the pipe.
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
}

/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 */
static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
}

/* notify an error as XRUN to the assigned PCM data substream */
static void notify_xrun(struct snd_usb_endpoint *ep)
{
	struct snd_usb_substream *data_subs;

	data_subs = READ_ONCE(ep->data_subs);
	if (data_subs && data_subs->pcm_substream)
		snd_pcm_stop_xrun(data_subs->pcm_substream);
}

static struct snd_usb_packet_info *
next_packet_fifo_enqueue(struct snd_usb_endpoint *ep)
{
	struct snd_usb_packet_info *p;

	p = ep->next_packet + (ep->next_packet_head + ep->next_packet_queued) %
		ARRAY_SIZE(ep->next_packet);
	ep->next_packet_queued++;
	return p;
}

static struct snd_usb_packet_info *
next_packet_fifo_dequeue(struct snd_usb_endpoint *ep)
{
	struct snd_usb_packet_info *p;

	p = ep->next_packet + ep->next_packet_head;
	ep->next_packet_head++;
	ep->next_packet_head %= ARRAY_SIZE(ep->next_packet);
	ep->next_packet_queued--;
	return p;
}

/*
 * Send output urbs that have been prepared previously. URBs are dequeued
 * from ep->ready_playback_urbs; if none are available, or if no packets
 * have been prepared, this function does nothing.
 *
 * The reason why the functionality of sending and preparing URBs is separated
 * is that host controllers don't guarantee the order in which they return
 * inbound and outbound packets to their submitters.
 *
 * This function is only used for implicit feedback endpoints. For endpoints
 * driven by dedicated sync endpoints, URBs are immediately re-submitted
 * from their completion handler.
 */
static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
{
	while (ep_state_running(ep)) {

		unsigned long flags;
		struct snd_usb_packet_info *packet;
		struct snd_urb_ctx *ctx = NULL;
		int err, i;

		spin_lock_irqsave(&ep->lock, flags);
		if (ep->next_packet_queued > 0 &&
		    !list_empty(&ep->ready_playback_urbs)) {
			/* take URB out of FIFO */
			ctx = list_first_entry(&ep->ready_playback_urbs,
					       struct snd_urb_ctx, ready_list);
			list_del_init(&ctx->ready_list);

			packet = next_packet_fifo_dequeue(ep);
		}
		spin_unlock_irqrestore(&ep->lock, flags);

		if (ctx == NULL)
			return;

		/* copy over the length information */
		for (i = 0; i < packet->packets; i++)
			ctx->packet_size[i] = packet->packet_size[i];

		/* call the data handler to fill in playback data */
		prepare_outbound_urb(ep, ctx);

		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
		if (err < 0) {
			usb_audio_err(ep->chip,
				      "Unable to submit urb #%d: %d at %s\n",
				      ctx->index, err, __func__);
			notify_xrun(ep);
			return;
		}

		set_bit(ctx->index, &ep->active_mask);
		atomic_inc(&ep->submitted_urbs);
	}
}

/*
 * complete callback for urbs
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	unsigned long flags;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN))	/* device disabled */
		goto exit_clear;
	/* device disconnected */
	if (unlikely(atomic_read(&ep->chip->shutdown)))
		goto exit_clear;

	if (unlikely(!ep_state_running(ep)))
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!ep_state_running(ep)))
			goto exit_clear;

		if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
			spin_lock_irqsave(&ep->lock, flags);
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
			clear_bit(ctx->index, &ep->active_mask);
			spin_unlock_irqrestore(&ep->lock, flags);
			queue_pending_output_urbs(ep);
			atomic_dec(&ep->submitted_urbs); /* decrement at last */
			return;
		}

		prepare_outbound_urb(ep, ctx);
		/* can be stopped during prepare callback */
		if (unlikely(!ep_state_running(ep)))
			goto exit_clear;
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!ep_state_running(ep)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	usb_audio_err(ep->chip, "cannot submit urb (err = %d)\n", err);
	notify_xrun(ep);

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
	atomic_dec(&ep->submitted_urbs);
}

/*
 * Find or create a refcount object for the given interface
 *
 * The objects are released altogether in snd_usb_endpoint_free_all()
 */
static struct snd_usb_iface_ref *
iface_ref_find(struct snd_usb_audio *chip, int iface)
{
	struct snd_usb_iface_ref *ip;

	list_for_each_entry(ip, &chip->iface_ref_list, list)
		if (ip->iface == iface)
			return ip;

	ip = kzalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->iface = iface;
	list_add_tail(&ip->list, &chip->iface_ref_list);
	return ip;
}

/*
 * Get the existing endpoint object corresponding to the given EP number.
 * Returns NULL if not present.
 */
struct snd_usb_endpoint *
snd_usb_get_endpoint(struct snd_usb_audio *chip, int ep_num)
{
	struct snd_usb_endpoint *ep;

	list_for_each_entry(ep, &chip->ep_list, list) {
		if (ep->ep_num == ep_num)
			return ep;
	}

	return NULL;
}

#define ep_type_name(type) \
	(type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync")

/**
 * snd_usb_add_endpoint: Add an endpoint to a USB audio chip
 *
 * @chip: The chip
 * @ep_num: The number of the endpoint to use
 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
 *
 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created.
 *
 * Returns zero on success or a negative error code.
 *
 * New endpoints will be added to chip->ep_list and freed by
 * calling snd_usb_endpoint_free_all().
 *
 * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
 * bNumEndpoints > 1 beforehand.
 */
int snd_usb_add_endpoint(struct snd_usb_audio *chip, int ep_num, int type)
{
	struct snd_usb_endpoint *ep;
	bool is_playback;

	ep = snd_usb_get_endpoint(chip, ep_num);
	if (ep)
		return 0;

	usb_audio_dbg(chip, "Creating new %s endpoint #%x\n",
		      ep_type_name(type),
		      ep_num);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	atomic_set(&ep->submitted_urbs, 0);

	is_playback = ((ep_num & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
	ep_num &= USB_ENDPOINT_NUMBER_MASK;
	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	list_add_tail(&ep->list, &chip->ep_list);
	return 0;
}

/* Set up syncinterval and maxsyncsize for a sync EP */
static void endpoint_set_syncinterval(struct snd_usb_audio *chip,
				      struct snd_usb_endpoint *ep)
{
	struct usb_host_interface *alts;
	struct usb_endpoint_descriptor *desc;

	alts = snd_usb_get_host_interface(chip, ep->iface, ep->altsetting);
	if (!alts)
		return;

	desc = get_endpoint(alts, ep->ep_idx);
	if (desc->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
	    desc->bRefresh >= 1 && desc->bRefresh <= 9)
		ep->syncinterval = desc->bRefresh;
	else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
		ep->syncinterval = 1;
	else if (desc->bInterval >= 1 && desc->bInterval <= 16)
		ep->syncinterval = desc->bInterval - 1;
	else
		ep->syncinterval = 3;

	ep->syncmaxsize = le16_to_cpu(desc->wMaxPacketSize);
}
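
/*
 * For illustration (assumed example, not from a specific device): a
 * high-speed sync endpoint with bInterval = 4 ends up with syncinterval = 3,
 * i.e. feedback once every 2^3 = 8 microframes (1 ms); sync_ep_set_params()
 * later programs the urb with interval = 1 << ep->syncinterval accordingly.
 */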

static bool endpoint_compatible(struct snd_usb_endpoint *ep,
				const struct audioformat *fp,
				const struct snd_pcm_hw_params *params)
{
	if (!ep->opened)
		return false;
	if (ep->cur_audiofmt != fp)
		return false;
	if (ep->cur_rate != params_rate(params) ||
	    ep->cur_format != params_format(params) ||
	    ep->cur_period_frames != params_period_size(params) ||
	    ep->cur_buffer_periods != params_periods(params))
		return false;
	return true;
}

/*
 * Check whether the given fp and hw params are compatible with the current
 * setup of the target EP for implicit feedback sync
 */
bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
				 struct snd_usb_endpoint *ep,
				 const struct audioformat *fp,
				 const struct snd_pcm_hw_params *params)
{
	bool ret;

	mutex_lock(&chip->mutex);
	ret = endpoint_compatible(ep, fp, params);
	mutex_unlock(&chip->mutex);
	return ret;
}

/*
 * snd_usb_endpoint_open: Open the endpoint
 *
 * Called from hw_params to assign the endpoint to the substream.
 * It's reference-counted, and only the first opener is allowed to set up
 * arbitrary parameters. A later opener must be compatible with the
 * parameters set up by the first opener.
 * The endpoint needs to be closed via snd_usb_endpoint_close() later.
 *
 * Note that this function doesn't configure the endpoint. The substream
 * needs to set it up later via snd_usb_endpoint_configure().
 */
struct snd_usb_endpoint *
snd_usb_endpoint_open(struct snd_usb_audio *chip,
		      const struct audioformat *fp,
		      const struct snd_pcm_hw_params *params,
		      bool is_sync_ep)
{
	struct snd_usb_endpoint *ep;
	int ep_num = is_sync_ep ? fp->sync_ep : fp->endpoint;

	mutex_lock(&chip->mutex);
	ep = snd_usb_get_endpoint(chip, ep_num);
	if (!ep) {
		usb_audio_err(chip, "Cannot find EP 0x%x to open\n", ep_num);
		goto unlock;
	}

	if (!ep->opened) {
		if (is_sync_ep) {
			ep->iface = fp->sync_iface;
			ep->altsetting = fp->sync_altsetting;
			ep->ep_idx = fp->sync_ep_idx;
		} else {
			ep->iface = fp->iface;
			ep->altsetting = fp->altsetting;
			ep->ep_idx = fp->ep_idx;
		}
		usb_audio_dbg(chip, "Open EP 0x%x, iface=%d:%d, idx=%d\n",
			      ep_num, ep->iface, ep->altsetting, ep->ep_idx);

		ep->iface_ref = iface_ref_find(chip, ep->iface);
		if (!ep->iface_ref) {
			ep = NULL;
			goto unlock;
		}

		ep->cur_audiofmt = fp;
		ep->cur_channels = fp->channels;
		ep->cur_rate = params_rate(params);
		ep->cur_format = params_format(params);
		ep->cur_frame_bytes = snd_pcm_format_physical_width(ep->cur_format) *
			ep->cur_channels / 8;
		ep->cur_period_frames = params_period_size(params);
		ep->cur_period_bytes = ep->cur_period_frames * ep->cur_frame_bytes;
		ep->cur_buffer_periods = params_periods(params);
		ep->cur_clock = fp->clock;

		if (ep->type == SND_USB_ENDPOINT_TYPE_SYNC)
			endpoint_set_syncinterval(chip, ep);

		ep->implicit_fb_sync = fp->implicit_fb;
		ep->need_setup = true;

		usb_audio_dbg(chip, " channels=%d, rate=%d, format=%s, period_bytes=%d, periods=%d, implicit_fb=%d\n",
			      ep->cur_channels, ep->cur_rate,
			      snd_pcm_format_name(ep->cur_format),
			      ep->cur_period_bytes, ep->cur_buffer_periods,
			      ep->implicit_fb_sync);

	} else {
		if (WARN_ON(!ep->iface_ref)) {
			ep = NULL;
			goto unlock;
		}

		if (!endpoint_compatible(ep, fp, params)) {
			usb_audio_err(chip, "Incompatible EP setup for 0x%x\n",
				      ep_num);
			ep = NULL;
			goto unlock;
		}

		usb_audio_dbg(chip, "Reopened EP 0x%x (count %d)\n",
			      ep_num, ep->opened);
	}

	if (!ep->iface_ref->opened++)
		ep->iface_ref->need_setup = true;

	ep->opened++;

 unlock:
	mutex_unlock(&chip->mutex);
	return ep;
}

/*
 * snd_usb_endpoint_set_sync: Link data and sync endpoints
 *
 * Pass NULL to sync_ep to unlink again
 */
void snd_usb_endpoint_set_sync(struct snd_usb_audio *chip,
			       struct snd_usb_endpoint *data_ep,
			       struct snd_usb_endpoint *sync_ep)
{
	data_ep->sync_source = sync_ep;
}

/*
 * Set data endpoint callbacks and the assigned data stream
 *
 * Called at PCM trigger and cleanups.
 * Pass NULL to deactivate each callback.
 */
void snd_usb_endpoint_set_callback(struct snd_usb_endpoint *ep,
				   void (*prepare)(struct snd_usb_substream *subs,
						   struct urb *urb),
				   void (*retire)(struct snd_usb_substream *subs,
						  struct urb *urb),
				   struct snd_usb_substream *data_subs)
{
	ep->prepare_data_urb = prepare;
	ep->retire_data_urb = retire;
	if (data_subs)
		ep->lowlatency_playback = data_subs->lowlatency_playback;
	else
		ep->lowlatency_playback = false;
	WRITE_ONCE(ep->data_subs, data_subs);
}

static int endpoint_set_interface(struct snd_usb_audio *chip,
				  struct snd_usb_endpoint *ep,
				  bool set)
{
	int altset = set ? ep->altsetting : 0;
	int err;

	usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n",
		      ep->iface, altset, ep->ep_num);
	err = usb_set_interface(chip->dev, ep->iface, altset);
	if (err < 0) {
		usb_audio_err(chip, "%d:%d: usb_set_interface failed (%d)\n",
			      ep->iface, altset, err);
		return err;
	}

	if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
		msleep(50);
	return 0;
}

/*
 * snd_usb_endpoint_close: Close the endpoint
 *
 * Unreference the already opened endpoint via snd_usb_endpoint_open().
 */
void snd_usb_endpoint_close(struct snd_usb_audio *chip,
			    struct snd_usb_endpoint *ep)
{
	mutex_lock(&chip->mutex);
	usb_audio_dbg(chip, "Closing EP 0x%x (count %d)\n",
		      ep->ep_num, ep->opened);

	if (!--ep->iface_ref->opened)
		endpoint_set_interface(chip, ep, false);

	if (!--ep->opened) {
		ep->iface = 0;
		ep->altsetting = 0;
		ep->cur_audiofmt = NULL;
		ep->cur_rate = 0;
		ep->cur_clock = 0;
		ep->iface_ref = NULL;
		usb_audio_dbg(chip, "EP 0x%x closed\n", ep->ep_num);
	}
	mutex_unlock(&chip->mutex);
}

/* Prepare for suspending EP, called from the main suspend handler */
void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep)
{
	ep->need_setup = true;
	if (ep->iface_ref)
		ep->iface_ref->need_setup = true;
}

/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	int alive;

	if (atomic_read(&ep->state) != EP_STATE_STOPPING)
		return 0;

	do {
		alive = atomic_read(&ep->submitted_urbs);
		if (!alive)
			break;

		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if (alive)
		usb_audio_err(ep->chip,
			      "timeout: still %d active urbs on EP #%x\n",
			      alive, ep->ep_num);

	if (ep_state_update(ep, EP_STATE_STOPPING, EP_STATE_STOPPED)) {
		ep->sync_sink = NULL;
		snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);
	}

	return 0;
}

/* sync the pending stop operation;
 * this function itself doesn't trigger the stop operation
 */
void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
{
	if (ep)
		wait_clear_urbs(ep);
}

/*
 * Stop active urbs
 *
 * This function moves the EP to the STOPPING state if it is currently RUNNING.
 */
static int stop_urbs(struct snd_usb_endpoint *ep, bool force)
{
	unsigned int i;

	if (!force && atomic_read(&ep->running))
		return -EBUSY;

	if (!ep_state_update(ep, EP_STATE_RUNNING, EP_STATE_STOPPING))
		return 0;

	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_head = 0;
	ep->next_packet_queued = 0;

	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				usb_unlink_urb(u);
			}
		}
	}

	return 0;
}

/*
 * release an endpoint's urbs
 */
static int release_urbs(struct snd_usb_endpoint *ep, bool force)
{
	int i, err;

	/* route incoming urbs to nirvana */
	snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);

	/* stop and unlink urbs */
	err = stop_urbs(ep, force);
	if (err)
		return err;

	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
			  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
	return 0;
}

/*
 * configure a data endpoint
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep)
{
	struct snd_usb_audio *chip = ep->chip;
	unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
	unsigned int max_packs_per_period, urbs_per_period, urb_packs;
	unsigned int max_urbs, i;
	const struct audioformat *fmt = ep->cur_audiofmt;
	int frame_bits = ep->cur_frame_bytes * 8;
	int tx_length_quirk = (has_tx_length_quirk(chip) &&
			       usb_pipeout(ep->pipe));

	usb_audio_dbg(chip, "Setting params for data EP 0x%x, pipe 0x%x\n",
		      ep->ep_num, ep->pipe);

	if (ep->cur_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
		/*
		 * When operating in DSD DOP mode, the size of a sample frame
		 * in hardware differs from the actual physical format width
		 * because we need to make room for the DOP markers.
		 */
		frame_bits += ep->cur_channels << 3;
	}

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;

	switch (ep->cur_format) {
	case SNDRV_PCM_FORMAT_U8:
		ep->silence_value = 0x80;
		break;
	case SNDRV_PCM_FORMAT_DSD_U8:
	case SNDRV_PCM_FORMAT_DSD_U16_LE:
	case SNDRV_PCM_FORMAT_DSD_U32_LE:
	case SNDRV_PCM_FORMAT_DSD_U16_BE:
	case SNDRV_PCM_FORMAT_DSD_U32_BE:
		ep->silence_value = 0x69;
		break;
	default:
		ep->silence_value = 0;
	}

	/* assume max. frequency is 50% higher than nominal */
	ep->freqmax = ep->freqn + (ep->freqn >> 1);
	/* Round up freqmax to nearest integer in order to calculate maximum
	 * packet size, which must represent a whole number of frames.
	 * This is accomplished by adding 0x0.ffff before converting the
	 * Q16.16 format into integer.
	 * In order to accurately calculate the maximum packet size when
	 * the data interval is more than 1 (i.e. ep->datainterval > 0),
	 * multiply by the data interval prior to rounding. For instance,
	 * a freqmax of 41 kHz will result in a max packet size of 6 (5.125)
	 * frames with a data interval of 1, but 11 (10.25) frames with a
	 * data interval of 2.
	 * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the
	 * maximum datainterval value of 3, at USB full speed, higher for
	 * USB high speed, noting that ep->freqmax is in units of
	 * frames per packet in Q16.16 format.)
	 */
	maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) *
			(frame_bits >> 3);
	if (tx_length_quirk)
		maxsize += sizeof(__le32); /* Space for length descriptor */
	/* but wMaxPacketSize might reduce this */
	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
		/* whatever fits into a max. size packet */
		unsigned int data_maxsize = maxsize = ep->maxpacksize;

		if (tx_length_quirk)
			/* Need to remove the length descriptor to calc freq */
			data_maxsize -= sizeof(__le32);
		ep->freqmax = (data_maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	}

	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	if (snd_usb_get_speed(chip->dev) != USB_SPEED_FULL) {
		packs_per_ms = 8 >> ep->datainterval;
		max_packs_per_urb = MAX_PACKS_HS;
	} else {
		packs_per_ms = 1;
		max_packs_per_urb = MAX_PACKS;
	}
	if (ep->sync_source && !ep->implicit_fb_sync)
		max_packs_per_urb = min(max_packs_per_urb,
					1U << ep->sync_source->syncinterval);
	max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);

	/*
	 * Capture endpoints need to use small URBs because there's no way
	 * to tell in advance where the next period will end, and we don't
	 * want the next URB to complete much after the period ends.
	 *
	 * Playback endpoints with implicit sync must use the same parameters
	 * as their corresponding capture endpoint.
	 */
	if (usb_pipein(ep->pipe) || ep->implicit_fb_sync) {

		urb_packs = packs_per_ms;
		/*
		 * Wireless devices can poll at a max rate of once per 4ms.
		 * For dataintervals less than 5, increase the packet count to
		 * allow the host controller to use bursting to fill in the
		 * gaps.
		 */
		if (snd_usb_get_speed(chip->dev) == USB_SPEED_WIRELESS) {
			int interval = ep->datainterval;
			while (interval < 5) {
				urb_packs <<= 1;
				++interval;
			}
		}
		/* make capture URBs <= 1 ms and smaller than a period */
		urb_packs = min(max_packs_per_urb, urb_packs);
		while (urb_packs > 1 && urb_packs * maxsize >= ep->cur_period_bytes)
			urb_packs >>= 1;
		ep->nurbs = MAX_URBS;

	/*
	 * Playback endpoints without implicit sync are adjusted so that
	 * a period fits as evenly as possible in the smallest number of
	 * URBs. The total number of URBs is adjusted to the size of the
	 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
	 */
	} else {
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval)) *
				(frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (ep->sync_source)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);

		/* how many packets will contain an entire ALSA period? */
		max_packs_per_period = DIV_ROUND_UP(ep->cur_period_bytes, minsize);

		/* how many URBs will contain a period? */
		urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
					       max_packs_per_urb);
		/* how many packets are needed in each URB? */
		urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

		/* limit the number of frames in a single URB */
		ep->max_urb_frames = DIV_ROUND_UP(ep->cur_period_frames,
						  urbs_per_period);

		/* try to use enough URBs to contain an entire ALSA buffer */
		max_urbs = min((unsigned) MAX_URBS,
			       MAX_QUEUE * packs_per_ms / urb_packs);
		ep->nurbs = min(max_urbs, urbs_per_period * ep->cur_buffer_periods);
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = urb_packs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++; /* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	/* total buffer bytes of all URBs plus the next queue;
	 * referred to in pcm.c
	 */
	ep->nominal_queue_size = maxsize * urb_packs * (ep->nurbs + 1);
	return 0;

out_of_memory:
	release_urbs(ep, false);
	return -ENOMEM;
}

/*
 * configure a sync endpoint
 */
static int sync_ep_set_params(struct snd_usb_endpoint *ep)
{
	struct snd_usb_audio *chip = ep->chip;
	int i;

	usb_audio_dbg(chip, "Setting params for sync EP 0x%x, pipe 0x%x\n",
		      ep->ep_num, ep->pipe);

	ep->syncbuf = usb_alloc_coherent(chip->dev, SYNC_URBS * 4,
					 GFP_KERNEL, &ep->sync_dma);
	if (!ep->syncbuf)
		return -ENOMEM;

	for (i = 0; i < SYNC_URBS; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = 1;
		u->urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer = ep->syncbuf + i * 4;
		u->urb->transfer_dma = ep->sync_dma + i * 4;
		u->urb->transfer_buffer_length = 4;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->number_of_packets = 1;
		u->urb->interval = 1 << ep->syncinterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	ep->nurbs = SYNC_URBS;

	return 0;

out_of_memory:
	release_urbs(ep, false);
	return -ENOMEM;
}

/*
 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
 *
 * Determine the number of URBs to be used on this endpoint.
 * An endpoint must be configured before it can be started.
 * An endpoint that is already running can not be reconfigured.
 */
static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
				       struct snd_usb_endpoint *ep)
{
	const struct audioformat *fmt = ep->cur_audiofmt;
	int err;

	/* release old buffers, if any */
	err = release_urbs(ep, false);
	if (err < 0)
		return err;

	ep->datainterval = fmt->datainterval;
	ep->maxpacksize = fmt->maxpacksize;
	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);

	if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL) {
		ep->freqn = get_usb_full_speed_rate(ep->cur_rate);
		ep->pps = 1000 >> ep->datainterval;
	} else {
		ep->freqn = get_usb_high_speed_rate(ep->cur_rate);
		ep->pps = 8000 >> ep->datainterval;
	}

	ep->sample_rem = ep->cur_rate % ep->pps;
	ep->packsize[0] = ep->cur_rate / ep->pps;
	ep->packsize[1] = (ep->cur_rate + (ep->pps - 1)) / ep->pps;

	/* calculate the frequency in 16.16 format */
	ep->freqm = ep->freqn;
	ep->freqshift = INT_MIN;

	ep->phase = 0;

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		err = data_ep_set_params(ep);
		break;
	case SND_USB_ENDPOINT_TYPE_SYNC:
		err = sync_ep_set_params(ep);
		break;
	default:
		err = -EINVAL;
	}

	usb_audio_dbg(chip, "Set up %d URBS, ret=%d\n", ep->nurbs, err);

	if (err < 0)
		return err;

	/* some unit conversions in runtime */
	ep->maxframesize = ep->maxpacksize / ep->cur_frame_bytes;
	ep->curframesize = ep->curpacksize / ep->cur_frame_bytes;

	return 0;
}
1283
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001284/*
1285 * snd_usb_endpoint_configure: Configure the endpoint
1286 *
1287 * This function sets up the EP to be fully usable state.
1288 * It's called either from hw_params or prepare callback.
gushengxianff630b62021-07-05 05:00:52 -07001289 * The function checks need_setup flag, and performs nothing unless needed,
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001290 * so it's safe to call this multiple times.
1291 *
1292 * This returns zero if unchanged, 1 if the configuration has changed,
1293 * or a negative error code.
1294 */
1295int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
1296 struct snd_usb_endpoint *ep)
1297{
1298 bool iface_first;
1299 int err = 0;
1300
1301 mutex_lock(&chip->mutex);
Takashi Iwai00272c62021-01-08 08:52:17 +01001302 if (WARN_ON(!ep->iface_ref))
1303 goto unlock;
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001304 if (!ep->need_setup)
1305 goto unlock;
1306
Takashi Iwai00272c62021-01-08 08:52:17 +01001307 /* If the interface has been already set up, just set EP parameters */
1308 if (!ep->iface_ref->need_setup) {
Takashi Iwai3784d442021-01-18 08:58:15 +01001309 /* sample rate setup of UAC1 is per endpoint, and we need
1310 * to update at each EP configuration
1311 */
1312 if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
1313 err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
1314 ep->cur_rate);
1315 if (err < 0)
1316 goto unlock;
1317 }
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001318 err = snd_usb_endpoint_set_params(chip, ep);
1319 if (err < 0)
1320 goto unlock;
1321 goto done;
1322 }
1323
1324	/* Need to deselect the altsetting first */
1325 endpoint_set_interface(chip, ep, false);
1326
1327 /* Some UAC1 devices (e.g. Yamaha THR10) need the host interface
1328	 * to be set up before the parameter setup
1329 */
1330 iface_first = ep->cur_audiofmt->protocol == UAC_VERSION_1;
Takashi Iwai6e413402021-08-24 07:57:20 +02001331	/* Workaround for devices that require the interface setup first, as UAC1 does */
1332 if (chip->quirk_flags & QUIRK_FLAG_SET_IFACE_FIRST)
Takashi Iwai7af5a142021-08-24 07:47:00 +02001333 iface_first = true;
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001334 if (iface_first) {
1335 err = endpoint_set_interface(chip, ep, true);
1336 if (err < 0)
1337 goto unlock;
1338 }
1339
1340 err = snd_usb_init_pitch(chip, ep->cur_audiofmt);
1341 if (err < 0)
1342 goto unlock;
1343
1344 err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, ep->cur_rate);
1345 if (err < 0)
1346 goto unlock;
1347
1348 err = snd_usb_endpoint_set_params(chip, ep);
1349 if (err < 0)
1350 goto unlock;
1351
1352 err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
1353 if (err < 0)
1354 goto unlock;
1355
1356	/* for UAC2/3, enable the interface altsetting here as the last step */
1357 if (!iface_first) {
1358 err = endpoint_set_interface(chip, ep, true);
1359 if (err < 0)
1360 goto unlock;
1361 }
1362
Takashi Iwai00272c62021-01-08 08:52:17 +01001363 ep->iface_ref->need_setup = false;
1364
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001365 done:
1366 ep->need_setup = false;
1367 err = 1;
1368
1369unlock:
1370 mutex_unlock(&chip->mutex);
1371 return err;
1372}
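/*
 * A minimal caller sketch (illustrative only, not part of the driver):
 * both the hw_params and prepare callbacks may call this, and only the
 * first call after a stream change performs the actual setup.
 *
 *	ret = snd_usb_endpoint_configure(chip, ep);
 *	if (ret < 0)
 *		return ret;
 *	if (ret > 0)
 *		;	// configuration actually changed; refresh any cached state
 */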
1373
Takashi Iwai4e7cf1f2021-09-29 10:08:36 +02001374/* get the rate currently set on the given clock by any endpoint */
1375int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock)
1376{
1377 struct snd_usb_endpoint *ep;
1378 int rate = 0;
1379
1380 if (!clock)
1381 return 0;
1382 mutex_lock(&chip->mutex);
1383 list_for_each_entry(ep, &chip->ep_list, list) {
1384 if (ep->cur_clock == clock && ep->cur_rate) {
1385 rate = ep->cur_rate;
1386 break;
1387 }
1388 }
1389 mutex_unlock(&chip->mutex);
1390 return rate;
1391}
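/*
 * Sketch of the intended use (illustrative only; clock_id and new_rate are
 * hypothetical names): when another stream wants to use the same clock, it
 * can query the rate already in use and keep the two consistent.
 *
 *	rate = snd_usb_endpoint_get_clock_rate(chip, clock_id);
 *	if (rate && rate != new_rate)
 *		;	// constrain or reject new_rate
 */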
1392
Daniel Mack94c27212012-04-12 13:51:15 +02001393/**
1394 * snd_usb_endpoint_start: start an snd_usb_endpoint
1395 *
Ioan-Adrian Ratiu1d0f9532017-01-05 00:37:46 +02001396 * @ep: the endpoint to start
Daniel Mack94c27212012-04-12 13:51:15 +02001397 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001398 * A call to this function will increment the running count of the endpoint.
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001399 * If it is not already running, the URBs for this endpoint will be
Daniel Mack94c27212012-04-12 13:51:15 +02001400 * submitted. Otherwise, this function does nothing.
1401 *
1402 * Must be balanced to calls of snd_usb_endpoint_stop().
1403 *
1404 * Returns an error if the URB submission failed, 0 in all other cases.
1405 */
Ioan-Adrian Ratiu1d0f9532017-01-05 00:37:46 +02001406int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001407{
1408 int err;
1409 unsigned int i;
1410
Takashi Iwai47ab1542015-08-25 16:09:00 +02001411 if (atomic_read(&ep->chip->shutdown))
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001412 return -EBADFD;
1413
Takashi Iwai53837b42020-11-23 09:53:39 +01001414 if (ep->sync_source)
1415 WRITE_ONCE(ep->sync_source->sync_sink, ep);
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001416
Takashi Iwai43b81e82020-11-23 09:53:34 +01001417 usb_audio_dbg(ep->chip, "Starting %s EP 0x%x (running %d)\n",
1418 ep_type_name(ep->type), ep->ep_num,
1419 atomic_read(&ep->running));
Takashi Iwai57234bc2020-11-23 09:53:27 +01001420
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001421 /* already running? */
Takashi Iwai43b81e82020-11-23 09:53:34 +01001422 if (atomic_inc_return(&ep->running) != 1)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001423 return 0;
1424
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001425 ep->active_mask = 0;
1426 ep->unlink_mask = 0;
1427 ep->phase = 0;
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +03001428 ep->sample_accum = 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001429
Daniel Mack2b58fd52012-09-04 10:23:07 +02001430 snd_usb_endpoint_start_quirk(ep);
1431
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001432 /*
1433 * If this endpoint has a data endpoint as implicit feedback source,
1434 * don't start the urbs here. Instead, mark them all as available,
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001435 * wait for the record urbs to return and queue the playback urbs
1436 * from that context.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001437 */
1438
Takashi Iwai5c2b3012021-02-06 21:30:51 +01001439 if (!ep_state_update(ep, EP_STATE_STOPPED, EP_STATE_RUNNING))
1440 goto __error;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001441
Takashi Iwaiebe8dc52021-04-14 10:32:55 +02001442 if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
Takashi Iwai019c7f92021-07-29 09:38:51 +02001443 !(ep->chip->quirk_flags & QUIRK_FLAG_PLAYBACK_FIRST)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001444 for (i = 0; i < ep->nurbs; i++) {
1445 struct snd_urb_ctx *ctx = ep->urb + i;
1446 list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
1447 }
1448
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001449 usb_audio_dbg(ep->chip, "No URB submission due to implicit fb sync\n");
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001450 return 0;
1451 }
1452
1453 for (i = 0; i < ep->nurbs; i++) {
1454 struct urb *urb = ep->urb[i].urb;
1455
1456 if (snd_BUG_ON(!urb))
1457 goto __error;
1458
1459 if (usb_pipeout(ep->pipe)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001460 prepare_outbound_urb(ep, urb->context);
1461 } else {
1462 prepare_inbound_urb(ep, urb->context);
1463 }
1464
1465 err = usb_submit_urb(urb, GFP_ATOMIC);
1466 if (err < 0) {
Takashi Iwai0ba41d92014-02-26 13:02:17 +01001467 usb_audio_err(ep->chip,
1468 "cannot submit urb %d, error %d: %s\n",
1469 i, err, usb_error_string(err));
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001470 goto __error;
1471 }
1472 set_bit(i, &ep->active_mask);
Takashi Iwai86a42ad2021-09-29 10:08:37 +02001473 atomic_inc(&ep->submitted_urbs);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001474 }
1475
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001476 usb_audio_dbg(ep->chip, "%d URBs submitted for EP 0x%x\n",
1477 ep->nurbs, ep->ep_num);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001478 return 0;
1479
1480__error:
Takashi Iwaid0f09d12020-11-23 09:53:35 +01001481 snd_usb_endpoint_stop(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001482 return -EPIPE;
1483}
1484
Daniel Mack94c27212012-04-12 13:51:15 +02001485/**
1486 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
1487 *
1488 * @ep: the endpoint to stop (may be NULL)
1489 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001490 * A call to this function will decrement the running count of the endpoint.
Daniel Mack94c27212012-04-12 13:51:15 +02001491 * When the last user has stopped the endpoint, the URBs will
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001492 * actually be deactivated.
Daniel Mack94c27212012-04-12 13:51:15 +02001493 *
1494 * Must be balanced to calls of snd_usb_endpoint_start().
Takashi Iwaib2eb9502012-11-21 08:30:48 +01001495 *
1496 * The caller needs to synchronize the pending stop operation via
1497 * snd_usb_endpoint_sync_pending_stop().
Daniel Mack94c27212012-04-12 13:51:15 +02001498 */
Takashi Iwaib2eb9502012-11-21 08:30:48 +01001499void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001500{
1501 if (!ep)
1502 return;
1503
Takashi Iwai43b81e82020-11-23 09:53:34 +01001504 usb_audio_dbg(ep->chip, "Stopping %s EP 0x%x (running %d)\n",
1505 ep_type_name(ep->type), ep->ep_num,
1506 atomic_read(&ep->running));
Takashi Iwai57234bc2020-11-23 09:53:27 +01001507
Takashi Iwai43b81e82020-11-23 09:53:34 +01001508 if (snd_BUG_ON(!atomic_read(&ep->running)))
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001509 return;
1510
Takashi Iwai988cc172021-04-26 08:33:49 +02001511 if (!atomic_dec_return(&ep->running)) {
1512 if (ep->sync_source)
1513 WRITE_ONCE(ep->sync_source->sync_sink, NULL);
Takashi Iwaid6cda462021-02-06 21:30:50 +01001514 stop_urbs(ep, false);
Takashi Iwai988cc172021-04-26 08:33:49 +02001515 }
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001516}
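/*
 * Usage sketch (illustrative only): start and stop must be balanced, and a
 * pending stop has to be synchronized before the endpoint is set up again.
 *
 *	err = snd_usb_endpoint_start(ep);	// e.g. at PCM trigger START
 *	if (err < 0)
 *		return err;
 *	...
 *	snd_usb_endpoint_stop(ep);		// at PCM trigger STOP
 *	snd_usb_endpoint_sync_pending_stop(ep);	// before the next setup/start
 */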
1517
Daniel Mack94c27212012-04-12 13:51:15 +02001518/**
Takashi Iwai92a586b2014-06-25 14:24:47 +02001519 * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
1520 *
1521 * @ep: the endpoint to release
1522 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001523 * This function ignores the endpoint's running count and will tear
Takashi Iwai92a586b2014-06-25 14:24:47 +02001524 * down all the streaming URBs immediately.
1525 */
1526void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
1527{
Takashi Iwaid6cda462021-02-06 21:30:50 +01001528 release_urbs(ep, true);
Takashi Iwai92a586b2014-06-25 14:24:47 +02001529}
1530
1531/**
Takashi Iwai00272c62021-01-08 08:52:17 +01001532 * snd_usb_endpoint_free_all: Free the resources of an snd_usb_endpoint
Takashi Iwai036f90d2021-02-05 09:28:37 +01001533 * @chip: The chip
Daniel Mack94c27212012-04-12 13:51:15 +02001534 *
Takashi Iwai00272c62021-01-08 08:52:17 +01001535 * This frees all endpoints and their resources
Daniel Mack94c27212012-04-12 13:51:15 +02001536 */
Takashi Iwai00272c62021-01-08 08:52:17 +01001537void snd_usb_endpoint_free_all(struct snd_usb_audio *chip)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001538{
Takashi Iwai00272c62021-01-08 08:52:17 +01001539 struct snd_usb_endpoint *ep, *en;
1540 struct snd_usb_iface_ref *ip, *in;
1541
1542 list_for_each_entry_safe(ep, en, &chip->ep_list, list)
1543 kfree(ep);
1544
1545 list_for_each_entry_safe(ip, in, &chip->iface_ref_list, list)
1546 kfree(ip);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001547}
1548
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001549/*
Daniel Mack94c27212012-04-12 13:51:15 +02001550 * snd_usb_handle_sync_urb: parse a USB sync packet
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001551 *
Daniel Mack94c27212012-04-12 13:51:15 +02001552 * @ep: the endpoint to handle the packet
1553 * @sender: the sending endpoint
1554 * @urb: the received packet
1555 *
1556 * This function is called from the context of an endpoint that received
1557 * the packet and is used to let another endpoint object handle the payload.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001558 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001559static void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
1560 struct snd_usb_endpoint *sender,
1561 const struct urb *urb)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001562{
1563 int shift;
1564 unsigned int f;
1565 unsigned long flags;
1566
1567 snd_BUG_ON(ep == sender);
1568
Daniel Mack94c27212012-04-12 13:51:15 +02001569 /*
1570 * In case the endpoint is operating in implicit feedback mode, prepare
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001571 * a new outbound URB that has the same layout as the received packet
1572 * and add it to the list of pending urbs. queue_pending_output_urbs()
1573 * will take care of them later.
Daniel Mack94c27212012-04-12 13:51:15 +02001574 */
Eldad Zack98ae4722013-04-03 23:18:52 +02001575 if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
Takashi Iwai43b81e82020-11-23 09:53:34 +01001576 atomic_read(&ep->running)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001577
1578 /* implicit feedback case */
1579 int i, bytes = 0;
1580 struct snd_urb_ctx *in_ctx;
1581 struct snd_usb_packet_info *out_packet;
1582
1583 in_ctx = urb->context;
1584
1585 /* Count overall packet size */
1586 for (i = 0; i < in_ctx->packets; i++)
1587 if (urb->iso_frame_desc[i].status == 0)
1588 bytes += urb->iso_frame_desc[i].actual_length;
1589
1590 /*
1591 * skip empty packets. At least M-Audio's Fast Track Ultra stops
1592		 * streaming once it receives a 0-byte OUT URB
1593 */
1594 if (bytes == 0)
1595 return;
1596
1597 spin_lock_irqsave(&ep->lock, flags);
Takashi Iwaic15871e2020-11-23 09:53:32 +01001598 if (ep->next_packet_queued >= ARRAY_SIZE(ep->next_packet)) {
1599 spin_unlock_irqrestore(&ep->lock, flags);
1600 usb_audio_err(ep->chip,
1601 "next package FIFO overflow EP 0x%x\n",
1602 ep->ep_num);
1603 notify_xrun(ep);
1604 return;
1605 }
1606
1607 out_packet = next_packet_fifo_enqueue(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001608
1609 /*
1610 * Iterate through the inbound packet and prepare the lengths
1611 * for the output packet. The OUT packet we are about to send
Eldad Zack28acb122012-11-28 23:55:34 +01001612		 * will have the same number of payload bytes per stride as the
1613 * IN packet we just received. Since the actual size is scaled
1614 * by the stride, use the sender stride to calculate the length
1615 * in case the number of channels differ between the implicitly
1616 * fed-back endpoint and the synchronizing endpoint.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001617 */
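		/*
		 * For example (assuming the sender is a stereo stream with
		 * 3-byte samples): sender->stride is 6 bytes per frame, so an
		 * IN packet with actual_length = 36 becomes an OUT packet of
		 * 6 frames below.
		 */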
1618
1619 out_packet->packets = in_ctx->packets;
1620 for (i = 0; i < in_ctx->packets; i++) {
1621 if (urb->iso_frame_desc[i].status == 0)
1622 out_packet->packet_size[i] =
Eldad Zack28acb122012-11-28 23:55:34 +01001623 urb->iso_frame_desc[i].actual_length / sender->stride;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001624 else
1625 out_packet->packet_size[i] = 0;
1626 }
1627
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001628 spin_unlock_irqrestore(&ep->lock, flags);
1629 queue_pending_output_urbs(ep);
1630
1631 return;
1632 }
1633
Daniel Mack94c27212012-04-12 13:51:15 +02001634 /*
1635	 * process the feedback value after a playback sync packet completes
1636 *
1637 * Full speed devices report feedback values in 10.14 format as samples
1638 * per frame, high speed devices in 16.16 format as samples per
1639 * microframe.
1640 *
1641 * Because the Audio Class 1 spec was written before USB 2.0, many high
1642 * speed devices use a wrong interpretation, some others use an
1643 * entirely different format.
1644 *
1645 * Therefore, we cannot predict what format any particular device uses
1646 * and must detect it automatically.
1647 */
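	/*
	 * A worked case (illustrative only): a full-speed device running at
	 * 44.1 kHz nominally reports 44.1 samples per frame, i.e. roughly
	 * 0x0b0666 in 10.14 format, while ep->freqn holds the 16.16 value
	 * of roughly 0x2c199a; the detection below then settles on
	 * ep->freqshift = 2.
	 */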
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001648
1649 if (urb->iso_frame_desc[0].status != 0 ||
1650 urb->iso_frame_desc[0].actual_length < 3)
1651 return;
1652
1653 f = le32_to_cpup(urb->transfer_buffer);
1654 if (urb->iso_frame_desc[0].actual_length == 3)
1655 f &= 0x00ffffff;
1656 else
1657 f &= 0x0fffffff;
1658
1659 if (f == 0)
1660 return;
1661
Daniel Mackca0dd272016-08-22 08:53:37 +02001662 if (unlikely(sender->tenor_fb_quirk)) {
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001663 /*
Daniel Mackca0dd272016-08-22 08:53:37 +02001664 * Devices based on Tenor 8802 chipsets (TEAC UD-H01
1665 * and others) sometimes change the feedback value
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001666 * by +/- 0x1.0000.
1667 */
1668 if (f < ep->freqn - 0x8000)
Daniel Mack36e1ac32016-08-22 08:53:38 +02001669 f += 0xf000;
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001670 else if (f > ep->freqn + 0x8000)
Daniel Mack36e1ac32016-08-22 08:53:38 +02001671 f -= 0xf000;
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001672 } else if (unlikely(ep->freqshift == INT_MIN)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001673 /*
1674 * The first time we see a feedback value, determine its format
1675 * by shifting it left or right until it matches the nominal
1676 * frequency value. This assumes that the feedback does not
1677 * differ from the nominal value more than +50% or -25%.
1678 */
1679 shift = 0;
1680 while (f < ep->freqn - ep->freqn / 4) {
1681 f <<= 1;
1682 shift++;
1683 }
1684 while (f > ep->freqn + ep->freqn / 2) {
1685 f >>= 1;
1686 shift--;
1687 }
1688 ep->freqshift = shift;
1689 } else if (ep->freqshift >= 0)
1690 f <<= ep->freqshift;
1691 else
1692 f >>= -ep->freqshift;
1693
1694 if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
1695 /*
1696 * If the frequency looks valid, set it.
1697 * This value is referred to in prepare_playback_urb().
1698 */
1699 spin_lock_irqsave(&ep->lock, flags);
1700 ep->freqm = f;
1701 spin_unlock_irqrestore(&ep->lock, flags);
1702 } else {
1703 /*
1704 * Out of range; maybe the shift value is wrong.
1705 * Reset it so that we autodetect again the next time.
1706 */
1707 ep->freqshift = INT_MIN;
1708 }
1709}
1710