Thomas Gleixner1a59d1b82019-05-27 08:55:05 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Daniel Macke5779992010-03-04 19:46:13 +01002/*
Daniel Macke5779992010-03-04 19:46:13 +01003 */
4
Daniel Mackc731bc92011-09-14 12:46:57 +02005#include <linux/gfp.h>
6#include <linux/init.h>
Takashi Iwai80c8a2a2012-01-09 11:37:20 +01007#include <linux/ratelimit.h>
Daniel Mackc731bc92011-09-14 12:46:57 +02008#include <linux/usb.h>
9#include <linux/usb/audio.h>
Daniel Mack8fdff6a2012-04-12 13:51:11 +020010#include <linux/slab.h>
Daniel Mackc731bc92011-09-14 12:46:57 +020011
12#include <sound/core.h>
13#include <sound/pcm.h>
Daniel Mack8fdff6a2012-04-12 13:51:11 +020014#include <sound/pcm_params.h>
Daniel Mackc731bc92011-09-14 12:46:57 +020015
16#include "usbaudio.h"
17#include "helper.h"
18#include "card.h"
19#include "endpoint.h"
20#include "pcm.h"
Takashi Iwaibf6313a2020-11-23 09:53:31 +010021#include "clock.h"
Daniel Mack2b58fd52012-09-04 10:23:07 +020022#include "quirks.h"
Daniel Mackc731bc92011-09-14 12:46:57 +020023
Takashi Iwai5c2b3012021-02-06 21:30:51 +010024enum {
25 EP_STATE_STOPPED,
26 EP_STATE_RUNNING,
27 EP_STATE_STOPPING,
28};
Daniel Mack8fdff6a2012-04-12 13:51:11 +020029
Takashi Iwai00272c62021-01-08 08:52:17 +010030/* interface refcounting */
31struct snd_usb_iface_ref {
32 unsigned char iface;
33 bool need_setup;
34 int opened;
35 struct list_head list;
36};
37
Daniel Mackc731bc92011-09-14 12:46:57 +020038/*
Daniel Mack94c27212012-04-12 13:51:15 +020039 * snd_usb_endpoint is a model that abstracts everything related to a
40 * USB endpoint and its streaming.
41 *
42 * There are functions to activate and deactivate the streaming URBs and
Daniel Mack07a5e9d2012-04-24 19:31:24 +020043 * optional callbacks to let the pcm logic handle the actual content of the
Daniel Mack94c27212012-04-12 13:51:15 +020044 * packets for playback and record. Thus, the bus streaming and the audio
45 * handlers are fully decoupled.
46 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020047 * There are two different types of endpoints in audio applications.
Daniel Mack94c27212012-04-12 13:51:15 +020048 *
49 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
50 * inbound and outbound traffic.
51 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020052 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
53 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
54 * (3 or 4 bytes).
Daniel Mack94c27212012-04-12 13:51:15 +020055 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020056 * Each endpoint has to be configured prior to being used by calling
57 * snd_usb_endpoint_configure().
Daniel Mack94c27212012-04-12 13:51:15 +020058 *
59 * The model incorporates a reference counting, so that multiple users
60 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
61 * only the first user will effectively start the URBs, and only the last
Daniel Mack07a5e9d2012-04-24 19:31:24 +020062 * one to stop it will tear the URBs down again.
Daniel Mack94c27212012-04-12 13:51:15 +020063 */
64
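/*
 * Illustrative sketch (not part of the original source): the typical call
 * order as seen from the PCM code in this driver, simplified and with
 * error handling omitted, looks roughly like this:
 *
 *	snd_usb_add_endpoint(chip, ep_num, type);	// at descriptor parse time
 *	ep = snd_usb_endpoint_open(chip, fp, params, is_sync_ep); // hw_params
 *	snd_usb_endpoint_set_callback(ep, prepare_cb, retire_cb, subs);
 *	snd_usb_endpoint_configure(chip, ep);		// hw_params or prepare
 *	snd_usb_endpoint_start(ep);			// trigger START
 *	...streaming...
 *	snd_usb_endpoint_stop(ep);			// trigger STOP
 *	snd_usb_endpoint_sync_pending_stop(ep);		// wait for URBs to drain
 *	snd_usb_endpoint_close(chip, ep);		// hw_free
 *
 * prepare_cb, retire_cb and subs stand for the PCM-side callbacks and the
 * substream supplied by the caller.
 */
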
65/*
Daniel Mackc731bc92011-09-14 12:46:57 +020066 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
67 * this will overflow at approx 524 kHz
68 */
69static inline unsigned get_usb_full_speed_rate(unsigned int rate)
70{
71 return ((rate << 13) + 62) / 125;
72}
73
74/*
75 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
76 * this will overflow at approx 4 MHz
77 */
78static inline unsigned get_usb_high_speed_rate(unsigned int rate)
79{
80 return ((rate << 10) + 62) / 125;
81}
82
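/*
 * Worked example (illustrative, not from the original source): for a
 * 48 kHz stream, one full speed frame carries 48000 / 1000 = 48 samples,
 * so get_usb_full_speed_rate(48000) returns 48 << 16 = 0x300000; one high
 * speed microframe carries 48000 / 8000 = 6 samples, so
 * get_usb_high_speed_rate(48000) returns 6 << 16 = 0x60000.  A non-integer
 * rate such as 44.1 kHz keeps its fractional part in the low 16 bits
 * (44100 -> roughly 44.1 in Q16.16 at full speed).
 */
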
83/*
Daniel Mackc731bc92011-09-14 12:46:57 +020084 * release an urb's data buffer and the urb itself
85 */
86static void release_urb_ctx(struct snd_urb_ctx *u)
87{
Daniel Mackd399ff92012-04-12 13:51:13 +020088 if (u->buffer_size)
89 usb_free_coherent(u->ep->chip->dev, u->buffer_size,
90 u->urb->transfer_buffer,
91 u->urb->transfer_dma);
92 usb_free_urb(u->urb);
93 u->urb = NULL;
Daniel Mackc731bc92011-09-14 12:46:57 +020094}
95
96static const char *usb_error_string(int err)
97{
98 switch (err) {
99 case -ENODEV:
100 return "no device";
101 case -ENOENT:
102 return "endpoint not enabled";
103 case -EPIPE:
104 return "endpoint stalled";
105 case -ENOSPC:
106 return "not enough bandwidth";
107 case -ESHUTDOWN:
108 return "device disabled";
109 case -EHOSTUNREACH:
110 return "device suspended";
111 case -EINVAL:
112 case -EAGAIN:
113 case -EFBIG:
114 case -EMSGSIZE:
115 return "internal error";
116 default:
117 return "unknown error";
118 }
119}
120
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100121static inline bool ep_state_running(struct snd_usb_endpoint *ep)
122{
123 return atomic_read(&ep->state) == EP_STATE_RUNNING;
124}
125
126static inline bool ep_state_update(struct snd_usb_endpoint *ep, int old, int new)
127{
128 return atomic_cmpxchg(&ep->state, old, new) == old;
129}
130
Daniel Mack94c27212012-04-12 13:51:15 +0200131/**
132 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
133 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200134 * @ep: The snd_usb_endpoint
Daniel Mack94c27212012-04-12 13:51:15 +0200135 *
136 * Determine whether an endpoint is driven by an implicit feedback
137 * data endpoint source.
138 */
Eldad Zack98ae4722013-04-03 23:18:52 +0200139int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200140{
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100141 return ep->implicit_fb_sync && usb_pipeout(ep->pipe);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200142}
143
Daniel Mack94c27212012-04-12 13:51:15 +0200144/*
Takashi Iwai3d587602020-11-23 09:53:37 +0100145 * Return the number of samples to be sent in the next packet
146 * for streaming based on information derived from sync endpoints
Daniel Mack94c27212012-04-12 13:51:15 +0200147 *
Takashi Iwai3d587602020-11-23 09:53:37 +0100148 * This won't be used for implicit feedback which takes the packet size
149 * returned from the sync source
Daniel Mack94c27212012-04-12 13:51:15 +0200150 */
Takashi Iwai3d587602020-11-23 09:53:37 +0100151static int slave_next_packet_size(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200152{
153 unsigned long flags;
154 int ret;
155
156 if (ep->fill_max)
157 return ep->maxframesize;
158
159 spin_lock_irqsave(&ep->lock, flags);
160 ep->phase = (ep->phase & 0xffff)
161 + (ep->freqm << ep->datainterval);
162 ret = min(ep->phase >> 16, ep->maxframesize);
163 spin_unlock_irqrestore(&ep->lock, flags);
164
165 return ret;
166}
167
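/*
 * Illustrative note (not part of the original source): ep->freqm holds the
 * momentary rate in Q16.16 frames per (micro)frame as derived from the
 * sync endpoint feedback, while ep->phase keeps only its fractional part
 * between calls.  With a freqm of roughly 44.1 at full speed, the function
 * above therefore yields mostly 44-frame packets and a 45-frame packet
 * about every tenth call, clamped to ep->maxframesize.
 */
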
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300168/*
Takashi Iwai3d587602020-11-23 09:53:37 +0100169 * Return the number of samples to be sent in the next packet
170 * for adaptive and synchronous endpoints
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300171 */
Takashi Iwai3d587602020-11-23 09:53:37 +0100172static int next_packet_size(struct snd_usb_endpoint *ep)
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300173{
174 int ret;
175
176 if (ep->fill_max)
177 return ep->maxframesize;
178
179 ep->sample_accum += ep->sample_rem;
Alexander Tsoyb9fd2002020-06-29 05:59:34 +0300180 if (ep->sample_accum >= ep->pps) {
181 ep->sample_accum -= ep->pps;
182 ret = ep->packsize[1];
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300183 } else {
Alexander Tsoyb9fd2002020-06-29 05:59:34 +0300184 ret = ep->packsize[0];
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300185 }
186
187 return ret;
188}
189
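/*
 * Worked example (illustrative, not from the original source): at 44.1 kHz
 * on a full speed bus with datainterval 0, snd_usb_endpoint_set_params()
 * yields ep->pps = 1000, ep->sample_rem = 44100 % 1000 = 100 and
 * ep->packsize = {44, 45}.  The accumulator in next_packet_size() above
 * grows by 100 per packet, so every tenth packet carries 45 frames and the
 * others carry 44, averaging exactly 44.1 frames per packet.
 */
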
Takashi Iwai3d587602020-11-23 09:53:37 +0100190/*
191 * snd_usb_endpoint_next_packet_size: Return the number of samples to be sent
192 * in the next packet
193 */
194int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
195 struct snd_urb_ctx *ctx, int idx)
196{
197 if (ctx->packet_size[idx])
198 return ctx->packet_size[idx];
Takashi Iwai53837b42020-11-23 09:53:39 +0100199 else if (ep->sync_source)
Takashi Iwai3d587602020-11-23 09:53:37 +0100200 return slave_next_packet_size(ep);
201 else
202 return next_packet_size(ep);
203}
204
Takashi Iwai96e221f2020-11-23 09:53:28 +0100205static void call_retire_callback(struct snd_usb_endpoint *ep,
206 struct urb *urb)
207{
208 struct snd_usb_substream *data_subs;
209
210 data_subs = READ_ONCE(ep->data_subs);
211 if (data_subs && ep->retire_data_urb)
212 ep->retire_data_urb(data_subs, urb);
213}
214
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200215static void retire_outbound_urb(struct snd_usb_endpoint *ep,
216 struct snd_urb_ctx *urb_ctx)
217{
Takashi Iwai96e221f2020-11-23 09:53:28 +0100218 call_retire_callback(ep, urb_ctx->urb);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200219}
220
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100221static void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
222 struct snd_usb_endpoint *sender,
223 const struct urb *urb);
224
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200225static void retire_inbound_urb(struct snd_usb_endpoint *ep,
226 struct snd_urb_ctx *urb_ctx)
227{
228 struct urb *urb = urb_ctx->urb;
Takashi Iwai53837b42020-11-23 09:53:39 +0100229 struct snd_usb_endpoint *sync_sink;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200230
Daniel Mack2b58fd52012-09-04 10:23:07 +0200231 if (unlikely(ep->skip_packets > 0)) {
232 ep->skip_packets--;
233 return;
234 }
235
Takashi Iwai53837b42020-11-23 09:53:39 +0100236 sync_sink = READ_ONCE(ep->sync_sink);
237 if (sync_sink)
238 snd_usb_handle_sync_urb(sync_sink, ep, urb);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200239
Takashi Iwai96e221f2020-11-23 09:53:28 +0100240 call_retire_callback(ep, urb);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200241}
242
Ricard Wanderlof5cf310e2015-10-19 08:52:51 +0200243static void prepare_silent_urb(struct snd_usb_endpoint *ep,
244 struct snd_urb_ctx *ctx)
245{
246 struct urb *urb = ctx->urb;
247 unsigned int offs = 0;
Ricard Wanderlofe0570442015-10-19 08:52:53 +0200248 unsigned int extra = 0;
249 __le32 packet_length;
Ricard Wanderlof5cf310e2015-10-19 08:52:51 +0200250 int i;
251
Ricard Wanderlofe0570442015-10-19 08:52:53 +0200252 /* For tx_length_quirk, put packet length at start of packet */
253 if (ep->chip->tx_length_quirk)
254 extra = sizeof(packet_length);
255
Ricard Wanderlof5cf310e2015-10-19 08:52:51 +0200256 for (i = 0; i < ctx->packets; ++i) {
Ricard Wanderlofe0570442015-10-19 08:52:53 +0200257 unsigned int offset;
258 unsigned int length;
Ricard Wanderlof5cf310e2015-10-19 08:52:51 +0200259 int counts;
260
Takashi Iwai3d587602020-11-23 09:53:37 +0100261 counts = snd_usb_endpoint_next_packet_size(ep, ctx, i);
Ricard Wanderlofe0570442015-10-19 08:52:53 +0200262 length = counts * ep->stride; /* number of silent bytes */
263 offset = offs * ep->stride + extra * i;
264 urb->iso_frame_desc[i].offset = offset;
265 urb->iso_frame_desc[i].length = length + extra;
266 if (extra) {
267 packet_length = cpu_to_le32(length);
268 memcpy(urb->transfer_buffer + offset,
269 &packet_length, sizeof(packet_length));
270 }
271 memset(urb->transfer_buffer + offset + extra,
272 ep->silence_value, length);
Ricard Wanderlof5cf310e2015-10-19 08:52:51 +0200273 offs += counts;
274 }
275
276 urb->number_of_packets = ctx->packets;
Ricard Wanderlofe0570442015-10-19 08:52:53 +0200277 urb->transfer_buffer_length = offs * ep->stride + ctx->packets * extra;
Ricard Wanderlof5cf310e2015-10-19 08:52:51 +0200278}
279
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200280/*
281 * Prepare a PLAYBACK urb for submission to the bus.
282 */
283static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
284 struct snd_urb_ctx *ctx)
285{
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200286 struct urb *urb = ctx->urb;
287 unsigned char *cp = urb->transfer_buffer;
Takashi Iwai96e221f2020-11-23 09:53:28 +0100288 struct snd_usb_substream *data_subs;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200289
290 urb->dev = ep->chip->dev; /* we need to set this each time */
291
292 switch (ep->type) {
293 case SND_USB_ENDPOINT_TYPE_DATA:
Takashi Iwai96e221f2020-11-23 09:53:28 +0100294 data_subs = READ_ONCE(ep->data_subs);
295 if (data_subs && ep->prepare_data_urb)
296 ep->prepare_data_urb(data_subs, urb);
297 else /* no data provider, so send silence */
Ricard Wanderlof5cf310e2015-10-19 08:52:51 +0200298 prepare_silent_urb(ep, ctx);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200299 break;
300
301 case SND_USB_ENDPOINT_TYPE_SYNC:
302 if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
303 /*
304 * fill the length and offset of each urb descriptor.
305 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
306 */
307 urb->iso_frame_desc[0].length = 4;
308 urb->iso_frame_desc[0].offset = 0;
309 cp[0] = ep->freqn;
310 cp[1] = ep->freqn >> 8;
311 cp[2] = ep->freqn >> 16;
312 cp[3] = ep->freqn >> 24;
313 } else {
314 /*
315 * fill the length and offset of each urb descriptor.
316 * the fixed 10.14 frequency is passed through the pipe.
317 */
318 urb->iso_frame_desc[0].length = 3;
319 urb->iso_frame_desc[0].offset = 0;
320 cp[0] = ep->freqn >> 2;
321 cp[1] = ep->freqn >> 10;
322 cp[2] = ep->freqn >> 18;
323 }
324
325 break;
326 }
327}
328
329/*
330 * Prepare a CAPTURE or SYNC urb for submission to the bus.
331 */
332static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
333 struct snd_urb_ctx *urb_ctx)
334{
335 int i, offs;
336 struct urb *urb = urb_ctx->urb;
337
338 urb->dev = ep->chip->dev; /* we need to set this each time */
339
340 switch (ep->type) {
341 case SND_USB_ENDPOINT_TYPE_DATA:
342 offs = 0;
343 for (i = 0; i < urb_ctx->packets; i++) {
344 urb->iso_frame_desc[i].offset = offs;
345 urb->iso_frame_desc[i].length = ep->curpacksize;
346 offs += ep->curpacksize;
347 }
348
349 urb->transfer_buffer_length = offs;
350 urb->number_of_packets = urb_ctx->packets;
351 break;
352
353 case SND_USB_ENDPOINT_TYPE_SYNC:
354 urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
355 urb->iso_frame_desc[0].offset = 0;
356 break;
357 }
358}
359
Takashi Iwaic15871e2020-11-23 09:53:32 +0100360/* notify an error as XRUN to the assigned PCM data substream */
361static void notify_xrun(struct snd_usb_endpoint *ep)
362{
363 struct snd_usb_substream *data_subs;
364
365 data_subs = READ_ONCE(ep->data_subs);
366 if (data_subs && data_subs->pcm_substream)
367 snd_pcm_stop_xrun(data_subs->pcm_substream);
368}
369
370static struct snd_usb_packet_info *
371next_packet_fifo_enqueue(struct snd_usb_endpoint *ep)
372{
373 struct snd_usb_packet_info *p;
374
375 p = ep->next_packet + (ep->next_packet_head + ep->next_packet_queued) %
376 ARRAY_SIZE(ep->next_packet);
377 ep->next_packet_queued++;
378 return p;
379}
380
381static struct snd_usb_packet_info *
382next_packet_fifo_dequeue(struct snd_usb_endpoint *ep)
383{
384 struct snd_usb_packet_info *p;
385
386 p = ep->next_packet + ep->next_packet_head;
387 ep->next_packet_head++;
388 ep->next_packet_head %= ARRAY_SIZE(ep->next_packet);
389 ep->next_packet_queued--;
390 return p;
391}
392
Daniel Mack94c27212012-04-12 13:51:15 +0200393/*
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200394 * Send output urbs that have been prepared previously. URBs are dequeued
Randy Dunlap0569b3d2020-10-05 12:12:44 -0700395 * from ep->ready_playback_urbs; if none are available, or if no packets
Daniel Mack94c27212012-04-12 13:51:15 +0200396 * have been prepared yet, this function does
397 * nothing.
398 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200399 * The reason why the functionality of sending and preparing URBs is separated
400 * is that host controllers don't guarantee the order in which they return
401 * inbound and outbound packets to their submitters.
Daniel Mack94c27212012-04-12 13:51:15 +0200402 *
403 * This function is only used for implicit feedback endpoints. For endpoints
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200404 * driven by dedicated sync endpoints, URBs are immediately re-submitted
405 * from their completion handler.
Daniel Mack94c27212012-04-12 13:51:15 +0200406 */
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200407static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
408{
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100409 while (ep_state_running(ep)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200410
411 unsigned long flags;
Kees Cook3f649ab2020-06-03 13:09:38 -0700412 struct snd_usb_packet_info *packet;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200413 struct snd_urb_ctx *ctx = NULL;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200414 int err, i;
415
416 spin_lock_irqsave(&ep->lock, flags);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100417 if (ep->next_packet_queued > 0 &&
418 !list_empty(&ep->ready_playback_urbs)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200419 /* take URB out of FIFO */
Takashi Iwaic15871e2020-11-23 09:53:32 +0100420 ctx = list_first_entry(&ep->ready_playback_urbs,
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200421 struct snd_urb_ctx, ready_list);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100422 list_del_init(&ctx->ready_list);
423
424 packet = next_packet_fifo_dequeue(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200425 }
426 spin_unlock_irqrestore(&ep->lock, flags);
427
428 if (ctx == NULL)
429 return;
430
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200431 /* copy over the length information */
432 for (i = 0; i < packet->packets; i++)
433 ctx->packet_size[i] = packet->packet_size[i];
434
Daniel Mack94c27212012-04-12 13:51:15 +0200435 /* call the data handler to fill in playback data */
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200436 prepare_outbound_urb(ep, ctx);
437
438 err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100439 if (err < 0) {
Takashi Iwai0ba41d92014-02-26 13:02:17 +0100440 usb_audio_err(ep->chip,
Takashi Iwaie93e8902020-11-23 09:53:13 +0100441 "Unable to submit urb #%d: %d at %s\n",
442 ctx->index, err, __func__);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100443 notify_xrun(ep);
444 return;
445 }
446
447 set_bit(ctx->index, &ep->active_mask);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200448 }
449}
450
451/*
452 * complete callback for urbs
453 */
454static void snd_complete_urb(struct urb *urb)
455{
456 struct snd_urb_ctx *ctx = urb->context;
457 struct snd_usb_endpoint *ep = ctx->ep;
Takashi Iwai67e22502014-11-06 13:04:49 +0100458 unsigned long flags;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200459 int err;
460
461 if (unlikely(urb->status == -ENOENT || /* unlinked */
462 urb->status == -ENODEV || /* device removed */
463 urb->status == -ECONNRESET || /* unlinked */
Takashi Iwai47ab1542015-08-25 16:09:00 +0200464 urb->status == -ESHUTDOWN)) /* device disabled */
465 goto exit_clear;
466 /* device disconnected */
467 if (unlikely(atomic_read(&ep->chip->shutdown)))
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200468 goto exit_clear;
469
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100470 if (unlikely(!ep_state_running(ep)))
Ioan-Adrian Ratiu13a6c832017-01-05 00:37:47 +0200471 goto exit_clear;
472
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200473 if (usb_pipeout(ep->pipe)) {
474 retire_outbound_urb(ep, ctx);
475 /* can be stopped during retire callback */
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100476 if (unlikely(!ep_state_running(ep)))
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200477 goto exit_clear;
478
Eldad Zack98ae4722013-04-03 23:18:52 +0200479 if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200480 spin_lock_irqsave(&ep->lock, flags);
481 list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100482 clear_bit(ctx->index, &ep->active_mask);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200483 spin_unlock_irqrestore(&ep->lock, flags);
484 queue_pending_output_urbs(ep);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100485 return;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200486 }
487
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200488 prepare_outbound_urb(ep, ctx);
Henry Lin52869932019-11-13 10:14:19 +0800489 /* can be stopped during prepare callback */
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100490 if (unlikely(!ep_state_running(ep)))
Henry Lin52869932019-11-13 10:14:19 +0800491 goto exit_clear;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200492 } else {
493 retire_inbound_urb(ep, ctx);
494 /* can be stopped during retire callback */
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100495 if (unlikely(!ep_state_running(ep)))
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200496 goto exit_clear;
497
498 prepare_inbound_urb(ep, ctx);
499 }
500
501 err = usb_submit_urb(urb, GFP_ATOMIC);
502 if (err == 0)
503 return;
504
Takashi Iwai0ba41d92014-02-26 13:02:17 +0100505 usb_audio_err(ep->chip, "cannot submit urb (err = %d)\n", err);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100506 notify_xrun(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200507
508exit_clear:
509 clear_bit(ctx->index, &ep->active_mask);
510}
511
Takashi Iwaic7474d02020-11-23 09:53:11 +0100512/*
Takashi Iwai00272c62021-01-08 08:52:17 +0100513 * Find or create a refcount object for the given interface
514 *
515 * The objects are released altogether in snd_usb_endpoint_free_all()
516 */
517static struct snd_usb_iface_ref *
518iface_ref_find(struct snd_usb_audio *chip, int iface)
519{
520 struct snd_usb_iface_ref *ip;
521
522 list_for_each_entry(ip, &chip->iface_ref_list, list)
523 if (ip->iface == iface)
524 return ip;
525
526 ip = kzalloc(sizeof(*ip), GFP_KERNEL);
527 if (!ip)
528 return NULL;
529 ip->iface = iface;
530 list_add_tail(&ip->list, &chip->iface_ref_list);
531 return ip;
532}
533
534/*
Takashi Iwai54cb3192020-11-23 09:53:20 +0100535 * Get the existing endpoint object corresponding to the given EP number
Takashi Iwaic7474d02020-11-23 09:53:11 +0100536 * Returns NULL if not present.
Takashi Iwaic7474d02020-11-23 09:53:11 +0100537 */
538struct snd_usb_endpoint *
Takashi Iwai54cb3192020-11-23 09:53:20 +0100539snd_usb_get_endpoint(struct snd_usb_audio *chip, int ep_num)
Takashi Iwaic7474d02020-11-23 09:53:11 +0100540{
541 struct snd_usb_endpoint *ep;
542
543 list_for_each_entry(ep, &chip->ep_list, list) {
Takashi Iwai54cb3192020-11-23 09:53:20 +0100544 if (ep->ep_num == ep_num)
Takashi Iwaic7474d02020-11-23 09:53:11 +0100545 return ep;
546 }
Takashi Iwai54cb3192020-11-23 09:53:20 +0100547
Takashi Iwaic7474d02020-11-23 09:53:11 +0100548 return NULL;
549}
550
Takashi Iwai5a6c3e12020-11-23 09:53:16 +0100551#define ep_type_name(type) \
552 (type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync")
553
Daniel Mack94c27212012-04-12 13:51:15 +0200554/**
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200555 * snd_usb_add_endpoint: Add an endpoint to a USB audio chip
Daniel Mack94c27212012-04-12 13:51:15 +0200556 *
557 * @chip: The chip
Daniel Mack94c27212012-04-12 13:51:15 +0200558 * @ep_num: The number of the endpoint to use
Daniel Mack94c27212012-04-12 13:51:15 +0200559 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
560 *
561 * If the requested endpoint has not been added to the given chip before,
Takashi Iwai54cb3192020-11-23 09:53:20 +0100562 * a new instance is created.
563 *
564 * Returns zero on success or a negative error code.
Daniel Mack94c27212012-04-12 13:51:15 +0200565 *
Takashi Iwai00272c62021-01-08 08:52:17 +0100566 * New endpoints will be added to chip->ep_list and freed by
567 * calling snd_usb_endpoint_free_all().
Takashi Iwai447d6272016-03-15 15:20:58 +0100568 *
569 * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
570 * bNumEndpoints > 1 beforehand.
Daniel Mack94c27212012-04-12 13:51:15 +0200571 */
Takashi Iwai54cb3192020-11-23 09:53:20 +0100572int snd_usb_add_endpoint(struct snd_usb_audio *chip, int ep_num, int type)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200573{
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200574 struct snd_usb_endpoint *ep;
Takashi Iwai54cb3192020-11-23 09:53:20 +0100575 bool is_playback;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200576
Takashi Iwai54cb3192020-11-23 09:53:20 +0100577 ep = snd_usb_get_endpoint(chip, ep_num);
578 if (ep)
579 return 0;
Eldad Zacke7e58df2013-08-03 10:51:15 +0200580
Takashi Iwai54cb3192020-11-23 09:53:20 +0100581 usb_audio_dbg(chip, "Creating new %s endpoint #%x\n",
Takashi Iwai5a6c3e12020-11-23 09:53:16 +0100582 ep_type_name(type),
583 ep_num);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200584 ep = kzalloc(sizeof(*ep), GFP_KERNEL);
585 if (!ep)
Takashi Iwai54cb3192020-11-23 09:53:20 +0100586 return -ENOMEM;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200587
588 ep->chip = chip;
589 spin_lock_init(&ep->lock);
590 ep->type = type;
591 ep->ep_num = ep_num;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200592 INIT_LIST_HEAD(&ep->ready_playback_urbs);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200593
Takashi Iwai54cb3192020-11-23 09:53:20 +0100594 is_playback = ((ep_num & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
595 ep_num &= USB_ENDPOINT_NUMBER_MASK;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200596 if (is_playback)
597 ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
598 else
599 ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);
600
Takashi Iwai54cb3192020-11-23 09:53:20 +0100601 list_add_tail(&ep->list, &chip->ep_list);
602 return 0;
603}
604
605/* Set up syncinterval and syncmaxsize for a sync EP */
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100606static void endpoint_set_syncinterval(struct snd_usb_audio *chip,
607 struct snd_usb_endpoint *ep)
Takashi Iwai54cb3192020-11-23 09:53:20 +0100608{
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100609 struct usb_host_interface *alts;
610 struct usb_endpoint_descriptor *desc;
Takashi Iwai54cb3192020-11-23 09:53:20 +0100611
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100612 alts = snd_usb_get_host_interface(chip, ep->iface, ep->altsetting);
613 if (!alts)
614 return;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200615
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100616 desc = get_endpoint(alts, ep->ep_idx);
617 if (desc->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
618 desc->bRefresh >= 1 && desc->bRefresh <= 9)
619 ep->syncinterval = desc->bRefresh;
620 else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
621 ep->syncinterval = 1;
622 else if (desc->bInterval >= 1 && desc->bInterval <= 16)
623 ep->syncinterval = desc->bInterval - 1;
624 else
625 ep->syncinterval = 3;
626
627 ep->syncmaxsize = le16_to_cpu(desc->wMaxPacketSize);
628}
629
630static bool endpoint_compatible(struct snd_usb_endpoint *ep,
631 const struct audioformat *fp,
632 const struct snd_pcm_hw_params *params)
633{
634 if (!ep->opened)
635 return false;
636 if (ep->cur_audiofmt != fp)
637 return false;
638 if (ep->cur_rate != params_rate(params) ||
639 ep->cur_format != params_format(params) ||
640 ep->cur_period_frames != params_period_size(params) ||
641 ep->cur_buffer_periods != params_periods(params))
642 return false;
643 return true;
644}
645
646/*
647 * Check whether the given fp and hw params are compatible with the current
648 * setup of the target EP for implicit feedback sync
649 */
650bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
651 struct snd_usb_endpoint *ep,
652 const struct audioformat *fp,
653 const struct snd_pcm_hw_params *params)
654{
655 bool ret;
656
657 mutex_lock(&chip->mutex);
658 ret = endpoint_compatible(ep, fp, params);
659 mutex_unlock(&chip->mutex);
660 return ret;
661}
662
663/*
664 * snd_usb_endpoint_open: Open the endpoint
665 *
666 * Called from hw_params to assign the endpoint to the substream.
667 * It's reference-counted, and only the first opener is allowed to set up
668 * arbitrary parameters. Later openers must be compatible with the
669 * parameters chosen by the first opener.
670 * The endpoint needs to be closed via snd_usb_endpoint_close() later.
671 *
672 * Note that this function doesn't configure the endpoint. The substream
673 * needs to set it up later via snd_usb_endpoint_configure().
674 */
675struct snd_usb_endpoint *
676snd_usb_endpoint_open(struct snd_usb_audio *chip,
Takashi Iwaicab941b2020-11-23 09:53:33 +0100677 const struct audioformat *fp,
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100678 const struct snd_pcm_hw_params *params,
679 bool is_sync_ep)
680{
681 struct snd_usb_endpoint *ep;
682 int ep_num = is_sync_ep ? fp->sync_ep : fp->endpoint;
683
684 mutex_lock(&chip->mutex);
685 ep = snd_usb_get_endpoint(chip, ep_num);
686 if (!ep) {
687 usb_audio_err(chip, "Cannot find EP 0x%x to open\n", ep_num);
688 goto unlock;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200689 }
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100690
691 if (!ep->opened) {
692 if (is_sync_ep) {
693 ep->iface = fp->sync_iface;
694 ep->altsetting = fp->sync_altsetting;
695 ep->ep_idx = fp->sync_ep_idx;
696 } else {
697 ep->iface = fp->iface;
698 ep->altsetting = fp->altsetting;
Takashi Iwaieae4d052021-01-08 08:52:18 +0100699 ep->ep_idx = fp->ep_idx;
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100700 }
701 usb_audio_dbg(chip, "Open EP 0x%x, iface=%d:%d, idx=%d\n",
702 ep_num, ep->iface, ep->altsetting, ep->ep_idx);
703
Takashi Iwai00272c62021-01-08 08:52:17 +0100704 ep->iface_ref = iface_ref_find(chip, ep->iface);
705 if (!ep->iface_ref) {
706 ep = NULL;
707 goto unlock;
708 }
709
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100710 ep->cur_audiofmt = fp;
711 ep->cur_channels = fp->channels;
712 ep->cur_rate = params_rate(params);
713 ep->cur_format = params_format(params);
714 ep->cur_frame_bytes = snd_pcm_format_physical_width(ep->cur_format) *
715 ep->cur_channels / 8;
716 ep->cur_period_frames = params_period_size(params);
717 ep->cur_period_bytes = ep->cur_period_frames * ep->cur_frame_bytes;
718 ep->cur_buffer_periods = params_periods(params);
719
720 if (ep->type == SND_USB_ENDPOINT_TYPE_SYNC)
721 endpoint_set_syncinterval(chip, ep);
722
723 ep->implicit_fb_sync = fp->implicit_fb;
724 ep->need_setup = true;
725
726 usb_audio_dbg(chip, " channels=%d, rate=%d, format=%s, period_bytes=%d, periods=%d, implicit_fb=%d\n",
727 ep->cur_channels, ep->cur_rate,
728 snd_pcm_format_name(ep->cur_format),
729 ep->cur_period_bytes, ep->cur_buffer_periods,
730 ep->implicit_fb_sync);
731
732 } else {
Takashi Iwai00272c62021-01-08 08:52:17 +0100733 if (WARN_ON(!ep->iface_ref)) {
734 ep = NULL;
735 goto unlock;
736 }
737
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100738 if (!endpoint_compatible(ep, fp, params)) {
739 usb_audio_err(chip, "Incompatible EP setup for 0x%x\n",
740 ep_num);
741 ep = NULL;
742 goto unlock;
743 }
744
745 usb_audio_dbg(chip, "Reopened EP 0x%x (count %d)\n",
746 ep_num, ep->opened);
747 }
748
Takashi Iwai00272c62021-01-08 08:52:17 +0100749 if (!ep->iface_ref->opened++)
750 ep->iface_ref->need_setup = true;
751
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100752 ep->opened++;
753
754 unlock:
755 mutex_unlock(&chip->mutex);
756 return ep;
757}
758
759/*
760 * snd_usb_endpoint_set_sync: Link data and sync endpoints
761 *
762 * Pass NULL to sync_ep to unlink again
763 */
764void snd_usb_endpoint_set_sync(struct snd_usb_audio *chip,
765 struct snd_usb_endpoint *data_ep,
766 struct snd_usb_endpoint *sync_ep)
767{
Takashi Iwai53837b42020-11-23 09:53:39 +0100768 data_ep->sync_source = sync_ep;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200769}
770
771/*
Takashi Iwai96e221f2020-11-23 09:53:28 +0100772 * Set data endpoint callbacks and the assigned data stream
773 *
774 * Called at PCM trigger and cleanups.
775 * Pass NULL to deactivate each callback.
776 */
777void snd_usb_endpoint_set_callback(struct snd_usb_endpoint *ep,
778 void (*prepare)(struct snd_usb_substream *subs,
779 struct urb *urb),
780 void (*retire)(struct snd_usb_substream *subs,
781 struct urb *urb),
782 struct snd_usb_substream *data_subs)
783{
784 ep->prepare_data_urb = prepare;
785 ep->retire_data_urb = retire;
786 WRITE_ONCE(ep->data_subs, data_subs);
787}
788
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100789static int endpoint_set_interface(struct snd_usb_audio *chip,
790 struct snd_usb_endpoint *ep,
791 bool set)
792{
793 int altset = set ? ep->altsetting : 0;
794 int err;
795
796 usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n",
797 ep->iface, altset, ep->ep_num);
798 err = usb_set_interface(chip->dev, ep->iface, altset);
799 if (err < 0) {
800 usb_audio_err(chip, "%d:%d: usb_set_interface failed (%d)\n",
801 ep->iface, altset, err);
802 return err;
803 }
804
805 snd_usb_set_interface_quirk(chip);
806 return 0;
807}
808
809/*
810 * snd_usb_endpoint_close: Close the endpoint
811 *
812 * Unreference the already opened endpoint via snd_usb_endpoint_open().
813 */
814void snd_usb_endpoint_close(struct snd_usb_audio *chip,
815 struct snd_usb_endpoint *ep)
816{
817 mutex_lock(&chip->mutex);
818 usb_audio_dbg(chip, "Closing EP 0x%x (count %d)\n",
819 ep->ep_num, ep->opened);
Takashi Iwai00272c62021-01-08 08:52:17 +0100820
821 if (!--ep->iface_ref->opened)
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100822 endpoint_set_interface(chip, ep, false);
Takashi Iwai00272c62021-01-08 08:52:17 +0100823
824 if (!--ep->opened) {
Takashi Iwai89fa3f62020-11-23 09:53:40 +0100825 ep->iface = 0;
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100826 ep->altsetting = 0;
827 ep->cur_audiofmt = NULL;
828 ep->cur_rate = 0;
Takashi Iwai00272c62021-01-08 08:52:17 +0100829 ep->iface_ref = NULL;
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100830 usb_audio_dbg(chip, "EP 0x%x closed\n", ep->ep_num);
831 }
832 mutex_unlock(&chip->mutex);
833}
834
835/* Prepare for suspending the EP, called from the main suspend handler */
836void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep)
837{
838 ep->need_setup = true;
Takashi Iwai00272c62021-01-08 08:52:17 +0100839 if (ep->iface_ref)
840 ep->iface_ref->need_setup = true;
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100841}
842
Takashi Iwai96e221f2020-11-23 09:53:28 +0100843/*
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200844 * wait until all urbs are processed.
845 */
846static int wait_clear_urbs(struct snd_usb_endpoint *ep)
847{
848 unsigned long end_time = jiffies + msecs_to_jiffies(1000);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200849 int alive;
850
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100851 if (atomic_read(&ep->state) != EP_STATE_STOPPING)
Takashi Iwaid0f09d12020-11-23 09:53:35 +0100852 return 0;
853
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200854 do {
Joe Perches190006f2012-11-16 23:35:16 -0800855 alive = bitmap_weight(&ep->active_mask, ep->nurbs);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200856 if (!alive)
857 break;
858
859 schedule_timeout_uninterruptible(1);
860 } while (time_before(jiffies, end_time));
861
862 if (alive)
Takashi Iwai0ba41d92014-02-26 13:02:17 +0100863 usb_audio_err(ep->chip,
864 "timeout: still %d active urbs on EP #%x\n",
865 alive, ep->ep_num);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200866
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100867 if (ep_state_update(ep, EP_STATE_STOPPING, EP_STATE_STOPPED)) {
868 ep->sync_sink = NULL;
869 snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);
870 }
Ioan-Adrian Ratiu1d0f9532017-01-05 00:37:46 +0200871
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200872 return 0;
873}
874
Takashi Iwaif58161b2012-11-08 08:52:45 +0100875/* sync the pending stop operation;
876 * this function itself doesn't trigger the stop operation
877 */
878void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
879{
Takashi Iwaid0f09d12020-11-23 09:53:35 +0100880 if (ep)
Takashi Iwaif58161b2012-11-08 08:52:45 +0100881 wait_clear_urbs(ep);
882}
883
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200884/*
Takashi Iwaid6cda462021-02-06 21:30:50 +0100885 * Stop active urbs
Takashi Iwaid0f09d12020-11-23 09:53:35 +0100886 *
Takashi Iwaid6cda462021-02-06 21:30:50 +0100887 * This function moves the EP to the STOPPING state if it is RUNNING.
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200888 */
Takashi Iwaid6cda462021-02-06 21:30:50 +0100889static int stop_urbs(struct snd_usb_endpoint *ep, bool force)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200890{
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200891 unsigned int i;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200892
Takashi Iwaid6cda462021-02-06 21:30:50 +0100893 if (!force && atomic_read(&ep->running))
Takashi Iwaid0f09d12020-11-23 09:53:35 +0100894 return -EBUSY;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200895
Takashi Iwai5c2b3012021-02-06 21:30:51 +0100896 if (!ep_state_update(ep, EP_STATE_RUNNING, EP_STATE_STOPPING))
Takashi Iwaid6cda462021-02-06 21:30:50 +0100897 return 0;
Takashi Iwaid0f09d12020-11-23 09:53:35 +0100898
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200899 INIT_LIST_HEAD(&ep->ready_playback_urbs);
Takashi Iwaic15871e2020-11-23 09:53:32 +0100900 ep->next_packet_head = 0;
901 ep->next_packet_queued = 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200902
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200903 for (i = 0; i < ep->nurbs; i++) {
904 if (test_bit(i, &ep->active_mask)) {
905 if (!test_and_set_bit(i, &ep->unlink_mask)) {
906 struct urb *u = ep->urb[i].urb;
Takashi Iwaiccc16962012-11-21 08:22:52 +0100907 usb_unlink_urb(u);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200908 }
909 }
910 }
911
912 return 0;
913}
914
915/*
916 * release an endpoint's urbs
917 */
Takashi Iwaid6cda462021-02-06 21:30:50 +0100918static int release_urbs(struct snd_usb_endpoint *ep, bool force)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200919{
Takashi Iwaid6cda462021-02-06 21:30:50 +0100920 int i, err;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200921
922 /* route incoming urbs to nirvana */
Takashi Iwai96e221f2020-11-23 09:53:28 +0100923 snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200924
Takashi Iwaid6cda462021-02-06 21:30:50 +0100925 /* stop and unlink urbs */
926 err = stop_urbs(ep, force);
927 if (err)
928 return err;
929
930 wait_clear_urbs(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200931
932 for (i = 0; i < ep->nurbs; i++)
933 release_urb_ctx(&ep->urb[i]);
934
Xu Wang2e5a8e12020-07-27 02:52:08 +0000935 usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
936 ep->syncbuf, ep->sync_dma);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200937
938 ep->syncbuf = NULL;
939 ep->nurbs = 0;
Takashi Iwaid6cda462021-02-06 21:30:50 +0100940 return 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200941}
942
Daniel Mack94c27212012-04-12 13:51:15 +0200943/*
944 * configure a data endpoint
945 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100946static int data_ep_set_params(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200947{
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100948 struct snd_usb_audio *chip = ep->chip;
Alan Stern976b6c02013-09-24 15:51:58 -0400949 unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
950 unsigned int max_packs_per_period, urbs_per_period, urb_packs;
951 unsigned int max_urbs, i;
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100952 const struct audioformat *fmt = ep->cur_audiofmt;
953 int frame_bits = ep->cur_frame_bytes * 8;
954 int tx_length_quirk = (chip->tx_length_quirk &&
Ricard Wanderlof759c90f2015-10-19 08:52:54 +0200955 usb_pipeout(ep->pipe));
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200956
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100957 usb_audio_dbg(chip, "Setting params for data EP 0x%x, pipe 0x%x\n",
958 ep->ep_num, ep->pipe);
959
960 if (ep->cur_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
Daniel Mackd24f5062013-04-17 00:01:38 +0800961 /*
962 * When operating in DSD DOP mode, the size of a sample frame
963 * in hardware differs from the actual physical format width
964 * because we need to make room for the DOP markers.
965 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100966 frame_bits += ep->cur_channels << 3;
Daniel Mackd24f5062013-04-17 00:01:38 +0800967 }
968
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200969 ep->datainterval = fmt->datainterval;
970 ep->stride = frame_bits >> 3;
Nobutaka Okabe01200732016-12-13 02:52:58 +0900971
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100972 switch (ep->cur_format) {
Nobutaka Okabe01200732016-12-13 02:52:58 +0900973 case SNDRV_PCM_FORMAT_U8:
974 ep->silence_value = 0x80;
975 break;
976 case SNDRV_PCM_FORMAT_DSD_U8:
977 case SNDRV_PCM_FORMAT_DSD_U16_LE:
978 case SNDRV_PCM_FORMAT_DSD_U32_LE:
979 case SNDRV_PCM_FORMAT_DSD_U16_BE:
980 case SNDRV_PCM_FORMAT_DSD_U32_BE:
981 ep->silence_value = 0x69;
982 break;
983 default:
984 ep->silence_value = 0;
985 }
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200986
Andreas Papefd1a5052016-12-06 14:46:14 +0900987 /* assume max. frequency is 50% higher than nominal */
988 ep->freqmax = ep->freqn + (ep->freqn >> 1);
Ricard Wanderlofab309652015-10-11 20:54:51 +0200989 /* Round up freqmax to nearest integer in order to calculate maximum
990 * packet size, which must represent a whole number of frames.
991 * This is accomplished by adding 0x0.ffff before converting the
992 * Q16.16 format into integer.
993 * In order to accurately calculate the maximum packet size when
994 * the data interval is more than 1 (i.e. ep->datainterval > 0),
995 * multiply by the data interval prior to rounding. For instance,
996 * a freqmax of 41 kHz will result in a max packet size of 6 (5.125)
997 * frames with a data interval of 1, but 11 (10.25) frames with a
998 * data interval of 2.
999 * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the
1000 * maximum datainterval value of 3, at USB full speed, higher for
1001 * USB high speed, noting that ep->freqmax is in units of
1002 * frames per packet in Q16.16 format.)
1003 */
1004 maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) *
1005 (frame_bits >> 3);
Ricard Wanderlof759c90f2015-10-19 08:52:54 +02001006 if (tx_length_quirk)
1007 maxsize += sizeof(__le32); /* Space for length descriptor */
Clemens Ladisch57e6dae2013-08-08 11:24:55 +02001008 /* but wMaxPacketSize might reduce this */
1009 if (ep->maxpacksize && ep->maxpacksize < maxsize) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001010 /* whatever fits into a max. size packet */
Ricard Wanderlof759c90f2015-10-19 08:52:54 +02001011 unsigned int data_maxsize = maxsize = ep->maxpacksize;
1012
1013 if (tx_length_quirk)
1014 /* Need to remove the length descriptor to calc freq */
1015 data_maxsize -= sizeof(__le32);
1016 ep->freqmax = (data_maxsize / (frame_bits >> 3))
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001017 << (16 - ep->datainterval);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001018 }
1019
1020 if (ep->fill_max)
1021 ep->curpacksize = ep->maxpacksize;
1022 else
1023 ep->curpacksize = maxsize;
1024
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001025 if (snd_usb_get_speed(chip->dev) != USB_SPEED_FULL) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001026 packs_per_ms = 8 >> ep->datainterval;
Alan Stern976b6c02013-09-24 15:51:58 -04001027 max_packs_per_urb = MAX_PACKS_HS;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001028 } else {
Alan Stern976b6c02013-09-24 15:51:58 -04001029 packs_per_ms = 1;
1030 max_packs_per_urb = MAX_PACKS;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001031 }
Takashi Iwai53837b42020-11-23 09:53:39 +01001032 if (ep->sync_source && !ep->implicit_fb_sync)
Alan Stern976b6c02013-09-24 15:51:58 -04001033 max_packs_per_urb = min(max_packs_per_urb,
Takashi Iwai53837b42020-11-23 09:53:39 +01001034 1U << ep->sync_source->syncinterval);
Alan Stern976b6c02013-09-24 15:51:58 -04001035 max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001036
Alan Stern976b6c02013-09-24 15:51:58 -04001037 /*
1038 * Capture endpoints need to use small URBs because there's no way
1039 * to tell in advance where the next period will end, and we don't
1040 * want the next URB to complete much after the period ends.
1041 *
1042 * Playback endpoints with implicit sync must use the same parameters
1043 * as their corresponding capture endpoint.
1044 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001045 if (usb_pipein(ep->pipe) || ep->implicit_fb_sync) {
Alan Stern976b6c02013-09-24 15:51:58 -04001046
Thomas Pugliesea93455e2013-11-26 13:58:15 -06001047 urb_packs = packs_per_ms;
1048 /*
1049 * Wireless devices can poll at a max rate of once per 4ms.
1050 * For dataintervals less than 5, increase the packet count to
1051 * allow the host controller to use bursting to fill in the
1052 * gaps.
1053 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001054 if (snd_usb_get_speed(chip->dev) == USB_SPEED_WIRELESS) {
Thomas Pugliesea93455e2013-11-26 13:58:15 -06001055 int interval = ep->datainterval;
1056 while (interval < 5) {
1057 urb_packs <<= 1;
1058 ++interval;
1059 }
1060 }
Alan Stern976b6c02013-09-24 15:51:58 -04001061 /* make capture URBs <= 1 ms and smaller than a period */
Thomas Pugliesea93455e2013-11-26 13:58:15 -06001062 urb_packs = min(max_packs_per_urb, urb_packs);
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001063 while (urb_packs > 1 && urb_packs * maxsize >= ep->cur_period_bytes)
Alan Stern976b6c02013-09-24 15:51:58 -04001064 urb_packs >>= 1;
1065 ep->nurbs = MAX_URBS;
1066
1067 /*
1068 * Playback endpoints without implicit sync are adjusted so that
1069 * a period fits as evenly as possible in the smallest number of
1070 * URBs. The total number of URBs is adjusted to the size of the
1071 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
1072 */
1073 } else {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001074 /* determine how small a packet can be */
Alan Stern976b6c02013-09-24 15:51:58 -04001075 minsize = (ep->freqn >> (16 - ep->datainterval)) *
1076 (frame_bits >> 3);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001077 /* with sync from device, assume it can be 12% lower */
Takashi Iwai53837b42020-11-23 09:53:39 +01001078 if (ep->sync_source)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001079 minsize -= minsize >> 3;
1080 minsize = max(minsize, 1u);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001081
Alan Stern976b6c02013-09-24 15:51:58 -04001082 /* how many packets will contain an entire ALSA period? */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001083 max_packs_per_period = DIV_ROUND_UP(ep->cur_period_bytes, minsize);
Alan Stern976b6c02013-09-24 15:51:58 -04001084
1085 /* how many URBs will contain a period? */
1086 urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
1087 max_packs_per_urb);
1088 /* how many packets are needed in each URB? */
1089 urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);
1090
1091 /* limit the number of frames in a single URB */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001092 ep->max_urb_frames = DIV_ROUND_UP(ep->cur_period_frames,
1093 urbs_per_period);
Alan Stern976b6c02013-09-24 15:51:58 -04001094
1095 /* try to use enough URBs to contain an entire ALSA buffer */
1096 max_urbs = min((unsigned) MAX_URBS,
1097 MAX_QUEUE * packs_per_ms / urb_packs);
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001098 ep->nurbs = min(max_urbs, urbs_per_period * ep->cur_buffer_periods);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001099 }
1100
1101 /* allocate and initialize data urbs */
1102 for (i = 0; i < ep->nurbs; i++) {
1103 struct snd_urb_ctx *u = &ep->urb[i];
1104 u->index = i;
1105 u->ep = ep;
Alan Stern976b6c02013-09-24 15:51:58 -04001106 u->packets = urb_packs;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001107 u->buffer_size = maxsize * u->packets;
1108
1109 if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
1110 u->packets++; /* for transfer delimiter */
1111 u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
1112 if (!u->urb)
1113 goto out_of_memory;
1114
1115 u->urb->transfer_buffer =
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001116 usb_alloc_coherent(chip->dev, u->buffer_size,
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001117 GFP_KERNEL, &u->urb->transfer_dma);
1118 if (!u->urb->transfer_buffer)
1119 goto out_of_memory;
1120 u->urb->pipe = ep->pipe;
Clemens Ladischc75c5ab2013-04-27 12:10:32 +02001121 u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001122 u->urb->interval = 1 << ep->datainterval;
1123 u->urb->context = u;
1124 u->urb->complete = snd_complete_urb;
1125 INIT_LIST_HEAD(&u->ready_list);
1126 }
1127
1128 return 0;
1129
1130out_of_memory:
Takashi Iwaid6cda462021-02-06 21:30:50 +01001131 release_urbs(ep, false);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001132 return -ENOMEM;
1133}
1134
Daniel Mack94c27212012-04-12 13:51:15 +02001135/*
1136 * configure a sync endpoint
1137 */
Eldad Zack93721032013-10-06 22:31:06 +02001138static int sync_ep_set_params(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001139{
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001140 struct snd_usb_audio *chip = ep->chip;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001141 int i;
1142
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001143 usb_audio_dbg(chip, "Setting params for sync EP 0x%x, pipe 0x%x\n",
1144 ep->ep_num, ep->pipe);
1145
1146 ep->syncbuf = usb_alloc_coherent(chip->dev, SYNC_URBS * 4,
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001147 GFP_KERNEL, &ep->sync_dma);
1148 if (!ep->syncbuf)
1149 return -ENOMEM;
1150
1151 for (i = 0; i < SYNC_URBS; i++) {
1152 struct snd_urb_ctx *u = &ep->urb[i];
1153 u->index = i;
1154 u->ep = ep;
1155 u->packets = 1;
1156 u->urb = usb_alloc_urb(1, GFP_KERNEL);
1157 if (!u->urb)
1158 goto out_of_memory;
1159 u->urb->transfer_buffer = ep->syncbuf + i * 4;
1160 u->urb->transfer_dma = ep->sync_dma + i * 4;
1161 u->urb->transfer_buffer_length = 4;
1162 u->urb->pipe = ep->pipe;
Clemens Ladischc75c5ab2013-04-27 12:10:32 +02001163 u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001164 u->urb->number_of_packets = 1;
1165 u->urb->interval = 1 << ep->syncinterval;
1166 u->urb->context = u;
1167 u->urb->complete = snd_complete_urb;
1168 }
1169
1170 ep->nurbs = SYNC_URBS;
1171
1172 return 0;
1173
1174out_of_memory:
Takashi Iwaid6cda462021-02-06 21:30:50 +01001175 release_urbs(ep, false);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001176 return -ENOMEM;
1177}
1178
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001179/*
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001180 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
Daniel Mack94c27212012-04-12 13:51:15 +02001181 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001182 * Determine the number of URBs to be used on this endpoint.
Daniel Mack94c27212012-04-12 13:51:15 +02001183 * An endpoint must be configured before it can be started.
1184 * An endpoint that is already running cannot be reconfigured.
1185 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001186static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
1187 struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001188{
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001189 const struct audioformat *fmt = ep->cur_audiofmt;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001190 int err;
1191
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001192 /* release old buffers, if any */
Takashi Iwaid6cda462021-02-06 21:30:50 +01001193 err = release_urbs(ep, false);
1194 if (err < 0)
1195 return err;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001196
1197 ep->datainterval = fmt->datainterval;
1198 ep->maxpacksize = fmt->maxpacksize;
Takashi Iwai85f71932012-04-13 12:41:54 +02001199 ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001200
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001201 if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL) {
1202 ep->freqn = get_usb_full_speed_rate(ep->cur_rate);
Alexander Tsoyb9fd2002020-06-29 05:59:34 +03001203 ep->pps = 1000 >> ep->datainterval;
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +03001204 } else {
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001205 ep->freqn = get_usb_high_speed_rate(ep->cur_rate);
Alexander Tsoyb9fd2002020-06-29 05:59:34 +03001206 ep->pps = 8000 >> ep->datainterval;
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +03001207 }
1208
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001209 ep->sample_rem = ep->cur_rate % ep->pps;
1210 ep->packsize[0] = ep->cur_rate / ep->pps;
1211 ep->packsize[1] = (ep->cur_rate + (ep->pps - 1)) / ep->pps;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001212
1213 /* calculate the frequency in 16.16 format */
1214 ep->freqm = ep->freqn;
1215 ep->freqshift = INT_MIN;
1216
1217 ep->phase = 0;
1218
1219 switch (ep->type) {
1220 case SND_USB_ENDPOINT_TYPE_DATA:
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001221 err = data_ep_set_params(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001222 break;
1223 case SND_USB_ENDPOINT_TYPE_SYNC:
Eldad Zack93721032013-10-06 22:31:06 +02001224 err = sync_ep_set_params(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001225 break;
1226 default:
1227 err = -EINVAL;
1228 }
1229
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001230 usb_audio_dbg(chip, "Set up %d URBS, ret=%d\n", ep->nurbs, err);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001231
Takashi Iwai5a6c3e12020-11-23 09:53:16 +01001232 if (err < 0)
1233 return err;
1234
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001235 /* some unit conversions in runtime */
1236 ep->maxframesize = ep->maxpacksize / ep->cur_frame_bytes;
1237 ep->curframesize = ep->curpacksize / ep->cur_frame_bytes;
Takashi Iwai5a6c3e12020-11-23 09:53:16 +01001238
1239 return 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001240}
1241
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001242/*
1243 * snd_usb_endpoint_configure: Configure the endpoint
1244 *
1245 * This function sets up the EP into a fully usable state.
1246 * It's called either from the hw_params or the prepare callback.
1247 * The function checks the need_setup flag, and does nothing unless needed,
1248 * so it's safe to call this multiple times.
1249 *
1250 * This returns zero if unchanged, 1 if the configuration has changed,
1251 * or a negative error code.
1252 */
1253int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
1254 struct snd_usb_endpoint *ep)
1255{
1256 bool iface_first;
1257 int err = 0;
1258
1259 mutex_lock(&chip->mutex);
Takashi Iwai00272c62021-01-08 08:52:17 +01001260 if (WARN_ON(!ep->iface_ref))
1261 goto unlock;
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001262 if (!ep->need_setup)
1263 goto unlock;
1264
Takashi Iwai00272c62021-01-08 08:52:17 +01001265 /* If the interface has been already set up, just set EP parameters */
1266 if (!ep->iface_ref->need_setup) {
Takashi Iwai3784d442021-01-18 08:58:15 +01001267 /* sample rate setup of UAC1 is per endpoint, and we need
1268 * to update at each EP configuration
1269 */
1270 if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
1271 err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
1272 ep->cur_rate);
1273 if (err < 0)
1274 goto unlock;
1275 }
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001276 err = snd_usb_endpoint_set_params(chip, ep);
1277 if (err < 0)
1278 goto unlock;
1279 goto done;
1280 }
1281
1282 /* Need to deselect altsetting at first */
1283 endpoint_set_interface(chip, ep, false);
1284
1285 /* Some UAC1 devices (e.g. Yamaha THR10) need the host interface
1286 * to be set up before parameter setups
1287 */
1288 iface_first = ep->cur_audiofmt->protocol == UAC_VERSION_1;
1289 if (iface_first) {
1290 err = endpoint_set_interface(chip, ep, true);
1291 if (err < 0)
1292 goto unlock;
1293 }
1294
1295 err = snd_usb_init_pitch(chip, ep->cur_audiofmt);
1296 if (err < 0)
1297 goto unlock;
1298
1299 err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, ep->cur_rate);
1300 if (err < 0)
1301 goto unlock;
1302
1303 err = snd_usb_endpoint_set_params(chip, ep);
1304 if (err < 0)
1305 goto unlock;
1306
1307 err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
1308 if (err < 0)
1309 goto unlock;
1310
1311 /* for UAC2/3, enable the interface altset here at last */
1312 if (!iface_first) {
1313 err = endpoint_set_interface(chip, ep, true);
1314 if (err < 0)
1315 goto unlock;
1316 }
1317
Takashi Iwai00272c62021-01-08 08:52:17 +01001318 ep->iface_ref->need_setup = false;
1319
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001320 done:
1321 ep->need_setup = false;
1322 err = 1;
1323
1324unlock:
1325 mutex_unlock(&chip->mutex);
1326 return err;
1327}
1328
Daniel Mack94c27212012-04-12 13:51:15 +02001329/**
1330 * snd_usb_endpoint_start: start an snd_usb_endpoint
1331 *
Ioan-Adrian Ratiu1d0f9532017-01-05 00:37:46 +02001332 * @ep: the endpoint to start
Daniel Mack94c27212012-04-12 13:51:15 +02001333 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001334 * A call to this function will increment the running count of the endpoint.
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001335 * In case it is not already running, the URBs for this endpoint will be
Daniel Mack94c27212012-04-12 13:51:15 +02001336 * submitted. Otherwise, this function does nothing.
1337 *
1338 * Must be balanced to calls of snd_usb_endpoint_stop().
1339 *
1340 * Returns an error if the URB submission failed, 0 in all other cases.
1341 */
Ioan-Adrian Ratiu1d0f9532017-01-05 00:37:46 +02001342int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001343{
1344 int err;
1345 unsigned int i;
1346
Takashi Iwai47ab1542015-08-25 16:09:00 +02001347 if (atomic_read(&ep->chip->shutdown))
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001348 return -EBADFD;
1349
Takashi Iwai53837b42020-11-23 09:53:39 +01001350 if (ep->sync_source)
1351 WRITE_ONCE(ep->sync_source->sync_sink, ep);
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001352
Takashi Iwai43b81e82020-11-23 09:53:34 +01001353 usb_audio_dbg(ep->chip, "Starting %s EP 0x%x (running %d)\n",
1354 ep_type_name(ep->type), ep->ep_num,
1355 atomic_read(&ep->running));
Takashi Iwai57234bc2020-11-23 09:53:27 +01001356
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001357 /* already running? */
Takashi Iwai43b81e82020-11-23 09:53:34 +01001358 if (atomic_inc_return(&ep->running) != 1)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001359 return 0;
1360
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001361 ep->active_mask = 0;
1362 ep->unlink_mask = 0;
1363 ep->phase = 0;
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +03001364 ep->sample_accum = 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001365
Daniel Mack2b58fd52012-09-04 10:23:07 +02001366 snd_usb_endpoint_start_quirk(ep);
1367
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001368 /*
1369 * If this endpoint has a data endpoint as implicit feedback source,
1370 * don't start the urbs here. Instead, mark them all as available,
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001371 * wait for the record urbs to return and queue the playback urbs
1372 * from that context.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001373 */
1374
Takashi Iwai5c2b3012021-02-06 21:30:51 +01001375 if (!ep_state_update(ep, EP_STATE_STOPPED, EP_STATE_RUNNING))
1376 goto __error;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001377
Takashi Iwaiebe8dc52021-04-14 10:32:55 +02001378 if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
1379 !ep->chip->playback_first) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001380 for (i = 0; i < ep->nurbs; i++) {
1381 struct snd_urb_ctx *ctx = ep->urb + i;
1382 list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
1383 }
1384
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001385 usb_audio_dbg(ep->chip, "No URB submission due to implicit fb sync\n");
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001386 return 0;
1387 }
1388
1389 for (i = 0; i < ep->nurbs; i++) {
1390 struct urb *urb = ep->urb[i].urb;
1391
1392 if (snd_BUG_ON(!urb))
1393 goto __error;
1394
1395 if (usb_pipeout(ep->pipe)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001396 prepare_outbound_urb(ep, urb->context);
1397 } else {
1398 prepare_inbound_urb(ep, urb->context);
1399 }
1400
1401 err = usb_submit_urb(urb, GFP_ATOMIC);
1402 if (err < 0) {
Takashi Iwai0ba41d92014-02-26 13:02:17 +01001403 usb_audio_err(ep->chip,
1404 "cannot submit urb %d, error %d: %s\n",
1405 i, err, usb_error_string(err));
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001406 goto __error;
1407 }
1408 set_bit(i, &ep->active_mask);
1409 }
1410
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001411 usb_audio_dbg(ep->chip, "%d URBs submitted for EP 0x%x\n",
1412 ep->nurbs, ep->ep_num);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001413 return 0;
1414
1415__error:
Takashi Iwaid0f09d12020-11-23 09:53:35 +01001416 snd_usb_endpoint_stop(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001417 return -EPIPE;
1418}
1419
Daniel Mack94c27212012-04-12 13:51:15 +02001420/**
1421 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
1422 *
1423 * @ep: the endpoint to stop (may be NULL)
1424 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001425 * A call to this function will decrement the running count of the endpoint.
Daniel Mack94c27212012-04-12 13:51:15 +02001426 * In case the last user has requested the endpoint stop, the URBs will
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001427 * actually be deactivated.
Daniel Mack94c27212012-04-12 13:51:15 +02001428 *
1429 * Must be balanced to calls of snd_usb_endpoint_start().
Takashi Iwaib2eb9502012-11-21 08:30:48 +01001430 *
1431 * The caller needs to synchronize the pending stop operation via
1432 * snd_usb_endpoint_sync_pending_stop().
Daniel Mack94c27212012-04-12 13:51:15 +02001433 */
Takashi Iwaib2eb9502012-11-21 08:30:48 +01001434void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001435{
1436 if (!ep)
1437 return;
1438
Takashi Iwai43b81e82020-11-23 09:53:34 +01001439 usb_audio_dbg(ep->chip, "Stopping %s EP 0x%x (running %d)\n",
1440 ep_type_name(ep->type), ep->ep_num,
1441 atomic_read(&ep->running));
Takashi Iwai57234bc2020-11-23 09:53:27 +01001442
Takashi Iwai43b81e82020-11-23 09:53:34 +01001443 if (snd_BUG_ON(!atomic_read(&ep->running)))
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001444 return;
1445
Takashi Iwai53837b42020-11-23 09:53:39 +01001446 if (ep->sync_source)
1447 WRITE_ONCE(ep->sync_source->sync_sink, NULL);
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001448
Takashi Iwaid0f09d12020-11-23 09:53:35 +01001449 if (!atomic_dec_return(&ep->running))
Takashi Iwaid6cda462021-02-06 21:30:50 +01001450 stop_urbs(ep, false);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001451}
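/*
 * Illustrative caller pattern (a sketch, not a verbatim copy of the PCM
 * code): every successful snd_usb_endpoint_start() has to be balanced by
 * an snd_usb_endpoint_stop(), and the pending stop must be synced before
 * the endpoint is re-configured or closed:
 *
 *	err = snd_usb_endpoint_start(ep);
 *	if (err < 0)
 *		return err;
 *	...
 *	snd_usb_endpoint_stop(ep);
 *	snd_usb_endpoint_sync_pending_stop(ep);
 */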
1452
Daniel Mack94c27212012-04-12 13:51:15 +02001453/**
Takashi Iwai92a586b2014-06-25 14:24:47 +02001454 * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
1455 *
1456 * @ep: the endpoint to release
1457 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001458 * This function ignores the endpoint's running count and will tear
Takashi Iwai92a586b2014-06-25 14:24:47 +02001459 * down all the streaming URBs immediately.
1460 */
1461void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
1462{
Takashi Iwaid6cda462021-02-06 21:30:50 +01001463 release_urbs(ep, true);
Takashi Iwai92a586b2014-06-25 14:24:47 +02001464}
1465
1466/**
Takashi Iwai00272c62021-01-08 08:52:17 +01001467 * snd_usb_endpoint_free_all: Free the resources of all snd_usb_endpoints
Takashi Iwai036f90d2021-02-05 09:28:37 +01001468 * @chip: The chip
Daniel Mack94c27212012-04-12 13:51:15 +02001469 *
Takashi Iwai00272c62021-01-08 08:52:17 +01001470 * This frees all the endpoints and their resources
Daniel Mack94c27212012-04-12 13:51:15 +02001471 */
Takashi Iwai00272c62021-01-08 08:52:17 +01001472void snd_usb_endpoint_free_all(struct snd_usb_audio *chip)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001473{
Takashi Iwai00272c62021-01-08 08:52:17 +01001474 struct snd_usb_endpoint *ep, *en;
1475 struct snd_usb_iface_ref *ip, *in;
1476
1477 list_for_each_entry_safe(ep, en, &chip->ep_list, list)
1478 kfree(ep);
1479
1480 list_for_each_entry_safe(ip, in, &chip->iface_ref_list, list)
1481 kfree(ip);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001482}
1483
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001484/*
Daniel Mack94c27212012-04-12 13:51:15 +02001485 * snd_usb_handle_sync_urb: parse a USB sync packet
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001486 *
Daniel Mack94c27212012-04-12 13:51:15 +02001487 * @ep: the endpoint to handle the packet
1488 * @sender: the sending endpoint
1489 * @urb: the received packet
1490 *
1491 * This function is called from the context of an endpoint that received
1492 * the packet and is used to let another endpoint object handle the payload.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001493 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001494static void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
1495 struct snd_usb_endpoint *sender,
1496 const struct urb *urb)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001497{
1498 int shift;
1499 unsigned int f;
1500 unsigned long flags;
1501
1502 snd_BUG_ON(ep == sender);
1503
Daniel Mack94c27212012-04-12 13:51:15 +02001504 /*
1505 * In case the endpoint is operating in implicit feedback mode, prepare
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001506 * a new outbound URB that has the same layout as the received packet
1507 * and add it to the list of pending urbs. queue_pending_output_urbs()
1508 * will take care of them later.
Daniel Mack94c27212012-04-12 13:51:15 +02001509 */
Eldad Zack98ae4722013-04-03 23:18:52 +02001510 if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
Takashi Iwai43b81e82020-11-23 09:53:34 +01001511 atomic_read(&ep->running)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001512
1513 /* implicit feedback case */
1514 int i, bytes = 0;
1515 struct snd_urb_ctx *in_ctx;
1516 struct snd_usb_packet_info *out_packet;
1517
1518 in_ctx = urb->context;
1519
1520 /* Count overall packet size */
1521 for (i = 0; i < in_ctx->packets; i++)
1522 if (urb->iso_frame_desc[i].status == 0)
1523 bytes += urb->iso_frame_desc[i].actual_length;
1524
1525 /*
1526		 * Skip empty packets. At least M-Audio's Fast Track Ultra stops
1527		 * streaming once it receives a 0-byte OUT URB
1528 */
1529 if (bytes == 0)
1530 return;
1531
1532 spin_lock_irqsave(&ep->lock, flags);
Takashi Iwaic15871e2020-11-23 09:53:32 +01001533 if (ep->next_packet_queued >= ARRAY_SIZE(ep->next_packet)) {
1534 spin_unlock_irqrestore(&ep->lock, flags);
1535 usb_audio_err(ep->chip,
1536					      "next packet FIFO overflow EP 0x%x\n",
1537 ep->ep_num);
1538 notify_xrun(ep);
1539 return;
1540 }
1541
1542 out_packet = next_packet_fifo_enqueue(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001543
1544 /*
1545 * Iterate through the inbound packet and prepare the lengths
1546 * for the output packet. The OUT packet we are about to send
Eldad Zack28acb122012-11-28 23:55:34 +01001547 * will have the same amount of payload bytes per stride as the
1548 * IN packet we just received. Since the actual size is scaled
1549 * by the stride, use the sender stride to calculate the length
1550 * in case the number of channels differ between the implicitly
1551 * fed-back endpoint and the synchronizing endpoint.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001552 */
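		/*
		 * For example (illustration only): a stereo S32_LE capture
		 * stream has a stride of 8 bytes, so an inbound packet of
		 * 48 bytes yields an outbound packet_size of 6 frames; the
		 * playback URB is then filled with 6 frames of its own
		 * channel and sample layout.
		 */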
1553
1554 out_packet->packets = in_ctx->packets;
1555 for (i = 0; i < in_ctx->packets; i++) {
1556 if (urb->iso_frame_desc[i].status == 0)
1557 out_packet->packet_size[i] =
Eldad Zack28acb122012-11-28 23:55:34 +01001558 urb->iso_frame_desc[i].actual_length / sender->stride;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001559 else
1560 out_packet->packet_size[i] = 0;
1561 }
1562
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001563 spin_unlock_irqrestore(&ep->lock, flags);
1564 queue_pending_output_urbs(ep);
1565
1566 return;
1567 }
1568
Daniel Mack94c27212012-04-12 13:51:15 +02001569 /*
1570 * process after playback sync complete
1571 *
1572 * Full speed devices report feedback values in 10.14 format as samples
1573 * per frame, high speed devices in 16.16 format as samples per
1574 * microframe.
1575 *
1576 * Because the Audio Class 1 spec was written before USB 2.0, many high
1577	 * speed devices use the wrong interpretation, and some others use an
1578 * entirely different format.
1579 *
1580 * Therefore, we cannot predict what format any particular device uses
1581 * and must detect it automatically.
1582 */
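	/*
	 * Worked example at 48 kHz (illustration): a conforming full speed
	 * device reports 48.0 samples/frame in Q10.14 (0x0c0000), while
	 * ep->freqn is kept in Q16.16 (0x300000), so the detected shift is
	 * +2.  A conforming high speed device reports 6.0 samples/microframe
	 * in Q16.16 (0x060000), which matches freqn directly (shift 0); a
	 * device that wrongly reports 10.14 samples/frame instead ends up
	 * with a negative shift.
	 */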
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001583
1584 if (urb->iso_frame_desc[0].status != 0 ||
1585 urb->iso_frame_desc[0].actual_length < 3)
1586 return;
1587
1588 f = le32_to_cpup(urb->transfer_buffer);
1589 if (urb->iso_frame_desc[0].actual_length == 3)
1590 f &= 0x00ffffff;
1591 else
1592 f &= 0x0fffffff;
1593
1594 if (f == 0)
1595 return;
1596
Daniel Mackca0dd272016-08-22 08:53:37 +02001597 if (unlikely(sender->tenor_fb_quirk)) {
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001598 /*
Daniel Mackca0dd272016-08-22 08:53:37 +02001599 * Devices based on Tenor 8802 chipsets (TEAC UD-H01
1600 * and others) sometimes change the feedback value
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001601 * by +/- 0x1.0000.
1602 */
1603 if (f < ep->freqn - 0x8000)
Daniel Mack36e1ac32016-08-22 08:53:38 +02001604 f += 0xf000;
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001605 else if (f > ep->freqn + 0x8000)
Daniel Mack36e1ac32016-08-22 08:53:38 +02001606 f -= 0xf000;
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001607 } else if (unlikely(ep->freqshift == INT_MIN)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001608 /*
1609 * The first time we see a feedback value, determine its format
1610 * by shifting it left or right until it matches the nominal
1611 * frequency value. This assumes that the feedback does not
1612		 * differ from the nominal value by more than +50% or -25%.
1613 */
1614 shift = 0;
1615 while (f < ep->freqn - ep->freqn / 4) {
1616 f <<= 1;
1617 shift++;
1618 }
1619 while (f > ep->freqn + ep->freqn / 2) {
1620 f >>= 1;
1621 shift--;
1622 }
1623 ep->freqshift = shift;
1624 } else if (ep->freqshift >= 0)
1625 f <<= ep->freqshift;
1626 else
1627 f >>= -ep->freqshift;
1628
1629 if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
1630 /*
1631 * If the frequency looks valid, set it.
1632 * This value is referred to in prepare_playback_urb().
1633 */
1634 spin_lock_irqsave(&ep->lock, flags);
1635 ep->freqm = f;
1636 spin_unlock_irqrestore(&ep->lock, flags);
1637 } else {
1638 /*
1639 * Out of range; maybe the shift value is wrong.
1640 * Reset it so that we autodetect again the next time.
1641 */
1642 ep->freqshift = INT_MIN;
1643 }
1644}
1645