// SPDX-License-Identifier: GPL-2.0-or-later
/*
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "endpoint.h"
#include "pcm.h"
#include "clock.h"
#include "quirks.h"

enum {
	EP_STATE_STOPPED,
	EP_STATE_RUNNING,
	EP_STATE_STOPPING,
};

/* interface refcounting */
struct snd_usb_iface_ref {
	unsigned char iface;
	bool need_setup;
	int opened;
	struct list_head list;
};

/*
 * snd_usb_endpoint is a model that abstracts everything related to a
 * USB endpoint and its streaming.
 *
 * There are functions to activate and deactivate the streaming URBs and
 * optional callbacks to let the PCM logic handle the actual content of the
 * packets for playback and record. Thus, the bus streaming and the audio
 * handlers are fully decoupled.
 *
 * There are two different types of endpoints in audio applications.
 *
 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
 * inbound and outbound traffic.
 *
 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
 * (3 or 4 bytes).
 *
 * Each endpoint has to be configured prior to being used by calling
 * snd_usb_endpoint_set_params().
 *
 * The model incorporates reference counting, so that multiple users
 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
 * only the first user will effectively start the URBs, and only the last
 * one to stop it will tear the URBs down again.
 */

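/*
 * Rough life-cycle sketch (illustrative note, based only on the comments in
 * this file, not part of the original source): a substream opens an endpoint
 * with snd_usb_endpoint_open(), sets it up via snd_usb_endpoint_configure()
 * (which ends up in snd_usb_endpoint_set_params()), starts and stops the
 * refcounted streaming with snd_usb_endpoint_start()/snd_usb_endpoint_stop(),
 * then releases it with snd_usb_endpoint_close(); all endpoint objects are
 * eventually freed by snd_usb_endpoint_free_all().
 */
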
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}

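/*
 * Worked example (illustrative, not from the original source): at 48000 Hz,
 * get_usb_full_speed_rate() returns ((48000 << 13) + 62) / 125 = 0x300000,
 * i.e. 48.0 frames per 1 ms frame in Q16.16, while get_usb_high_speed_rate()
 * returns 0x60000, i.e. 6.0 frames per 125 us microframe.  The "+ 62" term
 * is roughly half of the divisor 125 and rounds to the nearest value.
 */
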
/*
 * release a urb context's data buffer and urb
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->buffer_size)
		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}

static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

static inline bool ep_state_running(struct snd_usb_endpoint *ep)
{
	return atomic_read(&ep->state) == EP_STATE_RUNNING;
}

static inline bool ep_state_update(struct snd_usb_endpoint *ep, int old, int new)
{
	return atomic_cmpxchg(&ep->state, old, new) == old;
}

/**
 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
 *
 * @ep: The snd_usb_endpoint
 *
 * Determine whether an endpoint is driven by an implicit feedback
 * data endpoint source.
 */
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
{
	return ep->implicit_fb_sync && usb_pipeout(ep->pipe);
}

/*
 * Return the number of samples to be sent in the next packet
 * for streaming based on information derived from sync endpoints
 *
 * This won't be used for implicit feedback which takes the packet size
 * returned from the sync source
 */
static int slave_next_packet_size(struct snd_usb_endpoint *ep,
				  unsigned int avail)
{
	unsigned long flags;
	unsigned int phase;
	int ret;

	if (ep->fill_max)
		return ep->maxframesize;

	spin_lock_irqsave(&ep->lock, flags);
	phase = (ep->phase & 0xffff) + (ep->freqm << ep->datainterval);
	ret = min(phase >> 16, ep->maxframesize);
	if (avail && ret >= avail)
		ret = -EAGAIN;
	else
		ep->phase = phase;
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}

/*
 * Return the number of samples to be sent in the next packet
 * for adaptive and synchronous endpoints
 */
static int next_packet_size(struct snd_usb_endpoint *ep, unsigned int avail)
{
	unsigned int sample_accum;
	int ret;

	if (ep->fill_max)
		return ep->maxframesize;

	sample_accum = ep->sample_accum + ep->sample_rem;
	if (sample_accum >= ep->pps) {
		sample_accum -= ep->pps;
		ret = ep->packsize[1];
	} else {
		ret = ep->packsize[0];
	}
	if (avail && ret >= avail)
		ret = -EAGAIN;
	else
		ep->sample_accum = sample_accum;

	return ret;
}

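/*
 * Worked example for the accumulator above (illustrative, not from the
 * original source): 44100 Hz at full speed with datainterval 0 gives
 * pps = 1000, packsize[0] = 44, packsize[1] = 45 and sample_rem = 100
 * (see snd_usb_endpoint_set_params()).  The accumulator grows by 100 per
 * packet, so every tenth packet carries 45 frames instead of 44, which
 * averages out to exactly 44.1 frames per millisecond.
 */
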
/*
 * snd_usb_endpoint_next_packet_size: Return the number of samples to be sent
 * in the next packet
 *
 * If the size equals or exceeds @avail, don't proceed but return -EAGAIN
 * Exception: @avail = 0 for skipping the check.
 */
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
				      struct snd_urb_ctx *ctx, int idx,
				      unsigned int avail)
{
	unsigned int packet;

	packet = ctx->packet_size[idx];
	if (packet) {
		if (avail && packet >= avail)
			return -EAGAIN;
		return packet;
	}

	if (ep->sync_source)
		return slave_next_packet_size(ep, avail);
	else
		return next_packet_size(ep, avail);
}

static void call_retire_callback(struct snd_usb_endpoint *ep,
				 struct urb *urb)
{
	struct snd_usb_substream *data_subs;

	data_subs = READ_ONCE(ep->data_subs);
	if (data_subs && ep->retire_data_urb)
		ep->retire_data_urb(data_subs, urb);
}

static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	call_retire_callback(ep, urb_ctx->urb);
}

static void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
				    struct snd_usb_endpoint *sender,
				    const struct urb *urb);

static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;
	struct snd_usb_endpoint *sync_sink;

	if (unlikely(ep->skip_packets > 0)) {
		ep->skip_packets--;
		return;
	}

	sync_sink = READ_ONCE(ep->sync_sink);
	if (sync_sink)
		snd_usb_handle_sync_urb(sync_sink, ep, urb);

	call_retire_callback(ep, urb);
}

static inline bool has_tx_length_quirk(struct snd_usb_audio *chip)
{
	return chip->quirk_flags & QUIRK_FLAG_TX_LENGTH;
}

static void prepare_silent_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *ctx)
{
	struct urb *urb = ctx->urb;
	unsigned int offs = 0;
	unsigned int extra = 0;
	__le32 packet_length;
	int i;

	/* For tx_length_quirk, put packet length at start of packet */
	if (has_tx_length_quirk(ep->chip))
		extra = sizeof(packet_length);

	for (i = 0; i < ctx->packets; ++i) {
		unsigned int offset;
		unsigned int length;
		int counts;

		counts = snd_usb_endpoint_next_packet_size(ep, ctx, i, 0);
		length = counts * ep->stride; /* number of silent bytes */
		offset = offs * ep->stride + extra * i;
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = length + extra;
		if (extra) {
			packet_length = cpu_to_le32(length);
			memcpy(urb->transfer_buffer + offset,
			       &packet_length, sizeof(packet_length));
		}
		memset(urb->transfer_buffer + offset + extra,
		       ep->silence_value, length);
		offs += counts;
	}

	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * ep->stride + ctx->packets * extra;
	ctx->queued = 0;
}

/*
 * Prepare a PLAYBACK urb for submission to the bus.
 */
static int prepare_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *ctx,
				bool in_stream_lock)
{
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;
	struct snd_usb_substream *data_subs;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		data_subs = READ_ONCE(ep->data_subs);
		if (data_subs && ep->prepare_data_urb)
			return ep->prepare_data_urb(data_subs, urb, in_stream_lock);
		/* no data provider, so send silence */
		prepare_silent_urb(ep, ctx);
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the pipe.
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
	return 0;
}

/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 */
static int prepare_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
	return 0;
}

/* notify an error as XRUN to the assigned PCM data substream */
static void notify_xrun(struct snd_usb_endpoint *ep)
{
	struct snd_usb_substream *data_subs;

	data_subs = READ_ONCE(ep->data_subs);
	if (data_subs && data_subs->pcm_substream)
		snd_pcm_stop_xrun(data_subs->pcm_substream);
}

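/*
 * Illustrative note (not from the original source): the two helpers below
 * treat ep->next_packet as a ring buffer of per-URB packet size records;
 * enqueue appends at (head + queued) % ARRAY_SIZE() while dequeue pops from
 * head.  The dequeue side in snd_usb_queue_pending_output_urbs() runs with
 * ep->lock held.
 */
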
static struct snd_usb_packet_info *
next_packet_fifo_enqueue(struct snd_usb_endpoint *ep)
{
	struct snd_usb_packet_info *p;

	p = ep->next_packet + (ep->next_packet_head + ep->next_packet_queued) %
		ARRAY_SIZE(ep->next_packet);
	ep->next_packet_queued++;
	return p;
}

static struct snd_usb_packet_info *
next_packet_fifo_dequeue(struct snd_usb_endpoint *ep)
{
	struct snd_usb_packet_info *p;

	p = ep->next_packet + ep->next_packet_head;
	ep->next_packet_head++;
	ep->next_packet_head %= ARRAY_SIZE(ep->next_packet);
	ep->next_packet_queued--;
	return p;
}

static void push_back_to_ready_list(struct snd_usb_endpoint *ep,
				    struct snd_urb_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
	spin_unlock_irqrestore(&ep->lock, flags);
}

/*
 * Send output urbs that have been prepared previously. URBs are dequeued
 * from ep->ready_playback_urbs; if none are available, or if no packets
 * have been prepared, this function does nothing.
 *
 * The reason why the functionality of sending and preparing URBs is separated
 * is that host controllers don't guarantee the order in which they return
 * inbound and outbound packets to their submitters.
 *
 * This function is used both for implicit feedback endpoints and in low-
 * latency playback mode.
 */
void snd_usb_queue_pending_output_urbs(struct snd_usb_endpoint *ep,
				       bool in_stream_lock)
{
	bool implicit_fb = snd_usb_endpoint_implicit_feedback_sink(ep);

	while (ep_state_running(ep)) {

		unsigned long flags;
		struct snd_usb_packet_info *packet;
		struct snd_urb_ctx *ctx = NULL;
		int err, i;

		spin_lock_irqsave(&ep->lock, flags);
		if ((!implicit_fb || ep->next_packet_queued > 0) &&
		    !list_empty(&ep->ready_playback_urbs)) {
			/* take URB out of FIFO */
			ctx = list_first_entry(&ep->ready_playback_urbs,
					       struct snd_urb_ctx, ready_list);
			list_del_init(&ctx->ready_list);
			if (implicit_fb)
				packet = next_packet_fifo_dequeue(ep);
		}
		spin_unlock_irqrestore(&ep->lock, flags);

		if (ctx == NULL)
			return;

		/* copy over the length information */
		if (implicit_fb) {
			for (i = 0; i < packet->packets; i++)
				ctx->packet_size[i] = packet->packet_size[i];
		}

		/* call the data handler to fill in playback data */
		err = prepare_outbound_urb(ep, ctx, in_stream_lock);
		/* can be stopped during prepare callback */
		if (unlikely(!ep_state_running(ep)))
			break;
		if (err < 0) {
			/* push back to ready list again for -EAGAIN */
			if (err == -EAGAIN)
				push_back_to_ready_list(ep, ctx);
			else
				notify_xrun(ep);
			return;
		}

		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
		if (err < 0) {
			usb_audio_err(ep->chip,
				      "Unable to submit urb #%d: %d at %s\n",
				      ctx->index, err, __func__);
			notify_xrun(ep);
			return;
		}

		set_bit(ctx->index, &ep->active_mask);
		atomic_inc(&ep->submitted_urbs);
	}
}

/*
 * complete callback for urbs
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN))	/* device disabled */
		goto exit_clear;
	/* device disconnected */
	if (unlikely(atomic_read(&ep->chip->shutdown)))
		goto exit_clear;

	if (unlikely(!ep_state_running(ep)))
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!ep_state_running(ep)))
			goto exit_clear;

		/* in low-latency and implicit-feedback modes, push back the
		 * URB to ready list at first, then process as much as possible
		 */
		if (ep->lowlatency_playback ||
		    snd_usb_endpoint_implicit_feedback_sink(ep)) {
			push_back_to_ready_list(ep, ctx);
			clear_bit(ctx->index, &ep->active_mask);
			snd_usb_queue_pending_output_urbs(ep, false);
			atomic_dec(&ep->submitted_urbs); /* decrement at last */
			return;
		}

		/* in non-lowlatency mode, no error handling for prepare */
		prepare_outbound_urb(ep, ctx, false);
		/* can be stopped during prepare callback */
		if (unlikely(!ep_state_running(ep)))
			goto exit_clear;
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!ep_state_running(ep)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	usb_audio_err(ep->chip, "cannot submit urb (err = %d)\n", err);
	notify_xrun(ep);

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
	atomic_dec(&ep->submitted_urbs);
}

/*
 * Find or create a refcount object for the given interface
 *
 * The objects are released altogether in snd_usb_endpoint_free_all()
 */
static struct snd_usb_iface_ref *
iface_ref_find(struct snd_usb_audio *chip, int iface)
{
	struct snd_usb_iface_ref *ip;

	list_for_each_entry(ip, &chip->iface_ref_list, list)
		if (ip->iface == iface)
			return ip;

	ip = kzalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->iface = iface;
	list_add_tail(&ip->list, &chip->iface_ref_list);
	return ip;
}

/*
 * Get the existing endpoint object corresponding to the given EP number.
 * Returns NULL if not present.
 */
struct snd_usb_endpoint *
snd_usb_get_endpoint(struct snd_usb_audio *chip, int ep_num)
{
	struct snd_usb_endpoint *ep;

	list_for_each_entry(ep, &chip->ep_list, list) {
		if (ep->ep_num == ep_num)
			return ep;
	}

	return NULL;
}

#define ep_type_name(type) \
	(type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync")

/**
 * snd_usb_add_endpoint: Add an endpoint to a USB audio chip
 *
 * @chip: The chip
 * @ep_num: The number of the endpoint to use
 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
 *
 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created.
 *
 * Returns zero on success or a negative error code.
 *
 * New endpoints will be added to chip->ep_list and freed by
 * calling snd_usb_endpoint_free_all().
 *
 * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
 * bNumEndpoints > 1 beforehand.
 */
int snd_usb_add_endpoint(struct snd_usb_audio *chip, int ep_num, int type)
{
	struct snd_usb_endpoint *ep;
	bool is_playback;

	ep = snd_usb_get_endpoint(chip, ep_num);
	if (ep)
		return 0;

	usb_audio_dbg(chip, "Creating new %s endpoint #%x\n",
		      ep_type_name(type),
		      ep_num);
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		return -ENOMEM;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	atomic_set(&ep->submitted_urbs, 0);

	is_playback = ((ep_num & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
	ep_num &= USB_ENDPOINT_NUMBER_MASK;
	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	list_add_tail(&ep->list, &chip->ep_list);
	return 0;
}

/* Set up syncinterval and maxsyncsize for a sync EP */
static void endpoint_set_syncinterval(struct snd_usb_audio *chip,
				      struct snd_usb_endpoint *ep)
{
	struct usb_host_interface *alts;
	struct usb_endpoint_descriptor *desc;

	alts = snd_usb_get_host_interface(chip, ep->iface, ep->altsetting);
	if (!alts)
		return;

	desc = get_endpoint(alts, ep->ep_idx);
	if (desc->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
	    desc->bRefresh >= 1 && desc->bRefresh <= 9)
		ep->syncinterval = desc->bRefresh;
	else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
		ep->syncinterval = 1;
	else if (desc->bInterval >= 1 && desc->bInterval <= 16)
		ep->syncinterval = desc->bInterval - 1;
	else
		ep->syncinterval = 3;

	ep->syncmaxsize = le16_to_cpu(desc->wMaxPacketSize);
}

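/*
 * Note on the values chosen in endpoint_set_syncinterval() above
 * (illustrative, not from the original source): syncinterval is kept as a
 * log2 value, so the feedback period programmed as urb->interval in
 * sync_ep_set_params() becomes 1 << syncinterval (micro)frames; bRefresh is
 * the UAC1 full-speed exponent, while bInterval - 1 converts the high-speed
 * bInterval encoding to the same log2 form.
 */
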
static bool endpoint_compatible(struct snd_usb_endpoint *ep,
				const struct audioformat *fp,
				const struct snd_pcm_hw_params *params)
{
	if (!ep->opened)
		return false;
	if (ep->cur_audiofmt != fp)
		return false;
	if (ep->cur_rate != params_rate(params) ||
	    ep->cur_format != params_format(params) ||
	    ep->cur_period_frames != params_period_size(params) ||
	    ep->cur_buffer_periods != params_periods(params))
		return false;
	return true;
}

/*
 * Check whether the given fp and hw params are compatible with the current
 * setup of the target EP for implicit feedback sync
 */
bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
				 struct snd_usb_endpoint *ep,
				 const struct audioformat *fp,
				 const struct snd_pcm_hw_params *params)
{
	bool ret;

	mutex_lock(&chip->mutex);
	ret = endpoint_compatible(ep, fp, params);
	mutex_unlock(&chip->mutex);
	return ret;
}

/*
 * snd_usb_endpoint_open: Open the endpoint
 *
 * Called from hw_params to assign the endpoint to the substream.
 * It's reference-counted, and only the first opener is allowed to set up
 * arbitrary parameters. A later opener must be compatible with the
 * parameters set up by the first opener.
 * The endpoint needs to be closed via snd_usb_endpoint_close() later.
 *
 * Note that this function doesn't configure the endpoint. The substream
 * needs to set it up later via snd_usb_endpoint_configure().
 */
struct snd_usb_endpoint *
snd_usb_endpoint_open(struct snd_usb_audio *chip,
		      const struct audioformat *fp,
		      const struct snd_pcm_hw_params *params,
		      bool is_sync_ep)
{
	struct snd_usb_endpoint *ep;
	int ep_num = is_sync_ep ? fp->sync_ep : fp->endpoint;

	mutex_lock(&chip->mutex);
	ep = snd_usb_get_endpoint(chip, ep_num);
	if (!ep) {
		usb_audio_err(chip, "Cannot find EP 0x%x to open\n", ep_num);
		goto unlock;
	}

	if (!ep->opened) {
		if (is_sync_ep) {
			ep->iface = fp->sync_iface;
			ep->altsetting = fp->sync_altsetting;
			ep->ep_idx = fp->sync_ep_idx;
		} else {
			ep->iface = fp->iface;
			ep->altsetting = fp->altsetting;
			ep->ep_idx = fp->ep_idx;
		}
		usb_audio_dbg(chip, "Open EP 0x%x, iface=%d:%d, idx=%d\n",
			      ep_num, ep->iface, ep->altsetting, ep->ep_idx);

		ep->iface_ref = iface_ref_find(chip, ep->iface);
		if (!ep->iface_ref) {
			ep = NULL;
			goto unlock;
		}

		ep->cur_audiofmt = fp;
		ep->cur_channels = fp->channels;
		ep->cur_rate = params_rate(params);
		ep->cur_format = params_format(params);
		ep->cur_frame_bytes = snd_pcm_format_physical_width(ep->cur_format) *
			ep->cur_channels / 8;
		ep->cur_period_frames = params_period_size(params);
		ep->cur_period_bytes = ep->cur_period_frames * ep->cur_frame_bytes;
		ep->cur_buffer_periods = params_periods(params);
		ep->cur_clock = fp->clock;

		if (ep->type == SND_USB_ENDPOINT_TYPE_SYNC)
			endpoint_set_syncinterval(chip, ep);

		ep->implicit_fb_sync = fp->implicit_fb;
		ep->need_setup = true;

		usb_audio_dbg(chip, " channels=%d, rate=%d, format=%s, period_bytes=%d, periods=%d, implicit_fb=%d\n",
			      ep->cur_channels, ep->cur_rate,
			      snd_pcm_format_name(ep->cur_format),
			      ep->cur_period_bytes, ep->cur_buffer_periods,
			      ep->implicit_fb_sync);

	} else {
		if (WARN_ON(!ep->iface_ref)) {
			ep = NULL;
			goto unlock;
		}

		if (!endpoint_compatible(ep, fp, params)) {
			usb_audio_err(chip, "Incompatible EP setup for 0x%x\n",
				      ep_num);
			ep = NULL;
			goto unlock;
		}

		usb_audio_dbg(chip, "Reopened EP 0x%x (count %d)\n",
			      ep_num, ep->opened);
	}

	if (!ep->iface_ref->opened++)
		ep->iface_ref->need_setup = true;

	ep->opened++;

 unlock:
	mutex_unlock(&chip->mutex);
	return ep;
}

/*
 * snd_usb_endpoint_set_sync: Link data and sync endpoints
 *
 * Pass NULL to sync_ep to unlink again
 */
void snd_usb_endpoint_set_sync(struct snd_usb_audio *chip,
			       struct snd_usb_endpoint *data_ep,
			       struct snd_usb_endpoint *sync_ep)
{
	data_ep->sync_source = sync_ep;
}

/*
 * Set data endpoint callbacks and the assigned data stream
 *
 * Called at PCM trigger and cleanups.
 * Pass NULL to deactivate each callback.
 */
void snd_usb_endpoint_set_callback(struct snd_usb_endpoint *ep,
				   int (*prepare)(struct snd_usb_substream *subs,
						  struct urb *urb,
						  bool in_stream_lock),
				   void (*retire)(struct snd_usb_substream *subs,
						  struct urb *urb),
				   struct snd_usb_substream *data_subs)
{
	ep->prepare_data_urb = prepare;
	ep->retire_data_urb = retire;
	if (data_subs)
		ep->lowlatency_playback = data_subs->lowlatency_playback;
	else
		ep->lowlatency_playback = false;
	WRITE_ONCE(ep->data_subs, data_subs);
}
855
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100856static int endpoint_set_interface(struct snd_usb_audio *chip,
857 struct snd_usb_endpoint *ep,
858 bool set)
859{
860 int altset = set ? ep->altsetting : 0;
861 int err;
862
863 usb_audio_dbg(chip, "Setting usb interface %d:%d for EP 0x%x\n",
864 ep->iface, altset, ep->ep_num);
865 err = usb_set_interface(chip->dev, ep->iface, altset);
866 if (err < 0) {
867 usb_audio_err(chip, "%d:%d: usb_set_interface failed (%d)\n",
868 ep->iface, altset, err);
869 return err;
870 }
871
Takashi Iwai1f074fe2021-07-29 09:38:55 +0200872 if (chip->quirk_flags & QUIRK_FLAG_IFACE_DELAY)
873 msleep(50);
Takashi Iwaibf6313a2020-11-23 09:53:31 +0100874 return 0;
875}
876
/*
 * snd_usb_endpoint_close: Close the endpoint
 *
 * Unreference the already opened endpoint via snd_usb_endpoint_open().
 */
void snd_usb_endpoint_close(struct snd_usb_audio *chip,
			    struct snd_usb_endpoint *ep)
{
	mutex_lock(&chip->mutex);
	usb_audio_dbg(chip, "Closing EP 0x%x (count %d)\n",
		      ep->ep_num, ep->opened);

	if (!--ep->iface_ref->opened)
		endpoint_set_interface(chip, ep, false);

	if (!--ep->opened) {
		ep->iface = 0;
		ep->altsetting = 0;
		ep->cur_audiofmt = NULL;
		ep->cur_rate = 0;
		ep->cur_clock = 0;
		ep->iface_ref = NULL;
		usb_audio_dbg(chip, "EP 0x%x closed\n", ep->ep_num);
	}
	mutex_unlock(&chip->mutex);
}

/* Prepare for suspending EP, called from the main suspend handler */
void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep)
{
	ep->need_setup = true;
	if (ep->iface_ref)
		ep->iface_ref->need_setup = true;
}

/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	int alive;

	if (atomic_read(&ep->state) != EP_STATE_STOPPING)
		return 0;

	do {
		alive = atomic_read(&ep->submitted_urbs);
		if (!alive)
			break;

		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if (alive)
		usb_audio_err(ep->chip,
			      "timeout: still %d active urbs on EP #%x\n",
			      alive, ep->ep_num);

	if (ep_state_update(ep, EP_STATE_STOPPING, EP_STATE_STOPPED)) {
		ep->sync_sink = NULL;
		snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);
	}

	return 0;
}

/* sync the pending stop operation;
 * this function itself doesn't trigger the stop operation
 */
void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
{
	if (ep)
		wait_clear_urbs(ep);
}

/*
 * Stop active urbs
 *
 * This function moves the EP to the STOPPING state if it's currently RUNNING.
 */
static int stop_urbs(struct snd_usb_endpoint *ep, bool force, bool keep_pending)
{
	unsigned int i;
	unsigned long flags;

	if (!force && atomic_read(&ep->running))
		return -EBUSY;

	if (!ep_state_update(ep, EP_STATE_RUNNING, EP_STATE_STOPPING))
		return 0;

	spin_lock_irqsave(&ep->lock, flags);
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_head = 0;
	ep->next_packet_queued = 0;
	spin_unlock_irqrestore(&ep->lock, flags);

	if (keep_pending)
		return 0;

	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				usb_unlink_urb(u);
			}
		}
	}

	return 0;
}

/*
 * release an endpoint's urbs
 */
static int release_urbs(struct snd_usb_endpoint *ep, bool force)
{
	int i, err;

	/* route incoming urbs to nirvana */
	snd_usb_endpoint_set_callback(ep, NULL, NULL, NULL);

	/* stop and unlink urbs */
	err = stop_urbs(ep, force, false);
	if (err)
		return err;

	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
			  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
	return 0;
}

/*
 * configure a data endpoint
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep)
{
	struct snd_usb_audio *chip = ep->chip;
	unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
	unsigned int max_packs_per_period, urbs_per_period, urb_packs;
	unsigned int max_urbs, i;
	const struct audioformat *fmt = ep->cur_audiofmt;
	int frame_bits = ep->cur_frame_bytes * 8;
	int tx_length_quirk = (has_tx_length_quirk(chip) &&
			       usb_pipeout(ep->pipe));

	usb_audio_dbg(chip, "Setting params for data EP 0x%x, pipe 0x%x\n",
		      ep->ep_num, ep->pipe);

	if (ep->cur_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
		/*
		 * When operating in DSD DOP mode, the size of a sample frame
		 * in hardware differs from the actual physical format width
		 * because we need to make room for the DOP markers.
		 */
		frame_bits += ep->cur_channels << 3;
	}

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;

	switch (ep->cur_format) {
	case SNDRV_PCM_FORMAT_U8:
		ep->silence_value = 0x80;
		break;
	case SNDRV_PCM_FORMAT_DSD_U8:
	case SNDRV_PCM_FORMAT_DSD_U16_LE:
	case SNDRV_PCM_FORMAT_DSD_U32_LE:
	case SNDRV_PCM_FORMAT_DSD_U16_BE:
	case SNDRV_PCM_FORMAT_DSD_U32_BE:
		ep->silence_value = 0x69;
		break;
	default:
		ep->silence_value = 0;
	}

	/* assume max. frequency is 50% higher than nominal */
	ep->freqmax = ep->freqn + (ep->freqn >> 1);
	/* Round up freqmax to nearest integer in order to calculate maximum
	 * packet size, which must represent a whole number of frames.
	 * This is accomplished by adding 0x0.ffff before converting the
	 * Q16.16 format into integer.
	 * In order to accurately calculate the maximum packet size when
	 * the data interval is more than 1 (i.e. ep->datainterval > 0),
	 * multiply by the data interval prior to rounding. For instance,
	 * a freqmax of 41 kHz will result in a max packet size of 6 (5.125)
	 * frames with a data interval of 1, but 11 (10.25) frames with a
	 * data interval of 2.
	 * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the
	 * maximum datainterval value of 3, at USB full speed, higher for
	 * USB high speed, noting that ep->freqmax is in units of
	 * frames per packet in Q16.16 format.)
	 */
	maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) *
			 (frame_bits >> 3);
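	/*
	 * Worked example (illustrative, not from the original source):
	 * 48 kHz stereo S16_LE at full speed with datainterval 0 gives
	 * frame_bits = 32, ep->freqn = 48.0 and ep->freqmax = 72.0 in
	 * Q16.16, so maxsize = 72 * 4 = 288 bytes before the optional
	 * tx_length_quirk space and the wMaxPacketSize cap below.
	 */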
	if (tx_length_quirk)
		maxsize += sizeof(__le32); /* Space for length descriptor */
	/* but wMaxPacketSize might reduce this */
	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
		/* whatever fits into a max. size packet */
		unsigned int data_maxsize = maxsize = ep->maxpacksize;

		if (tx_length_quirk)
			/* Need to remove the length descriptor to calc freq */
			data_maxsize -= sizeof(__le32);
		ep->freqmax = (data_maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	}

	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	if (snd_usb_get_speed(chip->dev) != USB_SPEED_FULL) {
		packs_per_ms = 8 >> ep->datainterval;
		max_packs_per_urb = MAX_PACKS_HS;
	} else {
		packs_per_ms = 1;
		max_packs_per_urb = MAX_PACKS;
	}
	if (ep->sync_source && !ep->implicit_fb_sync)
		max_packs_per_urb = min(max_packs_per_urb,
					1U << ep->sync_source->syncinterval);
	max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);

	/*
	 * Capture endpoints need to use small URBs because there's no way
	 * to tell in advance where the next period will end, and we don't
	 * want the next URB to complete much after the period ends.
	 *
	 * Playback endpoints with implicit sync must use the same parameters
	 * as their corresponding capture endpoint.
	 */
	if (usb_pipein(ep->pipe) || ep->implicit_fb_sync) {

		urb_packs = packs_per_ms;
		/*
		 * Wireless devices can poll at a max rate of once per 4ms.
		 * For dataintervals less than 5, increase the packet count to
		 * allow the host controller to use bursting to fill in the
		 * gaps.
		 */
		if (snd_usb_get_speed(chip->dev) == USB_SPEED_WIRELESS) {
			int interval = ep->datainterval;
			while (interval < 5) {
				urb_packs <<= 1;
				++interval;
			}
		}
		/* make capture URBs <= 1 ms and smaller than a period */
		urb_packs = min(max_packs_per_urb, urb_packs);
		while (urb_packs > 1 && urb_packs * maxsize >= ep->cur_period_bytes)
			urb_packs >>= 1;
		ep->nurbs = MAX_URBS;

	/*
	 * Playback endpoints without implicit sync are adjusted so that
	 * a period fits as evenly as possible in the smallest number of
	 * URBs. The total number of URBs is adjusted to the size of the
	 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
	 */
	} else {
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval)) *
				(frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (ep->sync_source)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);

		/* how many packets will contain an entire ALSA period? */
		max_packs_per_period = DIV_ROUND_UP(ep->cur_period_bytes, minsize);

		/* how many URBs will contain a period? */
		urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
					       max_packs_per_urb);
		/* how many packets are needed in each URB? */
		urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

		/* limit the number of frames in a single URB */
		ep->max_urb_frames = DIV_ROUND_UP(ep->cur_period_frames,
						  urbs_per_period);

		/* try to use enough URBs to contain an entire ALSA buffer */
		max_urbs = min((unsigned) MAX_URBS,
			       MAX_QUEUE * packs_per_ms / urb_packs);
		ep->nurbs = min(max_urbs, urbs_per_period * ep->cur_buffer_periods);
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = urb_packs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++; /* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	return 0;

out_of_memory:
	release_urbs(ep, false);
	return -ENOMEM;
}

/*
 * configure a sync endpoint
 */
static int sync_ep_set_params(struct snd_usb_endpoint *ep)
{
	struct snd_usb_audio *chip = ep->chip;
	int i;

	usb_audio_dbg(chip, "Setting params for sync EP 0x%x, pipe 0x%x\n",
		      ep->ep_num, ep->pipe);

	ep->syncbuf = usb_alloc_coherent(chip->dev, SYNC_URBS * 4,
					 GFP_KERNEL, &ep->sync_dma);
	if (!ep->syncbuf)
		return -ENOMEM;

	for (i = 0; i < SYNC_URBS; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = 1;
		u->urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer = ep->syncbuf + i * 4;
		u->urb->transfer_dma = ep->sync_dma + i * 4;
		u->urb->transfer_buffer_length = 4;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->number_of_packets = 1;
		u->urb->interval = 1 << ep->syncinterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	ep->nurbs = SYNC_URBS;

	return 0;

out_of_memory:
	release_urbs(ep, false);
	return -ENOMEM;
}

/*
 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
 *
 * Determine the number of URBs to be used on this endpoint.
 * An endpoint must be configured before it can be started.
 * An endpoint that is already running can not be reconfigured.
 */
static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
				       struct snd_usb_endpoint *ep)
{
	const struct audioformat *fmt = ep->cur_audiofmt;
	int err;

	/* release old buffers, if any */
	err = release_urbs(ep, false);
	if (err < 0)
		return err;

	ep->datainterval = fmt->datainterval;
	ep->maxpacksize = fmt->maxpacksize;
	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);

	if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL) {
		ep->freqn = get_usb_full_speed_rate(ep->cur_rate);
		ep->pps = 1000 >> ep->datainterval;
	} else {
		ep->freqn = get_usb_high_speed_rate(ep->cur_rate);
		ep->pps = 8000 >> ep->datainterval;
	}

	ep->sample_rem = ep->cur_rate % ep->pps;
	ep->packsize[0] = ep->cur_rate / ep->pps;
	ep->packsize[1] = (ep->cur_rate + (ep->pps - 1)) / ep->pps;
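	/*
	 * Example (illustrative, not from the original source): 44100 Hz at
	 * high speed with datainterval 0 gives pps = 8000, sample_rem = 4100
	 * and packsize = {5, 6}; next_packet_size() then emits a 6-frame
	 * packet whenever the accumulated remainder reaches pps, averaging
	 * 5.5125 frames per microframe, i.e. exactly 44.1 kHz.
	 */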
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001287
1288 /* calculate the frequency in 16.16 format */
1289 ep->freqm = ep->freqn;
1290 ep->freqshift = INT_MIN;
1291
1292 ep->phase = 0;
1293
1294 switch (ep->type) {
1295 case SND_USB_ENDPOINT_TYPE_DATA:
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001296 err = data_ep_set_params(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001297 break;
1298 case SND_USB_ENDPOINT_TYPE_SYNC:
Eldad Zack93721032013-10-06 22:31:06 +02001299 err = sync_ep_set_params(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001300 break;
1301 default:
1302 err = -EINVAL;
1303 }
1304
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001305 usb_audio_dbg(chip, "Set up %d URBS, ret=%d\n", ep->nurbs, err);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001306
Takashi Iwai5a6c3e12020-11-23 09:53:16 +01001307 if (err < 0)
1308 return err;
1309
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001310 /* some unit conversions at runtime */
1311 ep->maxframesize = ep->maxpacksize / ep->cur_frame_bytes;
1312 ep->curframesize = ep->curpacksize / ep->cur_frame_bytes;
Takashi Iwai5a6c3e12020-11-23 09:53:16 +01001313
1314 return 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001315}
1316
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001317/*
1318 * snd_usb_endpoint_configure: Configure the endpoint
1319 *
1320 * This function sets up the EP to a fully usable state.
1321 * It's called either from the hw_params or the prepare callback.
gushengxianff630b62021-07-05 05:00:52 -07001322 * The function checks the need_setup flag and does nothing unless needed,
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001323 * so it's safe to call this multiple times.
1324 *
1325 * This returns zero if unchanged, 1 if the configuration has changed,
1326 * or a negative error code.
1327 */
1328int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
1329 struct snd_usb_endpoint *ep)
1330{
1331 bool iface_first;
1332 int err = 0;
1333
1334 mutex_lock(&chip->mutex);
Takashi Iwai00272c62021-01-08 08:52:17 +01001335 if (WARN_ON(!ep->iface_ref))
1336 goto unlock;
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001337 if (!ep->need_setup)
1338 goto unlock;
1339
Takashi Iwai00272c62021-01-08 08:52:17 +01001340 /* If the interface has already been set up, just set the EP parameters */
1341 if (!ep->iface_ref->need_setup) {
Takashi Iwai3784d442021-01-18 08:58:15 +01001342 /* sample rate setup of UAC1 is per endpoint, and we need
1343 * to update it at each EP configuration
1344 */
1345 if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
1346 err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
1347 ep->cur_rate);
1348 if (err < 0)
1349 goto unlock;
1350 }
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001351 err = snd_usb_endpoint_set_params(chip, ep);
1352 if (err < 0)
1353 goto unlock;
1354 goto done;
1355 }
1356
1357 /* Need to deselect altsetting at first */
1358 endpoint_set_interface(chip, ep, false);
1359
1360 /* Some UAC1 devices (e.g. Yamaha THR10) need the host interface
1361 * to be set up before the parameter setup
1362 */
1363 iface_first = ep->cur_audiofmt->protocol == UAC_VERSION_1;
Takashi Iwai6e413402021-08-24 07:57:20 +02001364 /* Workaround for devices that, like UAC1 ones, require the interface to be set up first */
1365 if (chip->quirk_flags & QUIRK_FLAG_SET_IFACE_FIRST)
Takashi Iwai7af5a142021-08-24 07:47:00 +02001366 iface_first = true;
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001367 if (iface_first) {
1368 err = endpoint_set_interface(chip, ep, true);
1369 if (err < 0)
1370 goto unlock;
1371 }
1372
1373 err = snd_usb_init_pitch(chip, ep->cur_audiofmt);
1374 if (err < 0)
1375 goto unlock;
1376
1377 err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt, ep->cur_rate);
1378 if (err < 0)
1379 goto unlock;
1380
1381 err = snd_usb_endpoint_set_params(chip, ep);
1382 if (err < 0)
1383 goto unlock;
1384
1385 err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
1386 if (err < 0)
1387 goto unlock;
1388
1389 /* for UAC2/3, enable the interface altsetting here as the last step */
1390 if (!iface_first) {
1391 err = endpoint_set_interface(chip, ep, true);
1392 if (err < 0)
1393 goto unlock;
1394 }
1395
Takashi Iwai00272c62021-01-08 08:52:17 +01001396 ep->iface_ref->need_setup = false;
1397
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001398 done:
1399 ep->need_setup = false;
1400 err = 1;
1401
1402unlock:
1403 mutex_unlock(&chip->mutex);
1404 return err;
1405}
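
/*
 * Illustrative call pattern (a sketch only, not lifted verbatim from a
 * caller): a hw_params or prepare handler is expected to do roughly
 *
 *	err = snd_usb_endpoint_configure(chip, ep);
 *	if (err < 0)
 *		return err;
 *	if (err > 0)
 *		... the EP was actually (re)configured, refresh cached state ...
 *
 * and to rely on the need_setup bookkeeping above so that calling this again
 * from the prepare callback after hw_params stays cheap.
 */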
1406
Takashi Iwai4e7cf1f2021-09-29 10:08:36 +02001407/* get the current rate set to the given clock by any endpoint */
1408int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock)
1409{
1410 struct snd_usb_endpoint *ep;
1411 int rate = 0;
1412
1413 if (!clock)
1414 return 0;
1415 mutex_lock(&chip->mutex);
1416 list_for_each_entry(ep, &chip->ep_list, list) {
1417 if (ep->cur_clock == clock && ep->cur_rate) {
1418 rate = ep->cur_rate;
1419 break;
1420 }
1421 }
1422 mutex_unlock(&chip->mutex);
1423 return rate;
1424}
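
/*
 * Example (hypothetical IDs): if a playback endpoint tied to clock unit 0x09
 * already runs at 96000 Hz, snd_usb_endpoint_get_clock_rate(chip, 0x09)
 * returns 96000, so a second stream sharing that clock can stick to the rate
 * that is already in effect; a return value of 0 means that no endpoint
 * currently uses the given clock.
 */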
1425
Daniel Mack94c27212012-04-12 13:51:15 +02001426/**
1427 * snd_usb_endpoint_start: start an snd_usb_endpoint
1428 *
Ioan-Adrian Ratiu1d0f9532017-01-05 00:37:46 +02001429 * @ep: the endpoint to start
Daniel Mack94c27212012-04-12 13:51:15 +02001430 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001431 * A call to this function will increment the running count of the endpoint.
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001432 * In case it is not already running, the URBs for this endpoint will be
Daniel Mack94c27212012-04-12 13:51:15 +02001433 * submitted. Otherwise, this function does nothing.
1434 *
1435 * Must be balanced to calls of snd_usb_endpoint_stop().
1436 *
1437 * Returns an error if the URB submission failed, 0 in all other cases.
1438 */
Ioan-Adrian Ratiu1d0f9532017-01-05 00:37:46 +02001439int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001440{
Takashi Iwaid5f871f2021-09-29 10:08:43 +02001441 bool is_playback = usb_pipeout(ep->pipe);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001442 int err;
1443 unsigned int i;
1444
Takashi Iwai47ab1542015-08-25 16:09:00 +02001445 if (atomic_read(&ep->chip->shutdown))
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001446 return -EBADFD;
1447
Takashi Iwai53837b42020-11-23 09:53:39 +01001448 if (ep->sync_source)
1449 WRITE_ONCE(ep->sync_source->sync_sink, ep);
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001450
Takashi Iwai43b81e82020-11-23 09:53:34 +01001451 usb_audio_dbg(ep->chip, "Starting %s EP 0x%x (running %d)\n",
1452 ep_type_name(ep->type), ep->ep_num,
1453 atomic_read(&ep->running));
Takashi Iwai57234bc2020-11-23 09:53:27 +01001454
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001455 /* already running? */
Takashi Iwai43b81e82020-11-23 09:53:34 +01001456 if (atomic_inc_return(&ep->running) != 1)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001457 return 0;
1458
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001459 ep->active_mask = 0;
1460 ep->unlink_mask = 0;
1461 ep->phase = 0;
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +03001462 ep->sample_accum = 0;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001463
Daniel Mack2b58fd52012-09-04 10:23:07 +02001464 snd_usb_endpoint_start_quirk(ep);
1465
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001466 /*
1467 * If this endpoint has a data endpoint as implicit feedback source,
1468 * don't start the urbs here. Instead, mark them all as available,
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001469 * wait for the record urbs to return and queue the playback urbs
1470 * from that context.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001471 */
1472
Takashi Iwai5c2b3012021-02-06 21:30:51 +01001473 if (!ep_state_update(ep, EP_STATE_STOPPED, EP_STATE_RUNNING))
1474 goto __error;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001475
Takashi Iwaiebe8dc52021-04-14 10:32:55 +02001476 if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
Takashi Iwai019c7f92021-07-29 09:38:51 +02001477 !(ep->chip->quirk_flags & QUIRK_FLAG_PLAYBACK_FIRST)) {
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001478 usb_audio_dbg(ep->chip, "No URB submission due to implicit fb sync\n");
Takashi Iwaid5f871f2021-09-29 10:08:43 +02001479 i = 0;
1480 goto fill_rest;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001481 }
1482
1483 for (i = 0; i < ep->nurbs; i++) {
1484 struct urb *urb = ep->urb[i].urb;
1485
1486 if (snd_BUG_ON(!urb))
1487 goto __error;
1488
Takashi Iwaid5f871f2021-09-29 10:08:43 +02001489 if (is_playback)
1490 err = prepare_outbound_urb(ep, urb->context, true);
1491 else
1492 err = prepare_inbound_urb(ep, urb->context);
1493 if (err < 0) {
1494 /* stop filling at applptr */
1495 if (err == -EAGAIN)
1496 break;
1497 usb_audio_dbg(ep->chip,
1498 "EP 0x%x: failed to prepare urb: %d\n",
1499 ep->ep_num, err);
1500 goto __error;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001501 }
1502
1503 err = usb_submit_urb(urb, GFP_ATOMIC);
1504 if (err < 0) {
Takashi Iwai0ba41d92014-02-26 13:02:17 +01001505 usb_audio_err(ep->chip,
1506 "cannot submit urb %d, error %d: %s\n",
1507 i, err, usb_error_string(err));
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001508 goto __error;
1509 }
1510 set_bit(i, &ep->active_mask);
Takashi Iwai86a42ad2021-09-29 10:08:37 +02001511 atomic_inc(&ep->submitted_urbs);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001512 }
1513
Takashi Iwaid5f871f2021-09-29 10:08:43 +02001514 if (!i) {
1515 usb_audio_dbg(ep->chip, "XRUN at starting EP 0x%x\n",
1516 ep->ep_num);
1517 goto __error;
1518 }
1519
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001520 usb_audio_dbg(ep->chip, "%d URBs submitted for EP 0x%x\n",
Takashi Iwaid5f871f2021-09-29 10:08:43 +02001521 i, ep->ep_num);
1522
1523 fill_rest:
1524 /* put the remaining URBs to ready list */
1525 if (is_playback) {
1526 for (; i < ep->nurbs; i++)
1527 push_back_to_ready_list(ep, ep->urb + i);
1528 }
1529
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001530 return 0;
1531
1532__error:
Takashi Iwai813a17c2021-09-29 10:08:44 +02001533 snd_usb_endpoint_stop(ep, false);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001534 return -EPIPE;
1535}
1536
Daniel Mack94c27212012-04-12 13:51:15 +02001537/**
1538 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
1539 *
1540 * @ep: the endpoint to stop (may be NULL)
Takashi Iwai813a17c2021-09-29 10:08:44 +02001541 * @keep_pending: keep in-flight URBs
Daniel Mack94c27212012-04-12 13:51:15 +02001542 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001543 * A call to this function will decrement the running count of the endpoint.
Daniel Mack94c27212012-04-12 13:51:15 +02001544 * In case the last user has requested the endpoint stop, the URBs will
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001545 * actually be deactivated.
Daniel Mack94c27212012-04-12 13:51:15 +02001546 *
1547 * Must be balanced to calls of snd_usb_endpoint_start().
Takashi Iwaib2eb9502012-11-21 08:30:48 +01001548 *
1549 * The caller needs to synchronize the pending stop operation via
1550 * snd_usb_endpoint_sync_pending_stop().
Daniel Mack94c27212012-04-12 13:51:15 +02001551 */
Takashi Iwai813a17c2021-09-29 10:08:44 +02001552void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, bool keep_pending)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001553{
1554 if (!ep)
1555 return;
1556
Takashi Iwai43b81e82020-11-23 09:53:34 +01001557 usb_audio_dbg(ep->chip, "Stopping %s EP 0x%x (running %d)\n",
1558 ep_type_name(ep->type), ep->ep_num,
1559 atomic_read(&ep->running));
Takashi Iwai57234bc2020-11-23 09:53:27 +01001560
Takashi Iwai43b81e82020-11-23 09:53:34 +01001561 if (snd_BUG_ON(!atomic_read(&ep->running)))
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001562 return;
1563
Takashi Iwai988cc172021-04-26 08:33:49 +02001564 if (!atomic_dec_return(&ep->running)) {
1565 if (ep->sync_source)
1566 WRITE_ONCE(ep->sync_source->sync_sink, NULL);
Takashi Iwai813a17c2021-09-29 10:08:44 +02001567 stop_urbs(ep, false, keep_pending);
Takashi Iwai988cc172021-04-26 08:33:49 +02001568 }
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001569}
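
/*
 * Balanced usage sketch (illustrative only): a stop path that previously
 * called snd_usb_endpoint_start() is expected to do
 *
 *	snd_usb_endpoint_stop(ep, false);
 *	...
 *	snd_usb_endpoint_sync_pending_stop(ep);
 *
 * so that the in-flight URBs are fully retired before the endpoint is
 * reconfigured or closed.
 */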
1570
Daniel Mack94c27212012-04-12 13:51:15 +02001571/**
Takashi Iwai92a586b2014-06-25 14:24:47 +02001572 * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
1573 *
1574 * @ep: the endpoint to release
1575 *
Takashi Iwai43b81e82020-11-23 09:53:34 +01001576 * This function does not care about the endpoint's running count but will tear
Takashi Iwai92a586b2014-06-25 14:24:47 +02001577 * down all the streaming URBs immediately.
1578 */
1579void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
1580{
Takashi Iwaid6cda462021-02-06 21:30:50 +01001581 release_urbs(ep, true);
Takashi Iwai92a586b2014-06-25 14:24:47 +02001582}
1583
1584/**
Takashi Iwai00272c62021-01-08 08:52:17 +01001585 * snd_usb_endpoint_free_all: Free the resources of all snd_usb_endpoints
Takashi Iwai036f90d2021-02-05 09:28:37 +01001586 * @chip: The chip
Daniel Mack94c27212012-04-12 13:51:15 +02001587 *
Takashi Iwai00272c62021-01-08 08:52:17 +01001588 * This frees all endpoints and their resources
Daniel Mack94c27212012-04-12 13:51:15 +02001589 */
Takashi Iwai00272c62021-01-08 08:52:17 +01001590void snd_usb_endpoint_free_all(struct snd_usb_audio *chip)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001591{
Takashi Iwai00272c62021-01-08 08:52:17 +01001592 struct snd_usb_endpoint *ep, *en;
1593 struct snd_usb_iface_ref *ip, *in;
1594
1595 list_for_each_entry_safe(ep, en, &chip->ep_list, list)
1596 kfree(ep);
1597
1598 list_for_each_entry_safe(ip, in, &chip->iface_ref_list, list)
1599 kfree(ip);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001600}
1601
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001602/*
Daniel Mack94c27212012-04-12 13:51:15 +02001603 * snd_usb_handle_sync_urb: parse a USB sync packet
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001604 *
Daniel Mack94c27212012-04-12 13:51:15 +02001605 * @ep: the endpoint to handle the packet
1606 * @sender: the sending endpoint
1607 * @urb: the received packet
1608 *
1609 * This function is called from the context of an endpoint that received
1610 * the packet and is used to let another endpoint object handle the payload.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001611 */
Takashi Iwaibf6313a2020-11-23 09:53:31 +01001612static void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
1613 struct snd_usb_endpoint *sender,
1614 const struct urb *urb)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001615{
1616 int shift;
1617 unsigned int f;
1618 unsigned long flags;
1619
1620 snd_BUG_ON(ep == sender);
1621
Daniel Mack94c27212012-04-12 13:51:15 +02001622 /*
1623 * In case the endpoint is operating in implicit feedback mode, prepare
Daniel Mack07a5e9d2012-04-24 19:31:24 +02001624 * a new outbound URB that has the same layout as the received packet
1625 * and add it to the list of pending urbs. snd_usb_queue_pending_output_urbs()
1626 * will take care of them later.
Daniel Mack94c27212012-04-12 13:51:15 +02001627 */
Eldad Zack98ae4722013-04-03 23:18:52 +02001628 if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
Takashi Iwai43b81e82020-11-23 09:53:34 +01001629 atomic_read(&ep->running)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001630
1631 /* implicit feedback case */
1632 int i, bytes = 0;
1633 struct snd_urb_ctx *in_ctx;
1634 struct snd_usb_packet_info *out_packet;
1635
1636 in_ctx = urb->context;
1637
1638 /* Count overall packet size */
1639 for (i = 0; i < in_ctx->packets; i++)
1640 if (urb->iso_frame_desc[i].status == 0)
1641 bytes += urb->iso_frame_desc[i].actual_length;
1642
1643 /*
1644 * skip empty packets. At least M-Audio's Fast Track Ultra stops
1645 * streaming once it receives a 0-byte OUT URB
1646 */
1647 if (bytes == 0)
1648 return;
1649
1650 spin_lock_irqsave(&ep->lock, flags);
Takashi Iwaic15871e2020-11-23 09:53:32 +01001651 if (ep->next_packet_queued >= ARRAY_SIZE(ep->next_packet)) {
1652 spin_unlock_irqrestore(&ep->lock, flags);
1653 usb_audio_err(ep->chip,
1654 "next packet FIFO overflow EP 0x%x\n",
1655 ep->ep_num);
1656 notify_xrun(ep);
1657 return;
1658 }
1659
1660 out_packet = next_packet_fifo_enqueue(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001661
1662 /*
1663 * Iterate through the inbound packet and prepare the lengths
1664 * for the output packet. The OUT packet we are about to send
Eldad Zack28acb122012-11-28 23:55:34 +01001665 * will have the same number of payload bytes per stride as the
1666 * IN packet we just received. Since the actual size is scaled
1667 * by the stride, use the sender stride to calculate the length
1668 * in case the number of channels differ between the implicitly
1669 * fed-back endpoint and the synchronizing endpoint.
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001670 */
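		/*
		 * Example with made-up numbers: if the capture (sender) side
		 * carries 2-channel S16 audio, its stride is 4 bytes per
		 * frame; an IN packet with actual_length == 24 thus holds
		 * 6 frames, packet_size[i] becomes 6, and the corresponding
		 * OUT packet will carry 6 frames of the playback stream no
		 * matter how many channels that stream has.
		 */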
1671
1672 out_packet->packets = in_ctx->packets;
1673 for (i = 0; i < in_ctx->packets; i++) {
1674 if (urb->iso_frame_desc[i].status == 0)
1675 out_packet->packet_size[i] =
Eldad Zack28acb122012-11-28 23:55:34 +01001676 urb->iso_frame_desc[i].actual_length / sender->stride;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001677 else
1678 out_packet->packet_size[i] = 0;
1679 }
1680
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001681 spin_unlock_irqrestore(&ep->lock, flags);
Takashi Iwaid5f871f2021-09-29 10:08:43 +02001682 snd_usb_queue_pending_output_urbs(ep, false);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001683
1684 return;
1685 }
1686
Daniel Mack94c27212012-04-12 13:51:15 +02001687 /*
1688 * process after playback sync complete
1689 *
1690 * Full speed devices report feedback values in 10.14 format as samples
1691 * per frame, high speed devices in 16.16 format as samples per
1692 * microframe.
1693 *
1694 * Because the Audio Class 1 spec was written before USB 2.0, many high
1695 * speed devices use a wrong interpretation, and some others use an
1696 * entirely different format.
1697 *
1698 * Therefore, we cannot predict what format any particular device uses
1699 * and must detect it automatically.
1700 */
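
	/*
	 * Worked example (hypothetical, well-behaved device): at 48 kHz on a
	 * full-speed link, freqn is 48.0 in Q16.16, i.e. 0x300000, while the
	 * device reports its feedback in Q10.14, i.e. 0xC0000 for 48 samples
	 * per frame.  The autodetection below shifts the value left twice to
	 * bring it into the expected window, stores freqshift = 2, and from
	 * then on each feedback value is converted to Q16.16 via f <<= 2
	 * before being written to freqm.
	 */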
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001701
1702 if (urb->iso_frame_desc[0].status != 0 ||
1703 urb->iso_frame_desc[0].actual_length < 3)
1704 return;
1705
1706 f = le32_to_cpup(urb->transfer_buffer);
1707 if (urb->iso_frame_desc[0].actual_length == 3)
1708 f &= 0x00ffffff;
1709 else
1710 f &= 0x0fffffff;
1711
1712 if (f == 0)
1713 return;
1714
Daniel Mackca0dd272016-08-22 08:53:37 +02001715 if (unlikely(sender->tenor_fb_quirk)) {
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001716 /*
Daniel Mackca0dd272016-08-22 08:53:37 +02001717 * Devices based on Tenor 8802 chipsets (TEAC UD-H01
1718 * and others) sometimes change the feedback value
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001719 * by +/- 0x1.0000.
1720 */
1721 if (f < ep->freqn - 0x8000)
Daniel Mack36e1ac32016-08-22 08:53:38 +02001722 f += 0xf000;
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001723 else if (f > ep->freqn + 0x8000)
Daniel Mack36e1ac32016-08-22 08:53:38 +02001724 f -= 0xf000;
Clemens Ladisch7040b6d2014-05-01 12:20:22 +02001725 } else if (unlikely(ep->freqshift == INT_MIN)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001726 /*
1727 * The first time we see a feedback value, determine its format
1728 * by shifting it left or right until it matches the nominal
1729 * frequency value. This assumes that the feedback does not
1730 * differ from the nominal value by more than +50% or -25%.
1731 */
1732 shift = 0;
1733 while (f < ep->freqn - ep->freqn / 4) {
1734 f <<= 1;
1735 shift++;
1736 }
1737 while (f > ep->freqn + ep->freqn / 2) {
1738 f >>= 1;
1739 shift--;
1740 }
1741 ep->freqshift = shift;
1742 } else if (ep->freqshift >= 0)
1743 f <<= ep->freqshift;
1744 else
1745 f >>= -ep->freqshift;
1746
1747 if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
1748 /*
1749 * If the frequency looks valid, set it.
1750 * This value is referred to in prepare_playback_urb().
1751 */
1752 spin_lock_irqsave(&ep->lock, flags);
1753 ep->freqm = f;
1754 spin_unlock_irqrestore(&ep->lock, flags);
1755 } else {
1756 /*
1757 * Out of range; maybe the shift value is wrong.
1758 * Reset it so that we autodetect again the next time.
1759 */
1760 ep->freqshift = INT_MIN;
1761 }
1762}
1763