blob: 88760268fb5568eddbb7cdf90e7961664f31b717 [file] [log] [blame]
Thomas Gleixner1a59d1b82019-05-27 08:55:05 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Daniel Macke5779992010-03-04 19:46:13 +01002/*
Daniel Macke5779992010-03-04 19:46:13 +01003 */
4
Daniel Mackc731bc92011-09-14 12:46:57 +02005#include <linux/gfp.h>
6#include <linux/init.h>
Takashi Iwai80c8a2a2012-01-09 11:37:20 +01007#include <linux/ratelimit.h>
Daniel Mackc731bc92011-09-14 12:46:57 +02008#include <linux/usb.h>
9#include <linux/usb/audio.h>
Daniel Mack8fdff6a2012-04-12 13:51:11 +020010#include <linux/slab.h>
Daniel Mackc731bc92011-09-14 12:46:57 +020011
12#include <sound/core.h>
13#include <sound/pcm.h>
Daniel Mack8fdff6a2012-04-12 13:51:11 +020014#include <sound/pcm_params.h>
Daniel Mackc731bc92011-09-14 12:46:57 +020015
16#include "usbaudio.h"
17#include "helper.h"
18#include "card.h"
19#include "endpoint.h"
20#include "pcm.h"
Daniel Mack2b58fd52012-09-04 10:23:07 +020021#include "quirks.h"
Daniel Mackc731bc92011-09-14 12:46:57 +020022
Daniel Mack8fdff6a2012-04-12 13:51:11 +020023#define EP_FLAG_RUNNING 1
Takashi Iwaif58161b2012-11-08 08:52:45 +010024#define EP_FLAG_STOPPING 2
Daniel Mack8fdff6a2012-04-12 13:51:11 +020025
Daniel Mackc731bc92011-09-14 12:46:57 +020026/*
 * snd_usb_endpoint is a model that abstracts everything related to a
 * USB endpoint and its streaming.
29 *
30 * There are functions to activate and deactivate the streaming URBs and
Daniel Mack07a5e9d2012-04-24 19:31:24 +020031 * optional callbacks to let the pcm logic handle the actual content of the
Daniel Mack94c27212012-04-12 13:51:15 +020032 * packets for playback and record. Thus, the bus streaming and the audio
33 * handlers are fully decoupled.
34 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020035 * There are two different types of endpoints in audio applications.
Daniel Mack94c27212012-04-12 13:51:15 +020036 *
37 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
38 * inbound and outbound traffic.
39 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020040 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
41 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
42 * (3 or 4 bytes).
Daniel Mack94c27212012-04-12 13:51:15 +020043 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +020044 * Each endpoint has to be configured prior to being used by calling
45 * snd_usb_endpoint_set_params().
Daniel Mack94c27212012-04-12 13:51:15 +020046 *
 * The model incorporates reference counting, so that multiple users
48 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
49 * only the first user will effectively start the URBs, and only the last
Daniel Mack07a5e9d2012-04-24 19:31:24 +020050 * one to stop it will tear the URBs down again.
Daniel Mack94c27212012-04-12 13:51:15 +020051 */
52
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	unsigned int scaled = rate * 8192u;	/* identical to rate << 13 */

	/* dividing by 125 yields fs/1000 in Q16.16; +62 rounds to nearest */
	return (scaled + 62) / 125;
}
61
/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	unsigned int scaled = rate * 1024u;	/* identical to rate << 10 */

	/* dividing by 125 yields fs/8000 in Q16.16; +62 rounds to nearest */
	return (scaled + 62) / 125;
}
70
/*
 * Release one URB context: free the coherent DMA transfer buffer (only
 * if one was allocated, i.e. buffer_size is non-zero) and the URB
 * itself, then clear the pointer so a repeated release is harmless.
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->buffer_size)
		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}
83
/*
 * Translate a USB submission error code into a human-readable string
 * for diagnostics.  Codes that indicate driver-internal problems are
 * all reported as "internal error"; anything unrecognized maps to
 * "unknown error".
 */
static const char *usb_error_string(int err)
{
	static const struct {
		int code;
		const char *msg;
	} err_table[] = {
		{ -ENODEV,	 "no device" },
		{ -ENOENT,	 "endpoint not enabled" },
		{ -EPIPE,	 "endpoint stalled" },
		{ -ENOSPC,	 "not enough bandwidth" },
		{ -ESHUTDOWN,	 "device disabled" },
		{ -EHOSTUNREACH, "device suspended" },
		{ -EINVAL,	 "internal error" },
		{ -EAGAIN,	 "internal error" },
		{ -EFBIG,	 "internal error" },
		{ -EMSGSIZE,	 "internal error" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(err_table) / sizeof(err_table[0]); i++) {
		if (err_table[i].code == err)
			return err_table[i].msg;
	}

	return "unknown error";
}
108
Daniel Mack94c27212012-04-12 13:51:15 +0200109/**
110 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
111 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200112 * @ep: The snd_usb_endpoint
Daniel Mack94c27212012-04-12 13:51:15 +0200113 *
114 * Determine whether an endpoint is driven by an implicit feedback
115 * data endpoint source.
116 */
Eldad Zack98ae4722013-04-03 23:18:52 +0200117int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200118{
119 return ep->sync_master &&
120 ep->sync_master->type == SND_USB_ENDPOINT_TYPE_DATA &&
121 ep->type == SND_USB_ENDPOINT_TYPE_DATA &&
122 usb_pipeout(ep->pipe);
123}
124
/*
 * For streaming based on information derived from sync endpoints,
 * prepare_outbound_urb_sizes() will call slave_next_packet_size() to
 * determine the number of samples to be sent in the next packet.
 *
 * For implicit feedback, slave_next_packet_size() is unused.
 */
int snd_usb_endpoint_slave_next_packet_size(struct snd_usb_endpoint *ep)
{
	unsigned long flags;
	int ret;

	/* fill-max mode: always transmit maximum-size packets */
	if (ep->fill_max)
		return ep->maxframesize;

	/*
	 * Advance the fractional phase accumulator by the measured rate
	 * (ep->freqm, scaled by the data interval); the integer part
	 * (upper bits beyond Q16.16's fraction) is the frame count for
	 * the next packet, clamped to the endpoint maximum.  ep->lock
	 * serializes phase/freqm updates against the completion handler.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	ep->phase = (ep->phase & 0xffff)
		+ (ep->freqm << ep->datainterval);
	ret = min(ep->phase >> 16, ep->maxframesize);
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}
148
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300149/*
150 * For adaptive and synchronous endpoints, prepare_outbound_urb_sizes()
151 * will call next_packet_size() to determine the number of samples to be
152 * sent in the next packet.
153 */
154int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
155{
156 int ret;
157
158 if (ep->fill_max)
159 return ep->maxframesize;
160
161 ep->sample_accum += ep->sample_rem;
Alexander Tsoyb9fd2002020-06-29 05:59:34 +0300162 if (ep->sample_accum >= ep->pps) {
163 ep->sample_accum -= ep->pps;
164 ret = ep->packsize[1];
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300165 } else {
Alexander Tsoyb9fd2002020-06-29 05:59:34 +0300166 ret = ep->packsize[0];
Alexander Tsoyf0bd62b2020-04-24 05:24:48 +0300167 }
168
169 return ret;
170}
171
/*
 * Hand a completed playback URB back to the registered data handler,
 * if one is set.
 */
static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb_ctx->urb);
}
178
/*
 * Process a completed capture or sync URB: optionally skip a number of
 * initial packets, feed sync information to a slaved data endpoint, and
 * pass the captured data on to the registered data handler.
 */
static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;

	/* drop packets while a skip count is pending */
	if (unlikely(ep->skip_packets > 0)) {
		ep->skip_packets--;
		return;
	}

	/* forward rate/sync information to the slaved data endpoint */
	if (ep->sync_slave)
		snd_usb_handle_sync_urb(ep->sync_slave, ep, urb);

	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb);
}
195
/*
 * Fill a playback URB with silence.
 *
 * Used when no data provider is registered for the endpoint: each
 * isochronous packet is sized either from the pre-computed sizes in the
 * URB context or from the next-packet-size helpers, and its payload is
 * filled with the endpoint's silence byte.  With tx_length_quirk, each
 * packet is additionally prefixed by its payload length as a __le32.
 */
static void prepare_silent_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *ctx)
{
	struct urb *urb = ctx->urb;
	unsigned int offs = 0;		/* running total of frames written */
	unsigned int extra = 0;		/* per-packet length-header bytes */
	__le32 packet_length;
	int i;

	/* For tx_length_quirk, put packet length at start of packet */
	if (ep->chip->tx_length_quirk)
		extra = sizeof(packet_length);

	for (i = 0; i < ctx->packets; ++i) {
		unsigned int offset;
		unsigned int length;
		int counts;

		/* pre-computed size wins; otherwise derive it per sync mode */
		if (ctx->packet_size[i])
			counts = ctx->packet_size[i];
		else if (ep->sync_master)
			counts = snd_usb_endpoint_slave_next_packet_size(ep);
		else
			counts = snd_usb_endpoint_next_packet_size(ep);

		length = counts * ep->stride; /* number of silent bytes */
		offset = offs * ep->stride + extra * i;
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = length + extra;
		if (extra) {
			packet_length = cpu_to_le32(length);
			memcpy(urb->transfer_buffer + offset,
			       &packet_length, sizeof(packet_length));
		}
		memset(urb->transfer_buffer + offset + extra,
		       ep->silence_value, length);
		offs += counts;
	}

	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * ep->stride + ctx->packets * extra;
}
238
/*
 * Prepare a PLAYBACK urb for submission to the bus.
 *
 * DATA endpoints are filled by the registered prepare_data_urb callback,
 * or with silence when no data provider is set.  SYNC endpoints transmit
 * the nominal rate (ep->freqn) as rate feedback in the speed-dependent
 * wire format.
 */
static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
				 struct snd_urb_ctx *ctx)
{
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		if (ep->prepare_data_urb) {
			ep->prepare_data_urb(ep->data_subs, urb);
		} else {
			/* no data provider, so send silence */
			prepare_silent_urb(ep, ctx);
		}
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16
			 * through the pipe (4 bytes, little-endian).
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the
			 * pipe (3 bytes, little-endian).
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
}
287
/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 */
static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		/* lay out back-to-back packets of the current maximum size */
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		/* a sync packet carries at most 4 bytes of rate feedback */
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
}
318
Daniel Mack94c27212012-04-12 13:51:15 +0200319/*
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200320 * Send output urbs that have been prepared previously. URBs are dequeued
Daniel Mack94c27212012-04-12 13:51:15 +0200321 * from ep->ready_playback_urbs and in case there there aren't any available
322 * or there are no packets that have been prepared, this function does
323 * nothing.
324 *
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200325 * The reason why the functionality of sending and preparing URBs is separated
326 * is that host controllers don't guarantee the order in which they return
327 * inbound and outbound packets to their submitters.
Daniel Mack94c27212012-04-12 13:51:15 +0200328 *
329 * This function is only used for implicit feedback endpoints. For endpoints
Daniel Mack07a5e9d2012-04-24 19:31:24 +0200330 * driven by dedicated sync endpoints, URBs are immediately re-submitted
331 * from their completion handler.
Daniel Mack94c27212012-04-12 13:51:15 +0200332 */
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200333static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
334{
335 while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {
336
337 unsigned long flags;
Andrew Morton68853fa2012-04-24 08:10:10 +0200338 struct snd_usb_packet_info *uninitialized_var(packet);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200339 struct snd_urb_ctx *ctx = NULL;
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200340 int err, i;
341
342 spin_lock_irqsave(&ep->lock, flags);
343 if (ep->next_packet_read_pos != ep->next_packet_write_pos) {
344 packet = ep->next_packet + ep->next_packet_read_pos;
345 ep->next_packet_read_pos++;
346 ep->next_packet_read_pos %= MAX_URBS;
347
348 /* take URB out of FIFO */
Takashi Iwai5b6cc382020-04-24 09:40:16 +0200349 if (!list_empty(&ep->ready_playback_urbs)) {
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200350 ctx = list_first_entry(&ep->ready_playback_urbs,
351 struct snd_urb_ctx, ready_list);
Takashi Iwai5b6cc382020-04-24 09:40:16 +0200352 list_del_init(&ctx->ready_list);
353 }
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200354 }
355 spin_unlock_irqrestore(&ep->lock, flags);
356
357 if (ctx == NULL)
358 return;
359
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200360 /* copy over the length information */
361 for (i = 0; i < packet->packets; i++)
362 ctx->packet_size[i] = packet->packet_size[i];
363
Daniel Mack94c27212012-04-12 13:51:15 +0200364 /* call the data handler to fill in playback data */
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200365 prepare_outbound_urb(ep, ctx);
366
367 err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
368 if (err < 0)
Takashi Iwai0ba41d92014-02-26 13:02:17 +0100369 usb_audio_err(ep->chip,
370 "Unable to submit urb #%d: %d (urb %p)\n",
371 ctx->index, err, ctx->urb);
Daniel Mack8fdff6a2012-04-12 13:51:11 +0200372 else
373 set_bit(ctx->index, &ep->active_mask);
374 }
375}
376
/*
 * complete callback for urbs
 *
 * Retires the finished URB (delivering playback positions, captured data
 * or sync information) and, while the endpoint is still running,
 * prepares and re-submits it.  Implicit-feedback playback URBs are
 * instead parked on ep->ready_playback_urbs until packet sizes arrive.
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	struct snd_pcm_substream *substream;
	unsigned long flags;
	int err;

	/* URB was unlinked or the device is going away: never resubmit */
	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN))	/* device disabled */
		goto exit_clear;
	/* device disconnected */
	if (unlikely(atomic_read(&ep->chip->shutdown)))
		goto exit_clear;

	if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
			/* park the URB; it is resubmitted from
			 * queue_pending_output_urbs() when sizes are known */
			spin_lock_irqsave(&ep->lock, flags);
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
			spin_unlock_irqrestore(&ep->lock, flags);
			queue_pending_output_urbs(ep);

			goto exit_clear;
		}

		prepare_outbound_urb(ep, ctx);
		/* can be stopped during prepare callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	usb_audio_err(ep->chip, "cannot submit urb (err = %d)\n", err);
	/* notify the PCM layer so the stream stops with an XRUN */
	if (ep->data_subs && ep->data_subs->pcm_substream) {
		substream = ep->data_subs->pcm_substream;
		snd_pcm_stop_xrun(substream);
	}

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
}
441
/**
 * snd_usb_add_endpoint: Add an endpoint to an USB audio chip
 *
 * @chip: The chip
 * @alts: The USB host interface
 * @ep_num: The number of the endpoint to use
 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
 *
 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created. Otherwise, a pointer to the previously
 * created instance is returned. In case of any error, NULL is returned.
 *
 * New endpoints will be added to chip->ep_list and must be freed by
 * calling snd_usb_endpoint_free().
 *
 * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
 * bNumEndpoints > 1 beforehand.
 */
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
					      struct usb_host_interface *alts,
					      int ep_num, int direction, int type)
{
	struct snd_usb_endpoint *ep;
	int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;

	if (WARN_ON(!alts))
		return NULL;

	mutex_lock(&chip->mutex);

	/* re-use a matching endpoint that was set up before */
	list_for_each_entry(ep, &chip->ep_list, list) {
		if (ep->ep_num == ep_num &&
		    ep->iface == alts->desc.bInterfaceNumber &&
		    ep->altsetting == alts->desc.bAlternateSetting) {
			usb_audio_dbg(ep->chip,
				      "Re-using EP %x in iface %d,%d @%p\n",
					ep_num, ep->iface, ep->altsetting, ep);
			goto __exit_unlock;
		}
	}

	usb_audio_dbg(chip, "Creating new %s %s endpoint #%x\n",
		    is_playback ? "playback" : "capture",
		    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
		    ep_num);

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		goto __exit_unlock;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	ep->iface = alts->desc.bInterfaceNumber;
	ep->altsetting = alts->desc.bAlternateSetting;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	/* strip the direction bit before building the pipe */
	ep_num &= USB_ENDPOINT_NUMBER_MASK;

	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	if (type == SND_USB_ENDPOINT_TYPE_SYNC) {
		/*
		 * Determine the sync interval: prefer a sane audio-class
		 * bRefresh value, fall back to a fixed value at full speed,
		 * then to bInterval, and finally to a default of 3.
		 */
		if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    get_endpoint(alts, 1)->bRefresh >= 1 &&
		    get_endpoint(alts, 1)->bRefresh <= 9)
			ep->syncinterval = get_endpoint(alts, 1)->bRefresh;
		else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
			ep->syncinterval = 1;
		else if (get_endpoint(alts, 1)->bInterval >= 1 &&
			 get_endpoint(alts, 1)->bInterval <= 16)
			ep->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
		else
			ep->syncinterval = 3;

		ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
	}

	list_add_tail(&ep->list, &chip->ep_list);

	ep->is_implicit_feedback = 0;

__exit_unlock:
	mutex_unlock(&chip->mutex);

	return ep;
}
532
/*
 * wait until all urbs are processed.
 *
 * Polls the active URB mask for up to one second, then clears the
 * STOPPING flag and detaches all consumer callbacks; the endpoint is
 * idle afterwards.  Logs an error if URBs are still in flight at
 * timeout.
 */
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	int alive;

	do {
		alive = bitmap_weight(&ep->active_mask, ep->nurbs);
		if (!alive)
			break;

		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if (alive)
		usb_audio_err(ep->chip,
			"timeout: still %d active urbs on EP #%x\n",
			alive, ep->ep_num);
	clear_bit(EP_FLAG_STOPPING, &ep->flags);

	/* drop all callbacks/links now that no URB can fire anymore */
	ep->data_subs = NULL;
	ep->sync_slave = NULL;
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	return 0;
}
562
Takashi Iwaif58161b2012-11-08 08:52:45 +0100563/* sync the pending stop operation;
564 * this function itself doesn't trigger the stop operation
565 */
566void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep)
567{
568 if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags))
569 wait_clear_urbs(ep);
570}
571
/*
 * unlink active urbs.
 *
 * Clears the RUNNING flag, resets the implicit-feedback FIFO and
 * asynchronously unlinks every URB still marked active.  Returns
 * -EBADFD when the chip is shutting down (unless @force is set).
 */
static int deactivate_urbs(struct snd_usb_endpoint *ep, bool force)
{
	unsigned int i;

	if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */
		return -EBADFD;

	clear_bit(EP_FLAG_RUNNING, &ep->flags);

	/* reset the pending-packet FIFO used for implicit feedback */
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_read_pos = 0;
	ep->next_packet_write_pos = 0;

	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			/* unlink each active URB exactly once */
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				usb_unlink_urb(u);
			}
		}
	}

	return 0;
}
599
/*
 * release an endpoint's urbs
 *
 * Detaches the data callbacks, stops and waits for all URBs, then frees
 * every URB context and the sync endpoint's shared coherent buffer.
 */
static void release_urbs(struct snd_usb_endpoint *ep, int force)
{
	int i;

	/* route incoming urbs to nirvana */
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	/* stop urbs */
	deactivate_urbs(ep, force);
	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	/* free the buffer shared by all sync URBs, if allocated */
	if (ep->syncbuf)
		usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
				  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
}
625
/*
 * Check data endpoint for format differences
 *
 * Re-runs the same parameter calculations as the set-params path and
 * compares each result against the endpoint's currently configured
 * values.  Returns true when the endpoint already matches the requested
 * format/buffering setup, false otherwise.  No endpoint state is
 * modified.
 */
static bool check_ep_params(struct snd_usb_endpoint *ep,
			    snd_pcm_format_t pcm_format,
			    unsigned int channels,
			    unsigned int period_bytes,
			    unsigned int frames_per_period,
			    unsigned int periods_per_buffer,
			    struct audioformat *fmt,
			    struct snd_usb_endpoint *sync_ep)
{
	unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
	unsigned int max_packs_per_period, urbs_per_period, urb_packs;
	unsigned int max_urbs;
	int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;
	int tx_length_quirk = (ep->chip->tx_length_quirk &&
			       usb_pipeout(ep->pipe));
	bool ret = 1;

	if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
		/*
		 * When operating in DSD DOP mode, the size of a sample frame
		 * in hardware differs from the actual physical format width
		 * because we need to make room for the DOP markers.
		 */
		frame_bits += channels << 3;
	}

	ret = ret && (ep->datainterval == fmt->datainterval);
	ret = ret && (ep->stride == frame_bits >> 3);

	/* the expected silence byte depends on the sample format */
	switch (pcm_format) {
	case SNDRV_PCM_FORMAT_U8:
		ret = ret && (ep->silence_value == 0x80);
		break;
	case SNDRV_PCM_FORMAT_DSD_U8:
	case SNDRV_PCM_FORMAT_DSD_U16_LE:
	case SNDRV_PCM_FORMAT_DSD_U32_LE:
	case SNDRV_PCM_FORMAT_DSD_U16_BE:
	case SNDRV_PCM_FORMAT_DSD_U32_BE:
		ret = ret && (ep->silence_value == 0x69);
		break;
	default:
		ret = ret && (ep->silence_value == 0);
	}

	/* assume max. frequency is 50% higher than nominal */
	ret = ret && (ep->freqmax == ep->freqn + (ep->freqn >> 1));
	/* Round up freqmax to nearest integer in order to calculate maximum
	 * packet size, which must represent a whole number of frames.
	 * This is accomplished by adding 0x0.ffff before converting the
	 * Q16.16 format into integer.
	 * In order to accurately calculate the maximum packet size when
	 * the data interval is more than 1 (i.e. ep->datainterval > 0),
	 * multiply by the data interval prior to rounding. For instance,
	 * a freqmax of 41 kHz will result in a max packet size of 6 (5.125)
	 * frames with a data interval of 1, but 11 (10.25) frames with a
	 * data interval of 2.
	 * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the
	 * maximum datainterval value of 3, at USB full speed, higher for
	 * USB high speed, noting that ep->freqmax is in units of
	 * frames per packet in Q16.16 format.)
	 */
	maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) *
			 (frame_bits >> 3);
	if (tx_length_quirk)
		maxsize += sizeof(__le32); /* Space for length descriptor */
	/* but wMaxPacketSize might reduce this */
	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
		/* whatever fits into a max. size packet */
		unsigned int data_maxsize = maxsize = ep->maxpacksize;

		if (tx_length_quirk)
			/* Need to remove the length descriptor to calc freq */
			data_maxsize -= sizeof(__le32);
		ret = ret && (ep->freqmax == (data_maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval));
	}

	if (ep->fill_max)
		ret = ret && (ep->curpacksize == ep->maxpacksize);
	else
		ret = ret && (ep->curpacksize == maxsize);

	if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
		packs_per_ms = 8 >> ep->datainterval;
		max_packs_per_urb = MAX_PACKS_HS;
	} else {
		packs_per_ms = 1;
		max_packs_per_urb = MAX_PACKS;
	}
	if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
		max_packs_per_urb = min(max_packs_per_urb,
					1U << sync_ep->syncinterval);
	max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);

	/*
	 * Capture endpoints need to use small URBs because there's no way
	 * to tell in advance where the next period will end, and we don't
	 * want the next URB to complete much after the period ends.
	 *
	 * Playback endpoints with implicit sync must use the same parameters
	 * as their corresponding capture endpoint.
	 */
	if (usb_pipein(ep->pipe) ||
			snd_usb_endpoint_implicit_feedback_sink(ep)) {

		urb_packs = packs_per_ms;
		/*
		 * Wireless devices can poll at a max rate of once per 4ms.
		 * For dataintervals less than 5, increase the packet count to
		 * allow the host controller to use bursting to fill in the
		 * gaps.
		 */
		if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) {
			int interval = ep->datainterval;

			while (interval < 5) {
				urb_packs <<= 1;
				++interval;
			}
		}
		/* make capture URBs <= 1 ms and smaller than a period */
		urb_packs = min(max_packs_per_urb, urb_packs);
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		ret = ret && (ep->nurbs == MAX_URBS);

	/*
	 * Playback endpoints without implicit sync are adjusted so that
	 * a period fits as evenly as possible in the smallest number of
	 * URBs. The total number of URBs is adjusted to the size of the
	 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
	 */
	} else {
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval)) *
				(frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (sync_ep)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);

		/* how many packets will contain an entire ALSA period? */
		max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);

		/* how many URBs will contain a period? */
		urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
				max_packs_per_urb);
		/* how many packets are needed in each URB? */
		urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

		/* limit the number of frames in a single URB */
		ret = ret && (ep->max_urb_frames ==
			DIV_ROUND_UP(frames_per_period, urbs_per_period));

		/* try to use enough URBs to contain an entire ALSA buffer */
		max_urbs = min((unsigned) MAX_URBS,
				MAX_QUEUE * packs_per_ms / urb_packs);
		ret = ret && (ep->nurbs == min(max_urbs,
				urbs_per_period * periods_per_buffer));
	}

	ret = ret && (ep->datainterval == fmt->datainterval);
	ret = ret && (ep->maxpacksize == fmt->maxpacksize);
	ret = ret &&
		(ep->fill_max == !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX));

	return ret;
}
797
/*
 * configure a data endpoint, part of snd_usb_endpoint_set_params()
 *
 * Computes the packet geometry (max/min packet size, packets per URB,
 * number of URBs) for a data endpoint and allocates the isochronous
 * URBs together with their coherent DMA transfer buffers.
 *
 * Returns 0 on success, -ENOMEM if any allocation fails (all URBs
 * allocated so far are released before returning).
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep,
			      snd_pcm_format_t pcm_format,
			      unsigned int channels,
			      unsigned int period_bytes,
			      unsigned int frames_per_period,
			      unsigned int periods_per_buffer,
			      struct audioformat *fmt,
			      struct snd_usb_endpoint *sync_ep)
{
	unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
	unsigned int max_packs_per_period, urbs_per_period, urb_packs;
	unsigned int max_urbs, i;
	int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;
	/* quirk: some chips prepend a 32-bit length word to each OUT packet */
	int tx_length_quirk = (ep->chip->tx_length_quirk &&
			       usb_pipeout(ep->pipe));

	if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
		/*
		 * When operating in DSD DOP mode, the size of a sample frame
		 * in hardware differs from the actual physical format width
		 * because we need to make room for the DOP markers.
		 */
		frame_bits += channels << 3;
	}

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;

	/* byte value used to fill silence frames, format dependent */
	switch (pcm_format) {
	case SNDRV_PCM_FORMAT_U8:
		ep->silence_value = 0x80;
		break;
	case SNDRV_PCM_FORMAT_DSD_U8:
	case SNDRV_PCM_FORMAT_DSD_U16_LE:
	case SNDRV_PCM_FORMAT_DSD_U32_LE:
	case SNDRV_PCM_FORMAT_DSD_U16_BE:
	case SNDRV_PCM_FORMAT_DSD_U32_BE:
		ep->silence_value = 0x69;
		break;
	default:
		ep->silence_value = 0;
	}

	/* assume max. frequency is 50% higher than nominal */
	ep->freqmax = ep->freqn + (ep->freqn >> 1);
	/* Round up freqmax to nearest integer in order to calculate maximum
	 * packet size, which must represent a whole number of frames.
	 * This is accomplished by adding 0x0.ffff before converting the
	 * Q16.16 format into integer.
	 * In order to accurately calculate the maximum packet size when
	 * the data interval is more than 1 (i.e. ep->datainterval > 0),
	 * multiply by the data interval prior to rounding. For instance,
	 * a freqmax of 41 kHz will result in a max packet size of 6 (5.125)
	 * frames with a data interval of 1, but 11 (10.25) frames with a
	 * data interval of 2.
	 * (ep->freqmax << ep->datainterval overflows at 8.192 MHz for the
	 * maximum datainterval value of 3, at USB full speed, higher for
	 * USB high speed, noting that ep->freqmax is in units of
	 * frames per packet in Q16.16 format.)
	 */
	maxsize = (((ep->freqmax << ep->datainterval) + 0xffff) >> 16) *
			 (frame_bits >> 3);
	if (tx_length_quirk)
		maxsize += sizeof(__le32); /* Space for length descriptor */
	/* but wMaxPacketSize might reduce this */
	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
		/* whatever fits into a max. size packet */
		unsigned int data_maxsize = maxsize = ep->maxpacksize;

		if (tx_length_quirk)
			/* Need to remove the length descriptor to calc freq */
			data_maxsize -= sizeof(__le32);
		/* recompute the max rate the clamped packet can carry (Q16.16) */
		ep->freqmax = (data_maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	}

	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	/* packets per 1ms frame and per-URB packet ceiling depend on speed */
	if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
		packs_per_ms = 8 >> ep->datainterval;
		max_packs_per_urb = MAX_PACKS_HS;
	} else {
		packs_per_ms = 1;
		max_packs_per_urb = MAX_PACKS;
	}
	/* with an async sync endpoint, don't outrun its feedback interval */
	if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
		max_packs_per_urb = min(max_packs_per_urb,
					1U << sync_ep->syncinterval);
	max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);

	/*
	 * Capture endpoints need to use small URBs because there's no way
	 * to tell in advance where the next period will end, and we don't
	 * want the next URB to complete much after the period ends.
	 *
	 * Playback endpoints with implicit sync must use the same parameters
	 * as their corresponding capture endpoint.
	 */
	if (usb_pipein(ep->pipe) ||
			snd_usb_endpoint_implicit_feedback_sink(ep)) {

		urb_packs = packs_per_ms;
		/*
		 * Wireless devices can poll at a max rate of once per 4ms.
		 * For dataintervals less than 5, increase the packet count to
		 * allow the host controller to use bursting to fill in the
		 * gaps.
		 */
		if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_WIRELESS) {
			int interval = ep->datainterval;
			while (interval < 5) {
				urb_packs <<= 1;
				++interval;
			}
		}
		/* make capture URBs <= 1 ms and smaller than a period */
		urb_packs = min(max_packs_per_urb, urb_packs);
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		ep->nurbs = MAX_URBS;

	/*
	 * Playback endpoints without implicit sync are adjusted so that
	 * a period fits as evenly as possible in the smallest number of
	 * URBs. The total number of URBs is adjusted to the size of the
	 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
	 */
	} else {
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval)) *
				(frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (sync_ep)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);

		/* how many packets will contain an entire ALSA period? */
		max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);

		/* how many URBs will contain a period? */
		urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
				max_packs_per_urb);
		/* how many packets are needed in each URB? */
		urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

		/* limit the number of frames in a single URB */
		ep->max_urb_frames = DIV_ROUND_UP(frames_per_period,
					urbs_per_period);

		/* try to use enough URBs to contain an entire ALSA buffer */
		max_urbs = min((unsigned) MAX_URBS,
				MAX_QUEUE * packs_per_ms / urb_packs);
		ep->nurbs = min(max_urbs, urbs_per_period * periods_per_buffer);
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = urb_packs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++;		/* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(ep->chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}
992
/*
 * configure a sync endpoint, part of snd_usb_endpoint_set_params()
 *
 * Allocates one shared coherent DMA buffer (4 bytes per URB, enough for
 * a 3- or 4-byte feedback value) and SYNC_URBS single-packet URBs
 * pointing into it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (partially
 * allocated URBs are released before returning).
 */
static int sync_ep_set_params(struct snd_usb_endpoint *ep)
{
	int i;

	ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4,
					 GFP_KERNEL, &ep->sync_dma);
	if (!ep->syncbuf)
		return -ENOMEM;

	for (i = 0; i < SYNC_URBS; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = 1;
		u->urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		/* each URB gets its own 4-byte slice of the shared buffer */
		u->urb->transfer_buffer = ep->syncbuf + i * 4;
		u->urb->transfer_dma = ep->sync_dma + i * 4;
		u->urb->transfer_buffer_length = 4;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->number_of_packets = 1;
		u->urb->interval = 1 << ep->syncinterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	ep->nurbs = SYNC_URBS;

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}
1032
/**
 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
 *
 * @ep: the snd_usb_endpoint to configure
 * @pcm_format: the audio format.
 * @channels: the number of audio channels.
 * @period_bytes: the number of bytes in one alsa period.
 * @period_frames: the number of frames in one alsa period.
 * @buffer_periods: the number of periods in one alsa buffer.
 * @rate: the frame rate.
 * @fmt: the USB audio format information
 * @sync_ep: the sync endpoint to use, if any
 *
 * Determine the number of URBs to be used on this endpoint.
 * An endpoint must be configured before it can be started.
 * An endpoint that is already running can not be reconfigured,
 * except for an implicit-feedback endpoint whose parameters are
 * unchanged (then the call succeeds without doing anything).
 *
 * Returns 0 on success, -EBUSY if the endpoint is in use with
 * different parameters, or a negative error from URB allocation.
 */
int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
				snd_pcm_format_t pcm_format,
				unsigned int channels,
				unsigned int period_bytes,
				unsigned int period_frames,
				unsigned int buffer_periods,
				unsigned int rate,
				struct audioformat *fmt,
				struct snd_usb_endpoint *sync_ep)
{
	int err;

	if (ep->use_count != 0) {
		/*
		 * The only tolerated reconfiguration while running: an
		 * implicit feedback endpoint asked for the very same params.
		 */
		bool check = ep->is_implicit_feedback &&
			check_ep_params(ep, pcm_format,
					channels, period_bytes,
					period_frames, buffer_periods,
					fmt, sync_ep);

		if (!check) {
			usb_audio_warn(ep->chip,
				"Unable to change format on ep #%x: already in use\n",
				ep->ep_num);
			return -EBUSY;
		}

		usb_audio_dbg(ep->chip,
			"Ep #%x already in use as implicit feedback but format not changed\n",
			ep->ep_num);
		return 0;
	}

	/* release old buffers, if any */
	release_urbs(ep, 0);

	ep->datainterval = fmt->datainterval;
	ep->maxpacksize = fmt->maxpacksize;
	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);

	/* nominal frequency (Q16.16 frames/packet) and packets per second */
	if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) {
		ep->freqn = get_usb_full_speed_rate(rate);
		ep->pps = 1000 >> ep->datainterval;
	} else {
		ep->freqn = get_usb_high_speed_rate(rate);
		ep->pps = 8000 >> ep->datainterval;
	}

	/* integer packet sizes plus remainder for rate-accurate scheduling */
	ep->sample_rem = rate % ep->pps;
	ep->packsize[0] = rate / ep->pps;
	ep->packsize[1] = (rate + (ep->pps - 1)) / ep->pps;

	/* calculate the frequency in 16.16 format */
	ep->freqm = ep->freqn;
	ep->freqshift = INT_MIN;	/* feedback format not yet detected */

	ep->phase = 0;

	switch (ep->type) {
	case  SND_USB_ENDPOINT_TYPE_DATA:
		err = data_ep_set_params(ep, pcm_format, channels,
					 period_bytes, period_frames,
					 buffer_periods, fmt, sync_ep);
		break;
	case  SND_USB_ENDPOINT_TYPE_SYNC:
		err = sync_ep_set_params(ep);
		break;
	default:
		err = -EINVAL;
	}

	usb_audio_dbg(ep->chip,
		"Setting params for ep #%x (type %d, %d urbs), ret=%d\n",
		ep->ep_num, ep->type, ep->nurbs, err);

	return err;
}
1126
/**
 * snd_usb_endpoint_start: start an snd_usb_endpoint
 *
 * @ep: the endpoint to start
 *
 * A call to this function will increment the use count of the endpoint.
 * In case it is not already running, the URBs for this endpoint will be
 * submitted. Otherwise, this function does nothing.
 *
 * Must be balanced to calls of snd_usb_endpoint_stop().
 *
 * Returns an error if the URB submission failed, 0 in all other cases.
 */
int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
{
	int err;
	unsigned int i;

	if (atomic_read(&ep->chip->shutdown))
		return -EBADFD;

	/* already running? */
	if (++ep->use_count != 1)
		return 0;

	/* just to be sure */
	deactivate_urbs(ep, false);

	ep->active_mask = 0;
	ep->unlink_mask = 0;
	ep->phase = 0;
	ep->sample_accum = 0;

	snd_usb_endpoint_start_quirk(ep);

	/*
	 * If this endpoint has a data endpoint as implicit feedback source,
	 * don't start the urbs here. Instead, mark them all as available,
	 * wait for the record urbs to return and queue the playback urbs
	 * from that context.
	 */

	set_bit(EP_FLAG_RUNNING, &ep->flags);

	if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
		for (i = 0; i < ep->nurbs; i++) {
			struct snd_urb_ctx *ctx = ep->urb + i;
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
		}

		return 0;
	}

	for (i = 0; i < ep->nurbs; i++) {
		struct urb *urb = ep->urb[i].urb;

		if (snd_BUG_ON(!urb))
			goto __error;

		if (usb_pipeout(ep->pipe)) {
			prepare_outbound_urb(ep, urb->context);
		} else {
			prepare_inbound_urb(ep, urb->context);
		}

		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			usb_audio_err(ep->chip,
				"cannot submit urb %d, error %d: %s\n",
				i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &ep->active_mask);
	}

	return 0;

__error:
	/* roll back: clear running state, drop the use count we took above
	 * and unlink any URBs already submitted */
	clear_bit(EP_FLAG_RUNNING, &ep->flags);
	ep->use_count--;
	deactivate_urbs(ep, false);
	return -EPIPE;
}
1210
/**
 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
 *
 * @ep: the endpoint to stop (may be NULL)
 *
 * A call to this function will decrement the use count of the endpoint.
 * In case the last user has requested the endpoint stop, the URBs will
 * actually be deactivated.
 *
 * Must be balanced to calls of snd_usb_endpoint_start().
 *
 * The caller needs to synchronize the pending stop operation via
 * snd_usb_endpoint_sync_pending_stop().
 */
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
{
	if (!ep)
		return;

	/* stop without a matching start is a caller bug; warn and bail out */
	if (snd_BUG_ON(ep->use_count == 0))
		return;

	if (--ep->use_count == 0) {
		deactivate_urbs(ep, false);
		/* completion handlers finish the stop asynchronously */
		set_bit(EP_FLAG_STOPPING, &ep->flags);
	}
}
1238
Daniel Mack94c27212012-04-12 13:51:15 +02001239/**
Daniel Mack94c27212012-04-12 13:51:15 +02001240 * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
1241 *
1242 * @ep: the endpoint to deactivate
1243 *
Eldad Zack9b7c5522013-10-06 22:31:10 +02001244 * If the endpoint is not currently in use, this functions will
1245 * deactivate its associated URBs.
Daniel Mack94c27212012-04-12 13:51:15 +02001246 *
1247 * In case of any active users, this functions does nothing.
Daniel Mack94c27212012-04-12 13:51:15 +02001248 */
Eldad Zack9b7c5522013-10-06 22:31:10 +02001249void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001250{
1251 if (!ep)
Eldad Zack9b7c5522013-10-06 22:31:10 +02001252 return;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001253
1254 if (ep->use_count != 0)
Eldad Zack9b7c5522013-10-06 22:31:10 +02001255 return;
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001256
Eldad Zack239b9f72013-10-06 22:31:09 +02001257 deactivate_urbs(ep, true);
1258 wait_clear_urbs(ep);
Daniel Mack8fdff6a2012-04-12 13:51:11 +02001259}
1260
/**
 * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
 *
 * @ep: the endpoint to release
 *
 * This function does not care for the endpoint's use count but will tear
 * down all the streaming URBs immediately.
 */
void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
{
	/* force-release (second argument non-zero) regardless of users */
	release_urbs(ep, 1);
}
1273
/**
 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
 *
 * @ep: the endpoint to free
 *
 * This frees all resources of the given ep. The caller must have torn
 * down the streaming URBs beforehand (snd_usb_endpoint_release()).
 */
void snd_usb_endpoint_free(struct snd_usb_endpoint *ep)
{
	kfree(ep);
}
1285
/**
 * snd_usb_handle_sync_urb: parse an USB sync packet
 *
 * @ep: the endpoint to handle the packet
 * @sender: the sending endpoint
 * @urb: the received packet
 *
 * This function is called from the context of an endpoint that received
 * the packet and is used to let another endpoint object handle the payload.
 */
void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
			     struct snd_usb_endpoint *sender,
			     const struct urb *urb)
{
	int shift;
	unsigned int f;
	unsigned long flags;

	snd_BUG_ON(ep == sender);

	/*
	 * In case the endpoint is operating in implicit feedback mode, prepare
	 * a new outbound URB that has the same layout as the received packet
	 * and add it to the list of pending urbs. queue_pending_output_urbs()
	 * will take care of them later.
	 */
	if (snd_usb_endpoint_implicit_feedback_sink(ep) &&
	    ep->use_count != 0) {

		/* implicit feedback case */
		int i, bytes = 0;
		struct snd_urb_ctx *in_ctx;
		struct snd_usb_packet_info *out_packet;

		in_ctx = urb->context;

		/* Count overall packet size */
		for (i = 0; i < in_ctx->packets; i++)
			if (urb->iso_frame_desc[i].status == 0)
				bytes += urb->iso_frame_desc[i].actual_length;

		/*
		 * skip empty packets. At least M-Audio's Fast Track Ultra stops
		 * streaming once it received a 0-byte OUT URB
		 */
		if (bytes == 0)
			return;

		spin_lock_irqsave(&ep->lock, flags);
		out_packet = ep->next_packet + ep->next_packet_write_pos;

		/*
		 * Iterate through the inbound packet and prepare the lengths
		 * for the output packet. The OUT packet we are about to send
		 * will have the same amount of payload bytes per stride as the
		 * IN packet we just received. Since the actual size is scaled
		 * by the stride, use the sender stride to calculate the length
		 * in case the number of channels differ between the implicitly
		 * fed-back endpoint and the synchronizing endpoint.
		 */

		out_packet->packets = in_ctx->packets;
		for (i = 0; i < in_ctx->packets; i++) {
			if (urb->iso_frame_desc[i].status == 0)
				out_packet->packet_size[i] =
					urb->iso_frame_desc[i].actual_length / sender->stride;
			else
				out_packet->packet_size[i] = 0;
		}

		/* advance the ring-buffer write position under ep->lock */
		ep->next_packet_write_pos++;
		ep->next_packet_write_pos %= MAX_URBS;
		spin_unlock_irqrestore(&ep->lock, flags);
		queue_pending_output_urbs(ep);

		return;
	}

	/*
	 * process after playback sync complete
	 *
	 * Full speed devices report feedback values in 10.14 format as samples
	 * per frame, high speed devices in 16.16 format as samples per
	 * microframe.
	 *
	 * Because the Audio Class 1 spec was written before USB 2.0, many high
	 * speed devices use a wrong interpretation, some others use an
	 * entirely different format.
	 *
	 * Therefore, we cannot predict what format any particular device uses
	 * and must detect it automatically.
	 */

	/* need at least a 3-byte (10.14) feedback value to proceed */
	if (urb->iso_frame_desc[0].status != 0 ||
	    urb->iso_frame_desc[0].actual_length < 3)
		return;

	f = le32_to_cpup(urb->transfer_buffer);
	if (urb->iso_frame_desc[0].actual_length == 3)
		f &= 0x00ffffff;	/* 3-byte feedback: 10.14 format */
	else
		f &= 0x0fffffff;	/* 4-byte feedback: 16.16 format */

	if (f == 0)
		return;

	if (unlikely(sender->tenor_fb_quirk)) {
		/*
		 * Devices based on Tenor 8802 chipsets (TEAC UD-H01
		 * and others) sometimes change the feedback value
		 * by +/- 0x1.0000.
		 */
		if (f < ep->freqn - 0x8000)
			f += 0xf000;
		else if (f > ep->freqn + 0x8000)
			f -= 0xf000;
	} else if (unlikely(ep->freqshift == INT_MIN)) {
		/*
		 * The first time we see a feedback value, determine its format
		 * by shifting it left or right until it matches the nominal
		 * frequency value. This assumes that the feedback does not
		 * differ from the nominal value more than +50% or -25%.
		 */
		shift = 0;
		while (f < ep->freqn - ep->freqn / 4) {
			f <<= 1;
			shift++;
		}
		while (f > ep->freqn + ep->freqn / 2) {
			f >>= 1;
			shift--;
		}
		ep->freqshift = shift;
	} else if (ep->freqshift >= 0)
		f <<= ep->freqshift;
	else
		f >>= -ep->freqshift;

	if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
		/*
		 * If the frequency looks valid, set it.
		 * This value is referred to in prepare_playback_urb().
		 */
		spin_lock_irqsave(&ep->lock, flags);
		ep->freqm = f;
		spin_unlock_irqrestore(&ep->lock, flags);
	} else {
		/*
		 * Out of range; maybe the shift value is wrong.
		 * Reset it so that we autodetect again the next time.
		 */
		ep->freqshift = INT_MIN;
	}
}
1440