/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "endpoint.h"
#include "pcm.h"
#include "quirks.h"

#define EP_FLAG_ACTIVATED	0
#define EP_FLAG_RUNNING		1

/*
 * snd_usb_endpoint is a model that abstracts everything related to a
 * USB endpoint and its streaming.
 *
 * There are functions to activate and deactivate the streaming URBs and
 * optional callbacks to let the pcm logic handle the actual content of the
 * packets for playback and record. Thus, the bus streaming and the audio
 * handlers are fully decoupled.
 *
 * There are two different types of endpoints in audio applications.
 *
 * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both
 * inbound and outbound traffic.
 *
 * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and
 * expect the payload to carry Q10.14 / Q16.16 formatted sync information
 * (3 or 4 bytes).
 *
 * Each endpoint has to be configured prior to being used by calling
 * snd_usb_endpoint_set_params().
 *
 * The model incorporates reference counting, so that multiple users
 * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and
 * only the first user will effectively start the URBs, and only the last
 * one to stop will tear the URBs down again.
 */

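/*
 * A typical life cycle, as a rough sketch only (error handling and the
 * exact calling context are omitted; the variable names are hypothetical):
 *
 *	ep = snd_usb_add_endpoint(chip, alts, ep_num, direction, type);
 *	snd_usb_endpoint_set_params(ep, hw_params, fmt, sync_ep);
 *	snd_usb_endpoint_start(ep);
 *	...
 *	snd_usb_endpoint_stop(ep, 0, 1, 1);
 *
 * The endpoint itself stays on chip->ep_list and is eventually torn down
 * by snd_usb_endpoint_free().
 */
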
/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}

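/*
 * Worked example (informational only): for a 48000 Hz stream, the full
 * speed helper yields ((48000 << 13) + 62) / 125 = 3145728, i.e. 48.0 in
 * Q16.16 (48 frames per 1 ms frame), and the high speed helper yields
 * ((48000 << 10) + 62) / 125 = 393216, i.e. 6.0 in Q16.16 (6 frames per
 * 125 us microframe). The "+ 62" makes the division by 125 round to
 * nearest rather than truncate.
 */
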
/*
 * release a urb context: free its transfer buffer and the urb itself
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->buffer_size)
		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}

static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

/**
 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
 *
 * @ep: The snd_usb_endpoint
 *
 * Determine whether an endpoint is driven by an implicit feedback
 * data endpoint source.
 */
int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep)
{
	return ep->sync_master &&
		ep->sync_master->type == SND_USB_ENDPOINT_TYPE_DATA &&
		ep->type == SND_USB_ENDPOINT_TYPE_DATA &&
		usb_pipeout(ep->pipe);
}

/*
 * For streaming based on information derived from sync endpoints,
 * prepare_outbound_urb_sizes() will call next_packet_size() to
 * determine the number of samples to be sent in the next packet.
 *
 * For implicit feedback, next_packet_size() is unused.
 */
static int next_packet_size(struct snd_usb_endpoint *ep)
{
	unsigned long flags;
	int ret;

	if (ep->fill_max)
		return ep->maxframesize;

	spin_lock_irqsave(&ep->lock, flags);
	ep->phase = (ep->phase & 0xffff)
		+ (ep->freqm << ep->datainterval);
	ret = min(ep->phase >> 16, ep->maxframesize);
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}

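/*
 * Worked example (informational only): for a 48 kHz high speed stream
 * with datainterval 0, freqm nominally equals 6.0 in Q16.16, so ep->phase
 * grows by 0x60000 per call and next_packet_size() returns 6 frames every
 * time. If feedback raises freqm slightly, say to 6.05, the fractional
 * part accumulates in ep->phase and roughly every 20th packet carries
 * 7 frames instead.
 */
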
static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb_ctx->urb);
}

static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;

	if (unlikely(ep->skip_packets > 0)) {
		ep->skip_packets--;
		return;
	}

	if (ep->sync_slave)
		snd_usb_handle_sync_urb(ep->sync_slave, ep, urb);

	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb);
}

static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->packets; ++i)
		ctx->packet_size[i] = next_packet_size(ep);
}

/*
 * Prepare a PLAYBACK urb for submission to the bus.
 */
static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
				 struct snd_urb_ctx *ctx)
{
	int i;
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		if (ep->prepare_data_urb) {
			ep->prepare_data_urb(ep->data_subs, urb);
		} else {
			/* no data provider, so send silence */
			unsigned int offs = 0;
			for (i = 0; i < ctx->packets; ++i) {
				int counts = ctx->packet_size[i];
				urb->iso_frame_desc[i].offset = offs * ep->stride;
				urb->iso_frame_desc[i].length = counts * ep->stride;
				offs += counts;
			}

			urb->number_of_packets = ctx->packets;
			urb->transfer_buffer_length = offs * ep->stride;
			memset(urb->transfer_buffer, ep->silence_value,
			       offs * ep->stride);
		}
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the pipe.
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
}

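/*
 * Worked example for the SND_USB_ENDPOINT_TYPE_SYNC case above
 * (informational only): a full speed device running at 44.1 kHz has
 * ep->freqn = 0x2c199a (44.1 in Q16.16); the Q10.14 value put on the wire
 * is freqn >> 2 = 0x0b0666, sent as the three bytes 0x66 0x06 0x0b.
 * A high speed device receives freqn itself as four little-endian bytes.
 */
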
/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 */
static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
}

/*
 * Send output urbs that have been prepared previously. URBs are dequeued
 * from ep->ready_playback_urbs; in case there aren't any available
 * or there are no packets that have been prepared, this function does
 * nothing.
 *
 * The reason why the functionality of sending and preparing URBs is separated
 * is that host controllers don't guarantee the order in which they return
 * inbound and outbound packets to their submitters.
 *
 * This function is only used for implicit feedback endpoints. For endpoints
 * driven by dedicated sync endpoints, URBs are immediately re-submitted
 * from their completion handler.
 */
static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
{
	while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {

		unsigned long flags;
		struct snd_usb_packet_info *uninitialized_var(packet);
		struct snd_urb_ctx *ctx = NULL;
		struct urb *urb;
		int err, i;

		spin_lock_irqsave(&ep->lock, flags);
		if (ep->next_packet_read_pos != ep->next_packet_write_pos) {
			packet = ep->next_packet + ep->next_packet_read_pos;
			ep->next_packet_read_pos++;
			ep->next_packet_read_pos %= MAX_URBS;

			/* take URB out of FIFO */
			if (!list_empty(&ep->ready_playback_urbs))
				ctx = list_first_entry(&ep->ready_playback_urbs,
					       struct snd_urb_ctx, ready_list);
		}
		spin_unlock_irqrestore(&ep->lock, flags);

		if (ctx == NULL)
			return;

		list_del_init(&ctx->ready_list);
		urb = ctx->urb;

		/* copy over the length information */
		for (i = 0; i < packet->packets; i++)
			ctx->packet_size[i] = packet->packet_size[i];

		/* call the data handler to fill in playback data */
		prepare_outbound_urb(ep, ctx);

		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
		if (err < 0)
			snd_printk(KERN_ERR "Unable to submit urb #%d: %d (urb %p)\n",
				   ctx->index, err, ctx->urb);
		else
			set_bit(ctx->index, &ep->active_mask);
	}
}

/*
 * complete callback for urbs
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN ||	/* device disabled */
		     ep->chip->shutdown))		/* device disconnected */
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		if (snd_usb_endpoint_implict_feedback_sink(ep)) {
			unsigned long flags;

			spin_lock_irqsave(&ep->lock, flags);
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
			spin_unlock_irqrestore(&ep->lock, flags);
			queue_pending_output_urbs(ep);

			goto exit_clear;
		}

		prepare_outbound_urb_sizes(ep, ctx);
		prepare_outbound_urb(ep, ctx);
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	snd_printk(KERN_ERR "cannot submit urb (err = %d)\n", err);
	//snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
}

/**
 * snd_usb_add_endpoint: Add an endpoint to a USB audio chip
 *
 * @chip: The chip
 * @alts: The USB host interface
 * @ep_num: The number of the endpoint to use
 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
 *
 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created. Otherwise, a pointer to the previously
 * created instance is returned. In case of any error, NULL is returned.
 *
 * New endpoints will be added to chip->ep_list and must be freed by
 * calling snd_usb_endpoint_free().
 */
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
					      struct usb_host_interface *alts,
					      int ep_num, int direction, int type)
{
	struct list_head *p;
	struct snd_usb_endpoint *ep;
	int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;

	mutex_lock(&chip->mutex);

	list_for_each(p, &chip->ep_list) {
		ep = list_entry(p, struct snd_usb_endpoint, list);
		if (ep->ep_num == ep_num &&
		    ep->iface == alts->desc.bInterfaceNumber &&
		    ep->alt_idx == alts->desc.bAlternateSetting) {
			snd_printdd(KERN_DEBUG "Re-using EP %x in iface %d,%d @%p\n",
				    ep_num, ep->iface, ep->alt_idx, ep);
			goto __exit_unlock;
		}
	}

	snd_printdd(KERN_DEBUG "Creating new %s %s endpoint #%x\n",
		    is_playback ? "playback" : "capture",
		    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
		    ep_num);

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		goto __exit_unlock;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	ep->iface = alts->desc.bInterfaceNumber;
	ep->alt_idx = alts->desc.bAlternateSetting;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep_num &= USB_ENDPOINT_NUMBER_MASK;

	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	if (type == SND_USB_ENDPOINT_TYPE_SYNC) {
		if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    get_endpoint(alts, 1)->bRefresh >= 1 &&
		    get_endpoint(alts, 1)->bRefresh <= 9)
			ep->syncinterval = get_endpoint(alts, 1)->bRefresh;
		else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
			ep->syncinterval = 1;
		else if (get_endpoint(alts, 1)->bInterval >= 1 &&
			 get_endpoint(alts, 1)->bInterval <= 16)
			ep->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
		else
			ep->syncinterval = 3;

		ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
	}

	list_add_tail(&ep->list, &chip->ep_list);

__exit_unlock:
	mutex_unlock(&chip->mutex);

	return ep;
}

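/*
 * Example caller (sketch only; the surrounding variables are hypothetical):
 * a playback data endpoint and its dedicated sync endpoint are typically
 * requested back to back. Asking for the same endpoint/interface/altsetting
 * combination again returns the instance created on the first call instead
 * of allocating a new one.
 *
 *	data_ep = snd_usb_add_endpoint(chip, alts, data_ep_num,
 *				       SNDRV_PCM_STREAM_PLAYBACK,
 *				       SND_USB_ENDPOINT_TYPE_DATA);
 *	sync_ep = snd_usb_add_endpoint(chip, alts, sync_ep_num,
 *				       SNDRV_PCM_STREAM_CAPTURE,
 *				       SND_USB_ENDPOINT_TYPE_SYNC);
 */
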
/*
 * wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_endpoint *ep)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	unsigned int i;
	int alive;

	do {
		alive = 0;
		for (i = 0; i < ep->nurbs; i++)
			if (test_bit(i, &ep->active_mask))
				alive++;

		if (!alive)
			break;

		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));

	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n",
			   alive, ep->ep_num);

	return 0;
}

/*
 * unlink active urbs.
 */
static int deactivate_urbs(struct snd_usb_endpoint *ep, int force, int can_sleep)
{
	unsigned int i;
	int async;

	if (!force && ep->chip->shutdown) /* to be sure... */
		return -EBADFD;

	async = !can_sleep && ep->chip->async_unlink;

	clear_bit(EP_FLAG_RUNNING, &ep->flags);

	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_read_pos = 0;
	ep->next_packet_write_pos = 0;

	if (!async && in_interrupt())
		return 0;

	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				if (async)
					usb_unlink_urb(u);
				else
					usb_kill_urb(u);
			}
		}
	}

	return 0;
}

/*
 * release an endpoint's urbs
 */
static void release_urbs(struct snd_usb_endpoint *ep, int force)
{
	int i;

	/* route incoming urbs to nirvana */
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	/* stop urbs */
	deactivate_urbs(ep, force, 1);
	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	if (ep->syncbuf)
		usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
				  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
}

/*
 * configure a data endpoint
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep,
			      struct snd_pcm_hw_params *hw_params,
			      struct audioformat *fmt,
			      struct snd_usb_endpoint *sync_ep)
{
	unsigned int maxsize, i, urb_packs, total_packs, packs_per_ms;
	int period_bytes = params_period_bytes(hw_params);
	int format = params_format(hw_params);
	int is_playback = usb_pipeout(ep->pipe);
	int frame_bits = snd_pcm_format_physical_width(params_format(hw_params)) *
			 params_channels(hw_params);

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;
	ep->silence_value = format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;

	/* calculate max. frequency */
	if (ep->maxpacksize) {
		/* whatever fits into a max. size packet */
		maxsize = ep->maxpacksize;
		ep->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	} else {
		/* no max. packet size: just take 25% higher than nominal */
		ep->freqmax = ep->freqn + (ep->freqn >> 2);
		maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - ep->datainterval);
	}

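	/*
	 * Worked example (informational only): for 48 kHz, S16_LE stereo at
	 * high speed with datainterval 0 and no wMaxPacketSize limit, freqn
	 * is 6.0 in Q16.16, so freqmax becomes 7.5 and maxsize evaluates to
	 * 33 bytes, i.e. room for 8 audio frames of 4 bytes each plus
	 * rounding slack.
	 */
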
	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL)
		packs_per_ms = 8 >> ep->datainterval;
	else
		packs_per_ms = 1;

	if (is_playback && !snd_usb_endpoint_implict_feedback_sink(ep)) {
		urb_packs = max(ep->chip->nrpacks, 1);
		urb_packs = min(urb_packs, (unsigned int) MAX_PACKS);
	} else {
		urb_packs = 1;
	}

	urb_packs *= packs_per_ms;

	if (sync_ep && !snd_usb_endpoint_implict_feedback_sink(ep))
		urb_packs = min(urb_packs, 1U << sync_ep->syncinterval);

	/* decide how many packets should be used */
	if (is_playback && !snd_usb_endpoint_implict_feedback_sink(ep)) {
		unsigned int minsize, maxpacks;
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval))
			  * (frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (sync_ep)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);
		total_packs = (period_bytes + minsize - 1) / minsize;
		/* we need at least two URBs for queueing */
		if (total_packs < 2) {
			total_packs = 2;
		} else {
			/* and we don't want too long a queue either */
			maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
			total_packs = min(total_packs, maxpacks);
		}
	} else {
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		total_packs = MAX_URBS * urb_packs;
	}

	ep->nurbs = (total_packs + urb_packs - 1) / urb_packs;
	if (ep->nurbs > MAX_URBS) {
		/* too much... */
		ep->nurbs = MAX_URBS;
		total_packs = MAX_URBS * urb_packs;
	} else if (ep->nurbs < 2) {
		/* too little - we need at least two URBs
		 * to ensure contiguous playback/capture
		 */
		ep->nurbs = 2;
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = (i + 1) * total_packs / ep->nurbs
			- i * total_packs / ep->nurbs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++;		/* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(ep->chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}

/*
 * configure a sync endpoint
 */
static int sync_ep_set_params(struct snd_usb_endpoint *ep,
			      struct snd_pcm_hw_params *hw_params,
			      struct audioformat *fmt)
{
	int i;

	ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4,
					 GFP_KERNEL, &ep->sync_dma);
	if (!ep->syncbuf)
		return -ENOMEM;

	for (i = 0; i < SYNC_URBS; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = 1;
		u->urb = usb_alloc_urb(1, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer = ep->syncbuf + i * 4;
		u->urb->transfer_dma = ep->sync_dma + i * 4;
		u->urb->transfer_buffer_length = 4;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_ISO_ASAP |
					 URB_NO_TRANSFER_DMA_MAP;
		u->urb->number_of_packets = 1;
		u->urb->interval = 1 << ep->syncinterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	ep->nurbs = SYNC_URBS;

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}

/**
 * snd_usb_endpoint_set_params: configure an snd_usb_endpoint
 *
 * @ep: the snd_usb_endpoint to configure
 * @hw_params: the hardware parameters
 * @fmt: the USB audio format information
 * @sync_ep: the sync endpoint to use, if any
 *
 * Determine the number of URBs to be used on this endpoint.
 * An endpoint must be configured before it can be started.
 * An endpoint that is already running can not be reconfigured.
 */
int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
				struct snd_pcm_hw_params *hw_params,
				struct audioformat *fmt,
				struct snd_usb_endpoint *sync_ep)
{
	int err;

	if (ep->use_count != 0) {
		snd_printk(KERN_WARNING "Unable to change format on ep #%x: already in use\n",
			   ep->ep_num);
		return -EBUSY;
	}

	/* release old buffers, if any */
	release_urbs(ep, 0);

	ep->datainterval = fmt->datainterval;
	ep->maxpacksize = fmt->maxpacksize;
	ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX);

	if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL)
		ep->freqn = get_usb_full_speed_rate(params_rate(hw_params));
	else
		ep->freqn = get_usb_high_speed_rate(params_rate(hw_params));

	/* calculate the frequency in 16.16 format */
	ep->freqm = ep->freqn;
	ep->freqshift = INT_MIN;

	ep->phase = 0;

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		err = data_ep_set_params(ep, hw_params, fmt, sync_ep);
		break;
	case SND_USB_ENDPOINT_TYPE_SYNC:
		err = sync_ep_set_params(ep, hw_params, fmt);
		break;
	default:
		err = -EINVAL;
	}

	snd_printdd(KERN_DEBUG "Setting params for ep #%x (type %d, %d urbs), ret=%d\n",
		    ep->ep_num, ep->type, ep->nurbs, err);

	return err;
}

/**
 * snd_usb_endpoint_start: start an snd_usb_endpoint
 *
 * @ep: the endpoint to start
 *
 * A call to this function will increment the use count of the endpoint.
 * In case it is not already running, the URBs for this endpoint will be
 * submitted. Otherwise, this function does nothing.
 *
 * Must be balanced to calls of snd_usb_endpoint_stop().
 *
 * Returns an error if the URB submission failed, 0 in all other cases.
 */
int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
{
	int err;
	unsigned int i;

	if (ep->chip->shutdown)
		return -EBADFD;

	/* already running? */
	if (++ep->use_count != 1)
		return 0;

	ep->active_mask = 0;
	ep->unlink_mask = 0;
	ep->phase = 0;

	snd_usb_endpoint_start_quirk(ep);

	/*
	 * If this endpoint has a data endpoint as implicit feedback source,
	 * don't start the urbs here. Instead, mark them all as available,
	 * wait for the record urbs to return and queue the playback urbs
	 * from that context.
	 */

	set_bit(EP_FLAG_RUNNING, &ep->flags);

	if (snd_usb_endpoint_implict_feedback_sink(ep)) {
		for (i = 0; i < ep->nurbs; i++) {
			struct snd_urb_ctx *ctx = ep->urb + i;
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
		}

		return 0;
	}

	for (i = 0; i < ep->nurbs; i++) {
		struct urb *urb = ep->urb[i].urb;

		if (snd_BUG_ON(!urb))
			goto __error;

		if (usb_pipeout(ep->pipe)) {
			prepare_outbound_urb_sizes(ep, urb->context);
			prepare_outbound_urb(ep, urb->context);
		} else {
			prepare_inbound_urb(ep, urb->context);
		}

		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			snd_printk(KERN_ERR "cannot submit urb %d, error %d: %s\n",
				   i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &ep->active_mask);
	}

	return 0;

__error:
	clear_bit(EP_FLAG_RUNNING, &ep->flags);
	ep->use_count--;
	deactivate_urbs(ep, 0, 0);
	return -EPIPE;
}

/**
 * snd_usb_endpoint_stop: stop an snd_usb_endpoint
 *
 * @ep: the endpoint to stop (may be NULL)
 *
 * A call to this function will decrement the use count of the endpoint.
 * In case the last user has requested the endpoint stop, the URBs will
 * actually be deactivated.
 *
 * Must be balanced to calls of snd_usb_endpoint_start().
 */
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
			   int force, int can_sleep, int wait)
{
	if (!ep)
		return;

	if (snd_BUG_ON(ep->use_count == 0))
		return;

	if (--ep->use_count == 0) {
		deactivate_urbs(ep, force, can_sleep);
		ep->data_subs = NULL;
		ep->sync_slave = NULL;
		ep->retire_data_urb = NULL;
		ep->prepare_data_urb = NULL;

		if (wait)
			wait_clear_urbs(ep);
	}
}

/**
 * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
 *
 * @ep: the endpoint to deactivate
 *
 * If the endpoint is not currently in use, this function will select the
 * alternate interface setting 0 for the interface of this endpoint.
 *
 * In case of any active users, this function does nothing.
 *
 * Returns an error if usb_set_interface() failed, 0 in all other
 * cases.
 */
int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
{
	if (!ep)
		return -EINVAL;

	deactivate_urbs(ep, 1, 1);
	wait_clear_urbs(ep);

	if (ep->use_count != 0)
		return 0;

	clear_bit(EP_FLAG_ACTIVATED, &ep->flags);

	return 0;
}

/**
 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
 *
 * @head: the list header of the endpoint to free
 *
 * This function does not care for the endpoint's use count but will tear
 * down all the streaming URBs immediately and free all resources.
 */
void snd_usb_endpoint_free(struct list_head *head)
{
	struct snd_usb_endpoint *ep;

	ep = list_entry(head, struct snd_usb_endpoint, list);
	release_urbs(ep, 1);
	kfree(ep);
}

/**
 * snd_usb_handle_sync_urb: parse a USB sync packet
 *
 * @ep: the endpoint to handle the packet
 * @sender: the sending endpoint
 * @urb: the received packet
 *
 * This function is called from the context of an endpoint that received
 * the packet and is used to let another endpoint object handle the payload.
 */
void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
			     struct snd_usb_endpoint *sender,
			     const struct urb *urb)
{
	int shift;
	unsigned int f;
	unsigned long flags;

	snd_BUG_ON(ep == sender);

	/*
	 * In case the endpoint is operating in implicit feedback mode, prepare
	 * a new outbound URB that has the same layout as the received packet
	 * and add it to the list of pending urbs. queue_pending_output_urbs()
	 * will take care of them later.
	 */
	if (snd_usb_endpoint_implict_feedback_sink(ep) &&
	    ep->use_count != 0) {

		/* implicit feedback case */
		int i, bytes = 0;
		struct snd_urb_ctx *in_ctx;
		struct snd_usb_packet_info *out_packet;

		in_ctx = urb->context;

		/* Count overall packet size */
		for (i = 0; i < in_ctx->packets; i++)
			if (urb->iso_frame_desc[i].status == 0)
				bytes += urb->iso_frame_desc[i].actual_length;

		/*
		 * skip empty packets. At least M-Audio's Fast Track Ultra stops
		 * streaming once it receives a 0-byte OUT URB
		 */
		if (bytes == 0)
			return;

		spin_lock_irqsave(&ep->lock, flags);
		out_packet = ep->next_packet + ep->next_packet_write_pos;

		/*
		 * Iterate through the inbound packet and prepare the lengths
		 * for the output packet. The OUT packet we are about to send
		 * will have the same amount of payload bytes as the IN
		 * packet we just received.
		 */

		out_packet->packets = in_ctx->packets;
		for (i = 0; i < in_ctx->packets; i++) {
			if (urb->iso_frame_desc[i].status == 0)
				out_packet->packet_size[i] =
					urb->iso_frame_desc[i].actual_length / ep->stride;
			else
				out_packet->packet_size[i] = 0;
		}

		ep->next_packet_write_pos++;
		ep->next_packet_write_pos %= MAX_URBS;
		spin_unlock_irqrestore(&ep->lock, flags);
		queue_pending_output_urbs(ep);

		return;
	}

	/*
	 * process after playback sync complete
	 *
	 * Full speed devices report feedback values in 10.14 format as samples
	 * per frame, high speed devices in 16.16 format as samples per
	 * microframe.
	 *
	 * Because the Audio Class 1 spec was written before USB 2.0, many high
	 * speed devices use an incorrect interpretation, and some others use an
	 * entirely different format.
	 *
	 * Therefore, we cannot predict what format any particular device uses
	 * and must detect it automatically.
	 */

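	/*
	 * Worked example (informational only): at 48 kHz over full speed,
	 * freqn is 48.0 in Q16.16 (0x300000) while a spec-compliant device
	 * reports 48.0 in Q10.14 (0x0c0000); the detection below shifts the
	 * reported value left twice until it falls into the expected range,
	 * so ep->freqshift ends up as 2 and later feedback values are scaled
	 * by the same amount.
	 */
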
	if (urb->iso_frame_desc[0].status != 0 ||
	    urb->iso_frame_desc[0].actual_length < 3)
		return;

	f = le32_to_cpup(urb->transfer_buffer);
	if (urb->iso_frame_desc[0].actual_length == 3)
		f &= 0x00ffffff;
	else
		f &= 0x0fffffff;

	if (f == 0)
		return;

	if (unlikely(ep->freqshift == INT_MIN)) {
		/*
		 * The first time we see a feedback value, determine its format
		 * by shifting it left or right until it matches the nominal
		 * frequency value. This assumes that the feedback does not
		 * differ from the nominal value more than +50% or -25%.
		 */
		shift = 0;
		while (f < ep->freqn - ep->freqn / 4) {
			f <<= 1;
			shift++;
		}
		while (f > ep->freqn + ep->freqn / 2) {
			f >>= 1;
			shift--;
		}
		ep->freqshift = shift;
	} else if (ep->freqshift >= 0)
		f <<= ep->freqshift;
	else
		f >>= -ep->freqshift;

	if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) {
		/*
		 * If the frequency looks valid, set it.
		 * This value is referred to in prepare_playback_urb().
		 */
		spin_lock_irqsave(&ep->lock, flags);
		ep->freqm = f;
		spin_unlock_irqrestore(&ep->lock, flags);
	} else {
		/*
		 * Out of range; maybe the shift value is wrong.
		 * Reset it so that we autodetect again the next time.
		 */
		ep->freqshift = INT_MIN;
	}
}
