// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"

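/*
 * Interrupt handler for the request channel: drain the response ring,
 * record the status (and the HW parameter query result, if any) of the
 * response matching the currently expected id and complete the waiter
 * blocked on the channel's completion.
 */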
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
		case XENSND_OP_CLOSE:
		case XENSND_OP_READ:
		case XENSND_OP_WRITE:
		case XENSND_OP_TRIGGER:
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;

		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

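/*
 * Interrupt handler for the event channel: consume the events published
 * by the backend on the shared event page and forward current position
 * updates to the ALSA layer.
 */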
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xensnd_event_page *page = channel->u.evt.page;
	u32 cons, prod;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *event;

		event = &XENSND_IN_RING_REF(page, cons);
		if (unlikely(event->id != channel->evt_id++))
			continue;

		switch (event->type) {
		case XENSND_EVT_CUR_POS:
			xen_snd_front_alsa_handle_cur_pos(channel,
							  event->op.cur_pos.position);
			break;
		}
	}

	page->in_cons = cons;
	/* Ensure ring contents. */
	virt_wmb();

out:
	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

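/*
 * Push the request prepared at req_prod_pvt onto the shared ring and
 * notify the backend if it expects an event.
 */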
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
	int notify;

	channel->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(channel->irq);
}

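/*
 * Tear down a single channel: wake up anyone still waiting for a response
 * with -EIO, unbind the IRQ, free the event channel and revoke the grant
 * or free the shared page.
 */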
static void evtchnl_free(struct xen_snd_front_info *front_info,
			 struct xen_snd_front_evtchnl *channel)
{
	unsigned long page = 0;

	if (channel->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)channel->u.req.ring.sring;
	else if (channel->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)channel->u.evt.page;

	if (!page)
		return;

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	if (channel->type == EVTCHNL_TYPE_REQ) {
		/* Release anyone still waiting for a response, if any. */
		channel->u.req.resp_status = -EIO;
		complete_all(&channel->u.req.completion);
	}

	if (channel->irq)
		unbind_from_irqhandler(channel->irq, channel);

	if (channel->port)
		xenbus_free_evtchn(front_info->xb_dev, channel->port);

	/* End access and free the page. */
	if (channel->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(channel->gref, 0, page);
	else
		free_page(page);

	memset(channel, 0, sizeof(*channel));
}

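/* Free both channels of every request/event pair and the pair array itself. */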
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}

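/*
 * Allocate one channel: a zeroed shared page granted to the backend, an
 * event channel and a threaded IRQ bound to it.
 */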
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	channel->gref = GRANT_INVALID_REF;
	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

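	/*
	 * Request channels are backed by a standard Xen shared ring, event
	 * channels by a plain shared page with in_prod/in_cons indices.
	 */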
	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			channel->u.req.ring.sring = NULL;
			goto fail;
		}

		handler = evtchnl_interrupt_req;
	} else {
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0)
			goto fail;

		channel->u.evt.page = (struct xensnd_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}

	channel->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);
	return 0;

fail:
	if (page)
		free_page(page);
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}

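/*
 * Allocate a request/event channel pair for every playback and capture
 * stream of every configured PCM instance.
 */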
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
				     int num_streams)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct device *dev = &front_info->xb_dev->dev;
	int d, ret = 0;

	front_info->evt_pairs =
			kcalloc(num_streams,
				sizeof(struct xen_snd_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs)
		return -ENOMEM;

	/* Iterate over devices and their streams and create event channels. */
	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}
	}

	front_info->num_evt_pairs = num_streams;
	return 0;

fail:
	xen_snd_front_evtchnl_free_all(front_info);
	return ret;
}

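/*
 * Write a channel's ring grant reference and event channel number to the
 * given XenStore path within the open transaction.
 */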
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_snd_front_evtchnl *channel,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
	int ret;

	/* Write the ring reference. */
	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
		return ret;
	}

	/* Write the event channel number. */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
		return ret;
	}

	return 0;
}

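/*
 * Publish the references of all channel pairs to XenStore in a single
 * transaction, restarting the transaction on -EAGAIN.
 */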
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}
	return 0;
fail:
	xenbus_transaction_end(xbt, 1);
fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}

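/*
 * Switch both channels of a pair to the connected or disconnected state
 * under their ring I/O locks.
 */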
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
					      bool is_connected)
{
	enum xen_snd_front_evtchnl_state state;

	if (is_connected)
		state = EVTCHNL_STATE_CONNECTED;
	else
		state = EVTCHNL_STATE_DISCONNECTED;

	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.state = state;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.state = state;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}

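/* Reset the next expected request/event ids of both channels of a pair. */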
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.evt_next_id = 0;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.evt_next_id = 0;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}