1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright 2017-2021 NXP 3 4 #include <linux/dma-mapping.h> 5 #include <linux/slab.h> 6 #include <linux/module.h> 7 #include <linux/delay.h> 8 #include <linux/rpmsg.h> 9 #include <sound/core.h> 10 #include <sound/pcm.h> 11 #include <sound/pcm_params.h> 12 #include <sound/dmaengine_pcm.h> 13 #include <sound/soc.h> 14 15 #include "imx-pcm.h" 16 #include "fsl_rpmsg.h" 17 #include "imx-pcm-rpmsg.h" 18 19 static const struct snd_pcm_hardware imx_rpmsg_pcm_hardware = { 20 .info = SNDRV_PCM_INFO_INTERLEAVED | 21 SNDRV_PCM_INFO_BLOCK_TRANSFER | 22 SNDRV_PCM_INFO_BATCH | 23 SNDRV_PCM_INFO_MMAP | 24 SNDRV_PCM_INFO_MMAP_VALID | 25 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP | 26 SNDRV_PCM_INFO_PAUSE | 27 SNDRV_PCM_INFO_RESUME, 28 .buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE, 29 .period_bytes_min = 512, 30 .period_bytes_max = 65536, 31 .periods_min = 2, 32 .periods_max = 6000, 33 .fifo_size = 0, 34 }; 35 36 static int imx_rpmsg_pcm_send_message(struct rpmsg_msg *msg, 37 struct rpmsg_info *info) 38 { 39 struct rpmsg_device *rpdev = info->rpdev; 40 int ret = 0; 41 42 mutex_lock(&info->msg_lock); 43 if (!rpdev) { 44 dev_err(info->dev, "rpmsg channel not ready\n"); 45 mutex_unlock(&info->msg_lock); 46 return -EINVAL; 47 } 48 49 dev_dbg(&rpdev->dev, "send cmd %d\n", msg->s_msg.header.cmd); 50 51 if (!(msg->s_msg.header.type == MSG_TYPE_C)) 52 reinit_completion(&info->cmd_complete); 53 54 ret = rpmsg_send(rpdev->ept, (void *)&msg->s_msg, 55 sizeof(struct rpmsg_s_msg)); 56 if (ret) { 57 dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret); 58 mutex_unlock(&info->msg_lock); 59 return ret; 60 } 61 62 /* No receive msg for TYPE_C command */ 63 if (msg->s_msg.header.type == MSG_TYPE_C) { 64 mutex_unlock(&info->msg_lock); 65 return 0; 66 } 67 68 /* wait response from rpmsg */ 69 ret = wait_for_completion_timeout(&info->cmd_complete, 70 msecs_to_jiffies(RPMSG_TIMEOUT)); 71 if (!ret) { 72 dev_err(&rpdev->dev, "rpmsg_send cmd %d timeout!\n", 73 msg->s_msg.header.cmd); 74 mutex_unlock(&info->msg_lock); 75 return -ETIMEDOUT; 76 } 77 78 memcpy(&msg->r_msg, &info->r_msg, sizeof(struct rpmsg_r_msg)); 79 memcpy(&info->msg[msg->r_msg.header.cmd].r_msg, 80 &msg->r_msg, sizeof(struct rpmsg_r_msg)); 81 82 /* 83 * Reset the buffer pointer to be zero, actully we have 84 * set the buffer pointer to be zero in imx_rpmsg_terminate_all 85 * But if there is timer task queued in queue, after it is 86 * executed the buffer pointer will be changed, so need to 87 * reset it again with TERMINATE command. 88 */ 89 switch (msg->s_msg.header.cmd) { 90 case TX_TERMINATE: 91 info->msg[TX_POINTER].r_msg.param.buffer_offset = 0; 92 break; 93 case RX_TERMINATE: 94 info->msg[RX_POINTER].r_msg.param.buffer_offset = 0; 95 break; 96 default: 97 break; 98 } 99 100 dev_dbg(&rpdev->dev, "cmd:%d, resp %d\n", msg->s_msg.header.cmd, 101 info->r_msg.param.resp); 102 103 mutex_unlock(&info->msg_lock); 104 105 return 0; 106 } 107 108 static int imx_rpmsg_insert_workqueue(struct snd_pcm_substream *substream, 109 struct rpmsg_msg *msg, 110 struct rpmsg_info *info) 111 { 112 unsigned long flags; 113 int ret = 0; 114 115 /* 116 * Queue the work to workqueue. 117 * If the queue is full, drop the message. 118 */ 119 spin_lock_irqsave(&info->wq_lock, flags); 120 if (info->work_write_index != info->work_read_index) { 121 int index = info->work_write_index; 122 123 memcpy(&info->work_list[index].msg, msg, 124 sizeof(struct rpmsg_s_msg)); 125 126 queue_work(info->rpmsg_wq, &info->work_list[index].work); 127 info->work_write_index++; 128 info->work_write_index %= WORK_MAX_NUM; 129 } else { 130 info->msg_drop_count[substream->stream]++; 131 ret = -EPIPE; 132 } 133 spin_unlock_irqrestore(&info->wq_lock, flags); 134 135 return ret; 136 } 137 138 static int imx_rpmsg_pcm_hw_params(struct snd_soc_component *component, 139 struct snd_pcm_substream *substream, 140 struct snd_pcm_hw_params *params) 141 { 142 struct rpmsg_info *info = dev_get_drvdata(component->dev); 143 struct rpmsg_msg *msg; 144 145 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 146 msg = &info->msg[TX_HW_PARAM]; 147 msg->s_msg.header.cmd = TX_HW_PARAM; 148 } else { 149 msg = &info->msg[RX_HW_PARAM]; 150 msg->s_msg.header.cmd = RX_HW_PARAM; 151 } 152 153 msg->s_msg.param.rate = params_rate(params); 154 155 switch (params_format(params)) { 156 case SNDRV_PCM_FORMAT_S16_LE: 157 msg->s_msg.param.format = RPMSG_S16_LE; 158 break; 159 case SNDRV_PCM_FORMAT_S24_LE: 160 msg->s_msg.param.format = RPMSG_S24_LE; 161 break; 162 case SNDRV_PCM_FORMAT_DSD_U16_LE: 163 msg->s_msg.param.format = RPMSG_DSD_U16_LE; 164 break; 165 case SNDRV_PCM_FORMAT_DSD_U32_LE: 166 msg->s_msg.param.format = RPMSG_DSD_U32_LE; 167 break; 168 default: 169 msg->s_msg.param.format = RPMSG_S32_LE; 170 break; 171 } 172 173 switch (params_channels(params)) { 174 case 1: 175 msg->s_msg.param.channels = RPMSG_CH_LEFT; 176 break; 177 case 2: 178 msg->s_msg.param.channels = RPMSG_CH_STEREO; 179 break; 180 default: 181 msg->s_msg.param.channels = params_channels(params); 182 break; 183 } 184 185 info->send_message(msg, info); 186 187 return 0; 188 } 189 190 static snd_pcm_uframes_t imx_rpmsg_pcm_pointer(struct snd_soc_component *component, 191 struct snd_pcm_substream *substream) 192 { 193 struct rpmsg_info *info = dev_get_drvdata(component->dev); 194 struct rpmsg_msg *msg; 195 unsigned int pos = 0; 196 int buffer_tail = 0; 197 198 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 199 msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM]; 200 else 201 msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM]; 202 203 buffer_tail = msg->r_msg.param.buffer_tail; 204 pos = buffer_tail * snd_pcm_lib_period_bytes(substream); 205 206 return bytes_to_frames(substream->runtime, pos); 207 } 208 209 static void imx_rpmsg_timer_callback(struct timer_list *t) 210 { 211 struct stream_timer *stream_timer = 212 from_timer(stream_timer, t, timer); 213 struct snd_pcm_substream *substream = stream_timer->substream; 214 struct rpmsg_info *info = stream_timer->info; 215 struct rpmsg_msg *msg; 216 217 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 218 msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM]; 219 msg->s_msg.header.cmd = TX_PERIOD_DONE; 220 } else { 221 msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM]; 222 msg->s_msg.header.cmd = RX_PERIOD_DONE; 223 } 224 225 imx_rpmsg_insert_workqueue(substream, msg, info); 226 } 227 228 static int imx_rpmsg_pcm_open(struct snd_soc_component *component, 229 struct snd_pcm_substream *substream) 230 { 231 struct rpmsg_info *info = dev_get_drvdata(component->dev); 232 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); 233 struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); 234 struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev); 235 struct snd_pcm_hardware pcm_hardware; 236 struct rpmsg_msg *msg; 237 int ret = 0; 238 int cmd; 239 240 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 241 msg = &info->msg[TX_OPEN]; 242 msg->s_msg.header.cmd = TX_OPEN; 243 244 /* reinitialize buffer counter*/ 245 cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM; 246 info->msg[cmd].s_msg.param.buffer_tail = 0; 247 info->msg[cmd].r_msg.param.buffer_tail = 0; 248 info->msg[TX_POINTER].r_msg.param.buffer_offset = 0; 249 250 } else { 251 msg = &info->msg[RX_OPEN]; 252 msg->s_msg.header.cmd = RX_OPEN; 253 254 /* reinitialize buffer counter*/ 255 cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM; 256 info->msg[cmd].s_msg.param.buffer_tail = 0; 257 info->msg[cmd].r_msg.param.buffer_tail = 0; 258 info->msg[RX_POINTER].r_msg.param.buffer_offset = 0; 259 } 260 261 info->send_message(msg, info); 262 263 pcm_hardware = imx_rpmsg_pcm_hardware; 264 pcm_hardware.buffer_bytes_max = rpmsg->buffer_size; 265 pcm_hardware.period_bytes_max = pcm_hardware.buffer_bytes_max / 2; 266 267 snd_soc_set_runtime_hwparams(substream, &pcm_hardware); 268 269 ret = snd_pcm_hw_constraint_integer(substream->runtime, 270 SNDRV_PCM_HW_PARAM_PERIODS); 271 if (ret < 0) 272 return ret; 273 274 info->msg_drop_count[substream->stream] = 0; 275 276 /* Create timer*/ 277 info->stream_timer[substream->stream].info = info; 278 info->stream_timer[substream->stream].substream = substream; 279 timer_setup(&info->stream_timer[substream->stream].timer, 280 imx_rpmsg_timer_callback, 0); 281 return ret; 282 } 283 284 static int imx_rpmsg_pcm_close(struct snd_soc_component *component, 285 struct snd_pcm_substream *substream) 286 { 287 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); 288 struct rpmsg_info *info = dev_get_drvdata(component->dev); 289 struct rpmsg_msg *msg; 290 291 /* Flush work in workqueue to make TX_CLOSE is the last message */ 292 flush_workqueue(info->rpmsg_wq); 293 294 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 295 msg = &info->msg[TX_CLOSE]; 296 msg->s_msg.header.cmd = TX_CLOSE; 297 } else { 298 msg = &info->msg[RX_CLOSE]; 299 msg->s_msg.header.cmd = RX_CLOSE; 300 } 301 302 info->send_message(msg, info); 303 304 del_timer(&info->stream_timer[substream->stream].timer); 305 306 rtd->dai_link->ignore_suspend = 0; 307 308 if (info->msg_drop_count[substream->stream]) 309 dev_warn(rtd->dev, "Msg is dropped!, number is %d\n", 310 info->msg_drop_count[substream->stream]); 311 312 return 0; 313 } 314 315 static int imx_rpmsg_pcm_prepare(struct snd_soc_component *component, 316 struct snd_pcm_substream *substream) 317 { 318 struct snd_pcm_runtime *runtime = substream->runtime; 319 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); 320 struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); 321 struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev); 322 323 /* 324 * NON-MMAP mode, NONBLOCK, Version 2, enable lpa in dts 325 * four conditions to determine the lpa is enabled. 326 */ 327 if ((runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED || 328 runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) && 329 rpmsg->enable_lpa) { 330 /* 331 * Ignore suspend operation in low power mode 332 * M core will continue playback music on A core suspend. 333 */ 334 rtd->dai_link->ignore_suspend = 1; 335 rpmsg->force_lpa = 1; 336 } else { 337 rpmsg->force_lpa = 0; 338 } 339 340 return 0; 341 } 342 343 static void imx_rpmsg_pcm_dma_complete(void *arg) 344 { 345 struct snd_pcm_substream *substream = arg; 346 347 snd_pcm_period_elapsed(substream); 348 } 349 350 static int imx_rpmsg_prepare_and_submit(struct snd_soc_component *component, 351 struct snd_pcm_substream *substream) 352 { 353 struct rpmsg_info *info = dev_get_drvdata(component->dev); 354 struct rpmsg_msg *msg; 355 356 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 357 msg = &info->msg[TX_BUFFER]; 358 msg->s_msg.header.cmd = TX_BUFFER; 359 } else { 360 msg = &info->msg[RX_BUFFER]; 361 msg->s_msg.header.cmd = RX_BUFFER; 362 } 363 364 /* Send buffer address and buffer size */ 365 msg->s_msg.param.buffer_addr = substream->runtime->dma_addr; 366 msg->s_msg.param.buffer_size = snd_pcm_lib_buffer_bytes(substream); 367 msg->s_msg.param.period_size = snd_pcm_lib_period_bytes(substream); 368 msg->s_msg.param.buffer_tail = 0; 369 370 info->num_period[substream->stream] = msg->s_msg.param.buffer_size / 371 msg->s_msg.param.period_size; 372 373 info->callback[substream->stream] = imx_rpmsg_pcm_dma_complete; 374 info->callback_param[substream->stream] = substream; 375 376 return imx_rpmsg_insert_workqueue(substream, msg, info); 377 } 378 379 static int imx_rpmsg_async_issue_pending(struct snd_soc_component *component, 380 struct snd_pcm_substream *substream) 381 { 382 struct rpmsg_info *info = dev_get_drvdata(component->dev); 383 struct rpmsg_msg *msg; 384 385 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 386 msg = &info->msg[TX_START]; 387 msg->s_msg.header.cmd = TX_START; 388 } else { 389 msg = &info->msg[RX_START]; 390 msg->s_msg.header.cmd = RX_START; 391 } 392 393 return imx_rpmsg_insert_workqueue(substream, msg, info); 394 } 395 396 static int imx_rpmsg_restart(struct snd_soc_component *component, 397 struct snd_pcm_substream *substream) 398 { 399 struct rpmsg_info *info = dev_get_drvdata(component->dev); 400 struct rpmsg_msg *msg; 401 402 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 403 msg = &info->msg[TX_RESTART]; 404 msg->s_msg.header.cmd = TX_RESTART; 405 } else { 406 msg = &info->msg[RX_RESTART]; 407 msg->s_msg.header.cmd = RX_RESTART; 408 } 409 410 return imx_rpmsg_insert_workqueue(substream, msg, info); 411 } 412 413 static int imx_rpmsg_pause(struct snd_soc_component *component, 414 struct snd_pcm_substream *substream) 415 { 416 struct rpmsg_info *info = dev_get_drvdata(component->dev); 417 struct rpmsg_msg *msg; 418 419 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 420 msg = &info->msg[TX_PAUSE]; 421 msg->s_msg.header.cmd = TX_PAUSE; 422 } else { 423 msg = &info->msg[RX_PAUSE]; 424 msg->s_msg.header.cmd = RX_PAUSE; 425 } 426 427 return imx_rpmsg_insert_workqueue(substream, msg, info); 428 } 429 430 static int imx_rpmsg_terminate_all(struct snd_soc_component *component, 431 struct snd_pcm_substream *substream) 432 { 433 struct rpmsg_info *info = dev_get_drvdata(component->dev); 434 struct rpmsg_msg *msg; 435 int cmd; 436 437 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 438 msg = &info->msg[TX_TERMINATE]; 439 msg->s_msg.header.cmd = TX_TERMINATE; 440 /* Clear buffer count*/ 441 cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM; 442 info->msg[cmd].s_msg.param.buffer_tail = 0; 443 info->msg[cmd].r_msg.param.buffer_tail = 0; 444 info->msg[TX_POINTER].r_msg.param.buffer_offset = 0; 445 } else { 446 msg = &info->msg[RX_TERMINATE]; 447 msg->s_msg.header.cmd = RX_TERMINATE; 448 /* Clear buffer count*/ 449 cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM; 450 info->msg[cmd].s_msg.param.buffer_tail = 0; 451 info->msg[cmd].r_msg.param.buffer_tail = 0; 452 info->msg[RX_POINTER].r_msg.param.buffer_offset = 0; 453 } 454 455 del_timer(&info->stream_timer[substream->stream].timer); 456 457 return imx_rpmsg_insert_workqueue(substream, msg, info); 458 } 459 460 static int imx_rpmsg_pcm_trigger(struct snd_soc_component *component, 461 struct snd_pcm_substream *substream, int cmd) 462 { 463 struct snd_pcm_runtime *runtime = substream->runtime; 464 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); 465 struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); 466 struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev); 467 int ret = 0; 468 469 switch (cmd) { 470 case SNDRV_PCM_TRIGGER_START: 471 ret = imx_rpmsg_prepare_and_submit(component, substream); 472 if (ret) 473 return ret; 474 ret = imx_rpmsg_async_issue_pending(component, substream); 475 break; 476 case SNDRV_PCM_TRIGGER_RESUME: 477 if (rpmsg->force_lpa) 478 break; 479 fallthrough; 480 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 481 ret = imx_rpmsg_restart(component, substream); 482 break; 483 case SNDRV_PCM_TRIGGER_SUSPEND: 484 if (!rpmsg->force_lpa) { 485 if (runtime->info & SNDRV_PCM_INFO_PAUSE) 486 ret = imx_rpmsg_pause(component, substream); 487 else 488 ret = imx_rpmsg_terminate_all(component, substream); 489 } 490 break; 491 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 492 ret = imx_rpmsg_pause(component, substream); 493 break; 494 case SNDRV_PCM_TRIGGER_STOP: 495 ret = imx_rpmsg_terminate_all(component, substream); 496 break; 497 default: 498 return -EINVAL; 499 } 500 501 if (ret) 502 return ret; 503 504 return 0; 505 } 506 507 /* 508 * imx_rpmsg_pcm_ack 509 * 510 * Send the period index to M core through rpmsg, but not send 511 * all the period index to M core, reduce some unnessesary msg 512 * to reduce the pressure of rpmsg bandwidth. 513 */ 514 static int imx_rpmsg_pcm_ack(struct snd_soc_component *component, 515 struct snd_pcm_substream *substream) 516 { 517 struct snd_pcm_runtime *runtime = substream->runtime; 518 struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); 519 struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); 520 struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev); 521 struct rpmsg_info *info = dev_get_drvdata(component->dev); 522 snd_pcm_uframes_t period_size = runtime->period_size; 523 snd_pcm_sframes_t avail; 524 struct timer_list *timer; 525 struct rpmsg_msg *msg; 526 unsigned long flags; 527 int buffer_tail = 0; 528 int written_num; 529 530 if (!rpmsg->force_lpa) 531 return 0; 532 533 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 534 msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM]; 535 msg->s_msg.header.cmd = TX_PERIOD_DONE; 536 } else { 537 msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM]; 538 msg->s_msg.header.cmd = RX_PERIOD_DONE; 539 } 540 541 msg->s_msg.header.type = MSG_TYPE_C; 542 543 buffer_tail = (frames_to_bytes(runtime, runtime->control->appl_ptr) % 544 snd_pcm_lib_buffer_bytes(substream)); 545 buffer_tail = buffer_tail / snd_pcm_lib_period_bytes(substream); 546 547 /* There is update for period index */ 548 if (buffer_tail != msg->s_msg.param.buffer_tail) { 549 written_num = buffer_tail - msg->s_msg.param.buffer_tail; 550 if (written_num < 0) 551 written_num += runtime->periods; 552 553 msg->s_msg.param.buffer_tail = buffer_tail; 554 555 /* The notification message is updated to latest */ 556 spin_lock_irqsave(&info->lock[substream->stream], flags); 557 memcpy(&info->notify[substream->stream], msg, 558 sizeof(struct rpmsg_s_msg)); 559 info->notify_updated[substream->stream] = true; 560 spin_unlock_irqrestore(&info->lock[substream->stream], flags); 561 562 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 563 avail = snd_pcm_playback_hw_avail(runtime); 564 else 565 avail = snd_pcm_capture_hw_avail(runtime); 566 567 timer = &info->stream_timer[substream->stream].timer; 568 /* 569 * If the data in the buffer is less than one period before 570 * this fill, which means the data may not enough on M 571 * core side, we need to send message immediately to let 572 * M core know the pointer is updated. 573 * if there is more than one period data in the buffer before 574 * this fill, which means the data is enough on M core side, 575 * we can delay one period (using timer) to send the message 576 * for reduce the message number in workqueue, because the 577 * pointer may be updated by ack function later, we can 578 * send latest pointer to M core side. 579 */ 580 if ((avail - written_num * period_size) <= period_size) { 581 imx_rpmsg_insert_workqueue(substream, msg, info); 582 } else if (rpmsg->force_lpa && !timer_pending(timer)) { 583 int time_msec; 584 585 time_msec = (int)(runtime->period_size * 1000 / runtime->rate); 586 mod_timer(timer, jiffies + msecs_to_jiffies(time_msec)); 587 } 588 } 589 590 return 0; 591 } 592 593 static int imx_rpmsg_pcm_new(struct snd_soc_component *component, 594 struct snd_soc_pcm_runtime *rtd) 595 { 596 struct snd_card *card = rtd->card->snd_card; 597 struct snd_pcm *pcm = rtd->pcm; 598 struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); 599 struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev); 600 int ret; 601 602 ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); 603 if (ret) 604 return ret; 605 606 return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_WC, 607 pcm->card->dev, rpmsg->buffer_size); 608 } 609 610 static const struct snd_soc_component_driver imx_rpmsg_soc_component = { 611 .name = IMX_PCM_DRV_NAME, 612 .pcm_construct = imx_rpmsg_pcm_new, 613 .open = imx_rpmsg_pcm_open, 614 .close = imx_rpmsg_pcm_close, 615 .hw_params = imx_rpmsg_pcm_hw_params, 616 .trigger = imx_rpmsg_pcm_trigger, 617 .pointer = imx_rpmsg_pcm_pointer, 618 .ack = imx_rpmsg_pcm_ack, 619 .prepare = imx_rpmsg_pcm_prepare, 620 }; 621 622 static void imx_rpmsg_pcm_work(struct work_struct *work) 623 { 624 struct work_of_rpmsg *work_of_rpmsg; 625 bool is_notification = false; 626 struct rpmsg_info *info; 627 struct rpmsg_msg msg; 628 unsigned long flags; 629 630 work_of_rpmsg = container_of(work, struct work_of_rpmsg, work); 631 info = work_of_rpmsg->info; 632 633 /* 634 * Every work in the work queue, first we check if there 635 * is update for period is filled, because there may be not 636 * enough data in M core side, need to let M core know 637 * data is updated immediately. 638 */ 639 spin_lock_irqsave(&info->lock[TX], flags); 640 if (info->notify_updated[TX]) { 641 memcpy(&msg, &info->notify[TX], sizeof(struct rpmsg_s_msg)); 642 info->notify_updated[TX] = false; 643 spin_unlock_irqrestore(&info->lock[TX], flags); 644 info->send_message(&msg, info); 645 } else { 646 spin_unlock_irqrestore(&info->lock[TX], flags); 647 } 648 649 spin_lock_irqsave(&info->lock[RX], flags); 650 if (info->notify_updated[RX]) { 651 memcpy(&msg, &info->notify[RX], sizeof(struct rpmsg_s_msg)); 652 info->notify_updated[RX] = false; 653 spin_unlock_irqrestore(&info->lock[RX], flags); 654 info->send_message(&msg, info); 655 } else { 656 spin_unlock_irqrestore(&info->lock[RX], flags); 657 } 658 659 /* Skip the notification message for it has been processed above */ 660 if (work_of_rpmsg->msg.s_msg.header.type == MSG_TYPE_C && 661 (work_of_rpmsg->msg.s_msg.header.cmd == TX_PERIOD_DONE || 662 work_of_rpmsg->msg.s_msg.header.cmd == RX_PERIOD_DONE)) 663 is_notification = true; 664 665 if (!is_notification) 666 info->send_message(&work_of_rpmsg->msg, info); 667 668 /* update read index */ 669 spin_lock_irqsave(&info->wq_lock, flags); 670 info->work_read_index++; 671 info->work_read_index %= WORK_MAX_NUM; 672 spin_unlock_irqrestore(&info->wq_lock, flags); 673 } 674 675 static int imx_rpmsg_pcm_probe(struct platform_device *pdev) 676 { 677 struct snd_soc_component *component; 678 struct rpmsg_info *info; 679 int ret, i; 680 681 info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); 682 if (!info) 683 return -ENOMEM; 684 685 platform_set_drvdata(pdev, info); 686 687 info->rpdev = container_of(pdev->dev.parent, struct rpmsg_device, dev); 688 info->dev = &pdev->dev; 689 /* Setup work queue */ 690 info->rpmsg_wq = alloc_ordered_workqueue(info->rpdev->id.name, 691 WQ_HIGHPRI | 692 WQ_UNBOUND | 693 WQ_FREEZABLE); 694 if (!info->rpmsg_wq) { 695 dev_err(&pdev->dev, "workqueue create failed\n"); 696 return -ENOMEM; 697 } 698 699 /* Write index initialize 1, make it differ with the read index */ 700 info->work_write_index = 1; 701 info->send_message = imx_rpmsg_pcm_send_message; 702 703 for (i = 0; i < WORK_MAX_NUM; i++) { 704 INIT_WORK(&info->work_list[i].work, imx_rpmsg_pcm_work); 705 info->work_list[i].info = info; 706 } 707 708 /* Initialize msg */ 709 for (i = 0; i < MSG_MAX_NUM; i++) { 710 info->msg[i].s_msg.header.cate = IMX_RPMSG_AUDIO; 711 info->msg[i].s_msg.header.major = IMX_RMPSG_MAJOR; 712 info->msg[i].s_msg.header.minor = IMX_RMPSG_MINOR; 713 info->msg[i].s_msg.header.type = MSG_TYPE_A; 714 info->msg[i].s_msg.param.audioindex = 0; 715 } 716 717 init_completion(&info->cmd_complete); 718 mutex_init(&info->msg_lock); 719 spin_lock_init(&info->lock[TX]); 720 spin_lock_init(&info->lock[RX]); 721 spin_lock_init(&info->wq_lock); 722 723 ret = devm_snd_soc_register_component(&pdev->dev, 724 &imx_rpmsg_soc_component, 725 NULL, 0); 726 if (ret) 727 goto fail; 728 729 component = snd_soc_lookup_component(&pdev->dev, NULL); 730 if (!component) { 731 ret = -EINVAL; 732 goto fail; 733 } 734 735 #ifdef CONFIG_DEBUG_FS 736 component->debugfs_prefix = "rpmsg"; 737 #endif 738 739 return 0; 740 741 fail: 742 if (info->rpmsg_wq) 743 destroy_workqueue(info->rpmsg_wq); 744 745 return ret; 746 } 747 748 static void imx_rpmsg_pcm_remove(struct platform_device *pdev) 749 { 750 struct rpmsg_info *info = platform_get_drvdata(pdev); 751 752 if (info->rpmsg_wq) 753 destroy_workqueue(info->rpmsg_wq); 754 } 755 756 #ifdef CONFIG_PM 757 static int imx_rpmsg_pcm_runtime_resume(struct device *dev) 758 { 759 struct rpmsg_info *info = dev_get_drvdata(dev); 760 761 cpu_latency_qos_add_request(&info->pm_qos_req, 0); 762 763 return 0; 764 } 765 766 static int imx_rpmsg_pcm_runtime_suspend(struct device *dev) 767 { 768 struct rpmsg_info *info = dev_get_drvdata(dev); 769 770 cpu_latency_qos_remove_request(&info->pm_qos_req); 771 772 return 0; 773 } 774 #endif 775 776 #ifdef CONFIG_PM_SLEEP 777 static int imx_rpmsg_pcm_suspend(struct device *dev) 778 { 779 struct rpmsg_info *info = dev_get_drvdata(dev); 780 struct rpmsg_msg *rpmsg_tx; 781 struct rpmsg_msg *rpmsg_rx; 782 783 rpmsg_tx = &info->msg[TX_SUSPEND]; 784 rpmsg_rx = &info->msg[RX_SUSPEND]; 785 786 rpmsg_tx->s_msg.header.cmd = TX_SUSPEND; 787 info->send_message(rpmsg_tx, info); 788 789 rpmsg_rx->s_msg.header.cmd = RX_SUSPEND; 790 info->send_message(rpmsg_rx, info); 791 792 return 0; 793 } 794 795 static int imx_rpmsg_pcm_resume(struct device *dev) 796 { 797 struct rpmsg_info *info = dev_get_drvdata(dev); 798 struct rpmsg_msg *rpmsg_tx; 799 struct rpmsg_msg *rpmsg_rx; 800 801 rpmsg_tx = &info->msg[TX_RESUME]; 802 rpmsg_rx = &info->msg[RX_RESUME]; 803 804 rpmsg_tx->s_msg.header.cmd = TX_RESUME; 805 info->send_message(rpmsg_tx, info); 806 807 rpmsg_rx->s_msg.header.cmd = RX_RESUME; 808 info->send_message(rpmsg_rx, info); 809 810 return 0; 811 } 812 #endif /* CONFIG_PM_SLEEP */ 813 814 static const struct dev_pm_ops imx_rpmsg_pcm_pm_ops = { 815 SET_RUNTIME_PM_OPS(imx_rpmsg_pcm_runtime_suspend, 816 imx_rpmsg_pcm_runtime_resume, 817 NULL) 818 SET_SYSTEM_SLEEP_PM_OPS(imx_rpmsg_pcm_suspend, 819 imx_rpmsg_pcm_resume) 820 }; 821 822 static const struct platform_device_id imx_rpmsg_pcm_id_table[] = { 823 { .name = "rpmsg-audio-channel" }, 824 { .name = "rpmsg-micfil-channel" }, 825 { }, 826 }; 827 MODULE_DEVICE_TABLE(platform, imx_rpmsg_pcm_id_table); 828 829 static struct platform_driver imx_pcm_rpmsg_driver = { 830 .probe = imx_rpmsg_pcm_probe, 831 .remove_new = imx_rpmsg_pcm_remove, 832 .id_table = imx_rpmsg_pcm_id_table, 833 .driver = { 834 .name = IMX_PCM_DRV_NAME, 835 .pm = &imx_rpmsg_pcm_pm_ops, 836 }, 837 }; 838 module_platform_driver(imx_pcm_rpmsg_driver); 839 840 MODULE_DESCRIPTION("Freescale SoC Audio RPMSG PCM interface"); 841 MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>"); 842 MODULE_ALIAS("platform:" IMX_PCM_DRV_NAME); 843 MODULE_LICENSE("GPL v2"); 844
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.