// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//	    Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
//	    Rander Wang <rander.wang@intel.com>
//	    Keyon Jie <yang.jie@linux.intel.com>
//

/*
 * Hardware interface for HDA DSP code loader
 */

#include <linux/firmware.h>
#include <sound/hdaudio_ext.h>
#include <sound/hda_register.h>
#include <sound/sof.h>
#include <sound/sof/ipc4/header.h>
#include "ext_manifest.h"
#include "../ipc4-priv.h"
#include "../ops.h"
#include "../sof-priv.h"
#include "hda.h"

static void hda_ssp_set_cbp_cfp(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	int i;

	/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
	for (i = 0; i < chip->ssp_count; i++) {
		snd_sof_dsp_update_bits_unlocked(sdev, HDA_DSP_BAR,
						 chip->ssp_base_offset
						 + i * SSP_DEV_MEM_SIZE
						 + SSP_SSC1_OFFSET,
						 SSP_SET_CBP_CFP,
						 SSP_SET_CBP_CFP);
	}
}

struct hdac_ext_stream *hda_cl_prepare(struct device *dev, unsigned int format,
				       unsigned int size, struct snd_dma_buffer *dmab,
				       int direction, bool is_iccmax)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_ext_stream *hext_stream;
	struct hdac_stream *hstream;
	int ret;

	hext_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!hext_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &hext_stream->hstream;
	hstream->substream = NULL;

	/* allocate DMA buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab);
	if (ret < 0) {
		dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
		goto out_put;
	}

	hstream->period_bytes = 0; /* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

	if (is_iccmax) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
		hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return hext_stream;

out_free:
	snd_dma_free_pages(dmab);
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS(hda_cl_prepare, SND_SOC_SOF_INTEL_HDA_COMMON);
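
/*
 * Usage sketch (illustrative only, mirroring how hda_dsp_cl_boot_firmware()
 * later in this file drives these helpers; image_data/image_size are
 * hypothetical placeholders for a firmware image already stripped of its
 * header):
 *
 *	struct snd_dma_buffer dmab;
 *	struct hdac_ext_stream *cl_stream;
 *	int err;
 *
 *	cl_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, image_size,
 *				   &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
 *	if (IS_ERR(cl_stream))
 *		return PTR_ERR(cl_stream);
 *
 *	memcpy(dmab.area, image_data, image_size);
 *
 *	err = hda_cl_trigger(sdev->dev, cl_stream, SNDRV_PCM_TRIGGER_START);
 *	...
 *	err = hda_cl_trigger(sdev->dev, cl_stream, SNDRV_PCM_TRIGGER_STOP);
 *	err = hda_cl_cleanup(sdev->dev, &dmab, cl_stream);
 */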

/*
 * The first boot sequence has some extra steps:
 * power up all host-managed cores, unstall/run only the boot core to boot the
 * DSP, then power down any non-boot cores that were powered up.
 */
int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status, target_status;
	u32 flags, ipc_hdr, j;
	unsigned long mask;
	char *dump_msg;
	int ret;

	/* step 1: power up corex */
	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	hda_ssp_set_cbp_cfp(sdev);

	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
	if (!imr_boot)
		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
					     == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);

	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
					    ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/*
	 * step 7:
	 * - Cold/Full boot: wait for ROM init to proceed to download the firmware
	 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
	 */
	if (imr_boot)
		target_status = FSR_STATE_FW_ENTERED;
	else
		target_status = FSR_STATE_INIT_DONE;

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->rom_status_reg, status,
					    (FSR_TO_STATE_CODE(status) == target_status),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    chip->rom_init_timeout *
					    USEC_PER_MSEC);
	if (!ret) {
		/* set enabled cores mask and increment ref count for cores in init_core_mask */
		sdev->enabled_cores_mask |= chip->init_core_mask;
		mask = sdev->enabled_cores_mask;
		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
			sdev->dsp_core_ref_count[j]++;
		return 0;
	}

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;

	/* after max boot attempts make sure that the dump is printed */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags &= ~SOF_DBG_DUMP_OPTIONAL;

	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);

	kfree(dump_msg);
	return ret;
}
EXPORT_SYMBOL_NS(cl_dsp_init, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_cl_trigger(struct device *dev, struct hdac_ext_stream *hext_stream, int cmd)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	struct sof_intel_hda_stream *hda_stream;

	/* code loader is special case that reuses stream ops */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
					  hext_stream);
		reinit_completion(&hda_stream->ioc);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, SOF_HDA_INTCTL,
					1 << hstream->index,
					1 << hstream->index);

		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
					sd_offset,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK,
					SOF_HDA_SD_CTL_DMA_START |
					SOF_HDA_CL_DMA_SD_INT_MASK);

		hstream->running = true;
		return 0;
	default:
		return hda_dsp_stream_trigger(sdev, hext_stream, cmd);
	}
}
EXPORT_SYMBOL_NS(hda_cl_trigger, SND_SOC_SOF_INTEL_HDA_COMMON);

int hda_cl_cleanup(struct device *dev, struct snd_dma_buffer *dmab,
		   struct hdac_ext_stream *hext_stream)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);

	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	hstream->bufsize = 0;
	hstream->format_val = 0;

	return ret;
}
EXPORT_SYMBOL_NS(hda_cl_cleanup, SND_SOC_SOF_INTEL_HDA_COMMON);

#define HDA_CL_DMA_IOC_TIMEOUT_MS 500

int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct sof_intel_hda_stream *hda_stream;
	unsigned long time_left;
	unsigned int reg;
	int ret, status;

	hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
				  hext_stream);

	dev_dbg(sdev->dev, "Code loader DMA starting\n");

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	if (sdev->pdata->ipc_type == SOF_IPC_TYPE_4) {
		/* Wait for completion of transfer */
		time_left = wait_for_completion_timeout(&hda_stream->ioc,
							msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS));

		if (!time_left) {
			dev_err(sdev->dev, "Code loader DMA did not complete\n");
			return -ETIMEDOUT;
		}
		dev_dbg(sdev->dev, "Code loader DMA done\n");
	}

	dev_dbg(sdev->dev, "waiting for FW_ENTERED status\n");

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					       chip->rom_status_reg, reg,
					       (FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
					       HDA_DSP_REG_POLL_INTERVAL_US,
					       HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */

	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	} else {
		dev_dbg(sdev->dev, "Code loader FW_ENTERED status\n");
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	} else {
		dev_dbg(sdev->dev, "Code loader DMA stopped\n");
	}

	return status;
}
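
/*
 * Note on the IPC4 buffer-completion handshake above (a summary of the
 * existing code, not a new mechanism): hda_cl_trigger(START) re-arms the
 * per-stream completion before enabling the DMA, and hda_cl_copy_fw() then
 * waits on it, bounded by HDA_CL_DMA_IOC_TIMEOUT_MS:
 *
 *	reinit_completion(&hda_stream->ioc);		// in hda_cl_trigger()
 *	...
 *	time_left = wait_for_completion_timeout(&hda_stream->ioc,
 *			msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS));
 *
 * The completion is expected to be signalled from the HDA stream interrupt
 * path once the code loader stream raises its IOC interrupt.
 */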

int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct hdac_ext_stream *iccmax_stream;
	struct snd_dma_buffer dmab_bdl;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
		HDA_VS_INTEL_LTRP_GB_MASK;

	/*
	 * Prepare capture stream for ICCMAX. We do not need to store
	 * the data, so use a buffer of PAGE_SIZE for receiving.
	 */
	iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
				       &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE, true);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error.
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab_bdl, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
			    HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware_iccmax, SND_SOC_SOF_INTEL_CNL);

static int hda_dsp_boot_imr(struct snd_sof_dev *sdev)
{
	const struct sof_intel_dsp_desc *chip_info;
	int ret;

	chip_info = get_chip_info(sdev->pdata);
	if (chip_info->cl_init)
		ret = chip_info->cl_init(sdev, 0, true);
	else
		ret = -EINVAL;

	if (!ret)
		hda_sdw_process_wakeen(sdev);

	return ret;
}

int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct snd_dma_buffer dmab;
	int ret, ret1, i;

	if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
		hda->boot_iteration = 0;
		ret = hda_dsp_boot_imr(sdev);
		if (!ret) {
			hda->booted_from_imr = true;
			return 0;
		}

		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
	}

	hda->booted_from_imr = false;

	chip_info = desc->chip_info;

	if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
	stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data,
	       stripped_firmware.size);

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		if (chip_info->cl_init)
			ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
		else
			ret = -EINVAL;

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function in
	 * the audio PCI driver will be invoked by ACPI for the PME event,
	 * initialize the device and process the WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such as IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures power rails are
	 * enabled before accessing the SoundWire SHIM registers.
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * Set the boot_iteration to the last attempt, indicating that the
	 * DSP ROM has been initialized and from this point there will be no
	 * retry done to boot.
	 *
	 * Continue with code loading and firmware boot
	 */
	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
	ret = hda_cl_copy_fw(sdev, hext_stream);
	if (!ret) {
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
		hda->skip_imr_boot = false;
	} else {
		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
		hda->skip_imr_boot = true;
	}

cleanup:
	/*
	 * Perform codeloader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error.
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return primary core id if both fw copy
	 * and stream clean up are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	hda_dsp_ctrl_ppcap_enable(sdev, false);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware, SND_SOC_SOF_INTEL_HDA_COMMON);
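
/*
 * Return value sketch (illustrative): on success hda_dsp_cl_boot_firmware()
 * returns chip_info->init_core_mask (a positive mask), not 0, so a caller
 * distinguishes the boot core mask from errors by sign:
 *
 *	ret = hda_dsp_cl_boot_firmware(sdev);
 *	if (ret < 0)
 *		return ret;		// ROM init, download or cleanup failed
 *	boot_core_mask = ret;		// cores running the downloaded firmware
 */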

int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
			      struct sof_ipc4_fw_library *fw_lib, bool reload)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct sof_ipc4_msg msg = {};
	struct snd_dma_buffer dmab;
	int ret, ret1;

	/* if IMR booting is enabled and fw context is saved for D3 state, skip the loading */
	if (reload && hda->booted_from_imr && ipc4_data->fw_context_save)
		return 0;

	/* the fw_lib has been verified during loading, we can trust the validity here */
	stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
	stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size);

	/*
	 * 1st stage: SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE
	 * Message includes the dma_id to be prepared for the library loading.
	 * If the firmware does not have support for the message, we will
	 * receive -EOPNOTSUPP. In this case we will use single step library
	 * loading and proceed to send the LOAD_LIBRARY message.
	 */
	msg.primary = hext_stream->hstream.stream_tag - 1;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
	if (!ret) {
		int sd_offset = SOF_STREAM_SD_OFFSET(&hext_stream->hstream);
		unsigned int status;

		/*
		 * Make sure that the FIFOS value is not 0 in SDxFIFOS register
		 * which indicates that the firmware set the GEN bit and we can
		 * continue to start the DMA
		 */
		ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
						    sd_offset + SOF_HDA_ADSP_REG_SD_FIFOSIZE,
						    status,
						    status & SOF_HDA_SD_FIFOSIZE_FIFOS_MASK,
						    HDA_DSP_REG_POLL_INTERVAL_US,
						    HDA_DSP_BASEFW_TIMEOUT_US);

		if (ret < 0)
			dev_warn(sdev->dev,
				 "%s: timeout waiting for FIFOS\n", __func__);
	} else if (ret != -EOPNOTSUPP) {
		goto cleanup;
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
		goto cleanup;
	}

	/*
	 * 2nd stage: LOAD_LIBRARY
	 * Message includes the dma_id and the lib_id, the dma_id must be
	 * identical to the one sent via LOAD_LIBRARY_PREPARE
	 */
	msg.primary &= ~SOF_IPC4_MSG_TYPE_MASK;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);

	/* Stop the DMA channel */
	ret1 = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
		if (!ret)
			ret = ret1;
	}

cleanup:
	/* clean up even in case of error and return the first error */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc4_load_library, SND_SOC_SOF_INTEL_HDA_COMMON);
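
/*
 * Illustrative message layout for the two-stage library load above (macro
 * values come from the IPC4 headers; the numbers here are only an example):
 * with code loader stream tag 2 and library id 1, the LOAD_LIBRARY primary
 * word is built as
 *
 *	msg.primary = (2 - 1);					// dma_id
 *	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
 *	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
 *	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
 *	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(1);
 *
 * LOAD_LIBRARY_PREPARE uses the same dma_id with only the message type
 * differing, which is why the code clears SOF_IPC4_MSG_TYPE_MASK and keeps
 * the rest of the word between the two stages.
 */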

int hda_dsp_ext_man_get_cavs_config_data(struct snd_sof_dev *sdev,
					 const struct sof_ext_man_elem_header *hdr)
{
	const struct sof_ext_man_cavs_config_data *config_data =
		container_of(hdr, struct sof_ext_man_cavs_config_data, hdr);
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	int i, elem_num;

	/* calculate total number of config data elements */
	elem_num = (hdr->size - sizeof(struct sof_ext_man_elem_header))
		   / sizeof(struct sof_config_elem);
	if (elem_num <= 0) {
		dev_err(sdev->dev, "cavs config data is inconsistent: %d\n", elem_num);
		return -EINVAL;
	}

	for (i = 0; i < elem_num; i++)
		switch (config_data->elems[i].token) {
		case SOF_EXT_MAN_CAVS_CONFIG_EMPTY:
			/* skip empty token */
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO:
			hda->clk_config_lpro = config_data->elems[i].value;
			dev_dbg(sdev->dev, "FW clock config: %s\n",
				hda->clk_config_lpro ? "LPRO" : "HPRO");
			break;
		case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE:
		case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE:
			/* These elements are defined but not being used yet.
			 * No warning is required.
			 */
			break;
		default:
			dev_info(sdev->dev, "unsupported token type: %d\n",
				 config_data->elems[i].token);
		}

	return 0;
}
EXPORT_SYMBOL_NS(hda_dsp_ext_man_get_cavs_config_data, SND_SOC_SOF_INTEL_HDA_COMMON);
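
/*
 * Element-count arithmetic used above, as a worked example (the struct sizes
 * are illustrative, not taken from the headers): if hdr->size were 40 bytes,
 * the element header 8 bytes and each config element 8 bytes, then
 *
 *	elem_num = (40 - 8) / 8 = 4
 *
 * config data elements would follow the header, each carrying a token/value
 * pair such as SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO.
 */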