// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-topology.c - Implements Platform component ALSA controls/widget
 * handlers.
 *
 * Copyright (C) 2014-2015 Intel Corp
 * Author: Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/uuid.h>
#include <sound/intel-nhlt.h>
#include <sound/soc.h>
#include <sound/soc-acpi.h>
#include <sound/soc-topology.h>
#include <uapi/sound/snd_sst_tokens.h>
#include <uapi/sound/skl-tplg-interface.h>
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
#include "skl-topology.h"
#include "skl.h"
#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"

#define SKL_CH_FIXUP_MASK	(1 << 0)
#define SKL_RATE_FIXUP_MASK	(1 << 1)
#define SKL_FMT_FIXUP_MASK	(1 << 2)
#define SKL_IN_DIR_BIT_MASK	BIT(0)
#define SKL_PIN_COUNT_MASK	GENMASK(7, 4)

static const int mic_mono_list[] = {
	0, 1, 2, 3,
};
static const int mic_stereo_list[][SKL_CH_STEREO] = {
	{0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3},
};
static const int mic_trio_list[][SKL_CH_TRIO] = {
	{0, 1, 2}, {0, 1, 3}, {0, 2, 3}, {1, 2, 3},
};
static const int mic_quatro_list[][SKL_CH_QUATRO] = {
	{0, 1, 2, 3},
};

#define CHECK_HW_PARAMS(ch, freq, bps, prm_ch, prm_freq, prm_bps) \
	((ch == prm_ch) && (bps == prm_bps) && (freq == prm_freq))

void skl_tplg_d0i3_get(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3++;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming++;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming++;
		break;
	}
}

void skl_tplg_d0i3_put(struct skl_dev *skl, enum d0i3_capability caps)
{
	struct skl_d0i3_data *d0i3 = &skl->d0i3;

	switch (caps) {
	case SKL_D0I3_NONE:
		d0i3->non_d0i3--;
		break;

	case SKL_D0I3_STREAMING:
		d0i3->streaming--;
		break;

	case SKL_D0I3_NON_STREAMING:
		d0i3->non_streaming--;
		break;
	}
}

/*
 * The SKL DSP driver models only a few DAPM widget types; the rest are
 * ignored.
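 * Widgets that belong to another component (detected via the widget's
 * DAPM device) are skipped as well.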
 * This helper checks whether the SKL driver handles a given widget.
 */
static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
				  struct device *dev)
{
	if (w->dapm->dev != dev)
		return false;

	switch (w->id) {
	case snd_soc_dapm_dai_link:
	case snd_soc_dapm_dai_in:
	case snd_soc_dapm_aif_in:
	case snd_soc_dapm_aif_out:
	case snd_soc_dapm_dai_out:
	case snd_soc_dapm_switch:
	case snd_soc_dapm_output:
	case snd_soc_dapm_mux:

		return false;
	default:
		return true;
	}
}

static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
{
	struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];

	dev_dbg(skl->dev, "Dumping config\n");
	dev_dbg(skl->dev, "Input Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->inputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->inputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->inputs[0].fmt.ch_cfg);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
		iface->inputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "Output Format:\n");
	dev_dbg(skl->dev, "channels = %d\n", iface->outputs[0].fmt.channels);
	dev_dbg(skl->dev, "s_freq = %d\n", iface->outputs[0].fmt.s_freq);
	dev_dbg(skl->dev, "valid bit depth = %d\n",
		iface->outputs[0].fmt.valid_bit_depth);
	dev_dbg(skl->dev, "ch_cfg = %d\n", iface->outputs[0].fmt.ch_cfg);
}

static void skl_tplg_update_chmap(struct skl_module_fmt *fmt, int chs)
{
	int slot_map = 0xFFFFFFFF;
	int start_slot = 0;
	int i;

	for (i = 0; i < chs; i++) {
		/*
		 * For 2 channels with starting slot as 0, slot map will
		 * look like 0xFFFFFF10.
		 */
		slot_map &= (~(0xF << (4 * i)) | (start_slot << (4 * i)));
		start_slot++;
	}
	fmt->ch_map = slot_map;
}

static void skl_tplg_update_params(struct skl_module_fmt *fmt,
			struct skl_pipe_params *params, int fixup)
{
	if (fixup & SKL_RATE_FIXUP_MASK)
		fmt->s_freq = params->s_freq;
	if (fixup & SKL_CH_FIXUP_MASK) {
		fmt->channels = params->ch;
		skl_tplg_update_chmap(fmt, fmt->channels);
	}
	if (fixup & SKL_FMT_FIXUP_MASK) {
		fmt->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

		/*
		 * 16 bit is a 16 bit container whereas 24 bit is in a 32 bit
		 * container, so update the bit depth accordingly.
		 */
		switch (fmt->valid_bit_depth) {
		case SKL_DEPTH_16BIT:
			fmt->bit_depth = fmt->valid_bit_depth;
			break;

		default:
			fmt->bit_depth = SKL_DEPTH_32BIT;
			break;
		}
	}

}

/*
 * A pipeline may have modules which impact the PCM parameters, like an SRC,
 * a channel converter or a format converter.
 * We need to calculate the output params by applying the 'fixup'.
 * The topology tells the driver which type of fixup to apply by supplying
 * the fixup mask, and based on that we calculate the output.
 *
 * For the FE, the PCM hw_params is the source/target format. The same
 * applies for the BE when its hw_params is invoked.
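 *
 * For example, if params_fixup requests both rate and channel fixups but
 * the module itself converts the channel count, the converter mask clears
 * the channel bit on the opposite side, so only the rate fixup is applied
 * there.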
 * Here, based on the FE/BE pipeline type and the stream direction, we
 * calculate the input and output fixups and then apply them to the module.
 */
static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
		struct skl_pipe_params *params, bool is_fe)
{
	int in_fixup, out_fixup;
	struct skl_module_fmt *in_fmt, *out_fmt;

	/* Fixups will be applied to pin 0 only */
	in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
	out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (is_fe) {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	} else {
		if (is_fe) {
			out_fixup = m_cfg->params_fixup;
			in_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		} else {
			in_fixup = m_cfg->params_fixup;
			out_fixup = (~m_cfg->converter) &
					m_cfg->params_fixup;
		}
	}

	skl_tplg_update_params(in_fmt, params, in_fixup);
	skl_tplg_update_params(out_fmt, params, out_fixup);
}

/*
 * A module needs input and output buffers, which depend on the PCM params;
 * so once we have calculated the params, we need to calculate the buffer
 * sizes as well.
 */
static void skl_tplg_update_buffer_size(struct skl_dev *skl,
				struct skl_module_cfg *mcfg)
{
	int multiplier = 1;
	struct skl_module_fmt *in_fmt, *out_fmt;
	struct skl_module_res *res;

	/*
	 * Since fixups are applied to pin 0 only, ibs and obs need to be
	 * changed for pin 0 only.
	 */
	res = &mcfg->module->resources[mcfg->res_idx];
	in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
	out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;

	if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
		multiplier = 5;

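	/*
	 * ibs/obs hold 1 ms worth of data:
	 * bytes = (rate / 1000) * channels * (container bits / 8),
	 * with a 5x multiplier as headroom for the sample rate converter.
	 */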
	res->ibs = DIV_ROUND_UP(in_fmt->s_freq, 1000) *
			in_fmt->channels * (in_fmt->bit_depth >> 3) *
			multiplier;

	res->obs = DIV_ROUND_UP(out_fmt->s_freq, 1000) *
			out_fmt->channels * (out_fmt->bit_depth >> 3) *
			multiplier;
}

static u8 skl_tplg_be_dev_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_DEVICE_BT;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_DEVICE_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_DEVICE_I2S;
		break;

	default:
		ret = NHLT_DEVICE_INVALID;
		break;
	}

	return ret;
}

static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	int link_type, dir;
	u32 ch, s_freq, s_fmt, s_cont;
	struct nhlt_specific_cfg *cfg;
	u8 dev_type = skl_tplg_be_dev_type(m_cfg->dev_type);
	int fmt_idx = m_cfg->fmt_idx;
	struct skl_module_iface *m_iface = &m_cfg->module->formats[fmt_idx];

	/* check if we already have blob */
	if (m_cfg->formats_config[SKL_PARAM_INIT].caps_size > 0)
		return 0;

	dev_dbg(skl->dev, "Applying default cfg blob\n");
	switch (m_cfg->dev_type) {
	case SKL_DEVICE_DMIC:
		link_type = NHLT_LINK_DMIC;
		dir = SNDRV_PCM_STREAM_CAPTURE;
		s_freq = m_iface->inputs[0].fmt.s_freq;
		s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
		s_cont = m_iface->inputs[0].fmt.bit_depth;
		ch = m_iface->inputs[0].fmt.channels;
		break;

	case SKL_DEVICE_I2S:
		link_type = NHLT_LINK_SSP;
		if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
			dir = SNDRV_PCM_STREAM_PLAYBACK;
			s_freq = m_iface->outputs[0].fmt.s_freq;
			s_fmt = m_iface->outputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->outputs[0].fmt.bit_depth;
			ch = m_iface->outputs[0].fmt.channels;
		} else {
			dir = SNDRV_PCM_STREAM_CAPTURE;
			s_freq = m_iface->inputs[0].fmt.s_freq;
			s_fmt = m_iface->inputs[0].fmt.valid_bit_depth;
			s_cont = m_iface->inputs[0].fmt.bit_depth;
			ch = m_iface->inputs[0].fmt.channels;
		}
		break;

	default:
		return -EINVAL;
	}

	/* update the blob based on virtual bus_id and default params */
	cfg = intel_nhlt_get_endpoint_blob(skl->dev, skl->nhlt, m_cfg->vbus_id,
					   link_type, s_fmt, s_cont, ch,
					   s_freq, dir, dev_type);
	if (cfg) {
		m_cfg->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
		m_cfg->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	} else {
		dev_err(skl->dev, "Blob NULL for id %x type %d dirn %d\n",
			m_cfg->vbus_id, link_type, dir);
		dev_err(skl->dev, "PCM: ch %d, freq %d, fmt %d/%d\n",
			ch, s_freq, s_fmt, s_cont);
		return -EIO;
	}

	return 0;
}

static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	struct skl_module_cfg *m_cfg = w->priv;
	struct skl_pipe_params *params = m_cfg->pipe->p_params;
	int p_conn_type = m_cfg->pipe->conn_type;
	bool is_fe;

	if (!m_cfg->params_fixup)
		return;

	dev_dbg(skl->dev, "Mconfig for widget=%s BEFORE update\n",
		w->name);

	skl_dump_mconfig(skl, m_cfg);

	if (p_conn_type == SKL_PIPE_CONN_TYPE_FE)
		is_fe = true;
	else
		is_fe = false;

	skl_tplg_update_params_fixup(m_cfg, params, is_fe);
	skl_tplg_update_buffer_size(skl, m_cfg);

	dev_dbg(skl->dev, "Mconfig for widget=%s AFTER update\n",
		w->name);

	skl_dump_mconfig(skl, m_cfg);
}

/*
 * Some modules can have multiple params set from user controls which need
 * to be applied after the module is initialized. If the set_param flag is
 * set, the module params are sent after the module has been initialized.
 */
static int skl_tplg_set_module_params(struct snd_soc_dapm_widget *w,
						struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;

	if (mconfig->formats_config[SKL_PARAM_SET].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_SET].set_params == SKL_PARAM_SET) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_SET];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_SET) {
				ret = skl_set_module_params(skl,
						(u32 *)bc->params, bc->size,
						bc->param_id, mconfig);
				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

/*
 * Some module params can be set from user controls and are required at
 * module initialization time. Such a param is identified by its set_param
 * flag being SKL_PARAM_INIT, in which case the parameter is applied as
 * part of module init rather than later.
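 * The params are attached to the SKL_PARAM_INIT format config here so
 * they can be delivered as part of module initialization.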
 */
static int skl_tplg_set_module_init_data(struct snd_soc_dapm_widget *w)
{
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (struct soc_bytes_ext *)k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params != SKL_PARAM_INIT)
				continue;

			mconfig->formats_config[SKL_PARAM_INIT].caps =
							(u32 *)bc->params;
			mconfig->formats_config[SKL_PARAM_INIT].caps_size =
								bc->size;

			break;
		}
	}

	return 0;
}

static int skl_tplg_module_prepare(struct skl_dev *skl, struct skl_pipe *pipe,
		struct snd_soc_dapm_widget *w, struct skl_module_cfg *mcfg)
{
	switch (mcfg->dev_type) {
	case SKL_DEVICE_HDAHOST:
		return skl_pcm_host_dma_prepare(skl->dev, pipe->p_params);

	case SKL_DEVICE_HDALINK:
		return skl_pcm_link_dma_prepare(skl->dev, pipe->p_params);
	}

	return 0;
}

/*
 * Inside a pipe instance, we can have various modules. These modules need
 * to be instantiated in the DSP by invoking the INIT_MODULE IPC, which is
 * done by the skl_init_module() routine, so invoke that for all modules in
 * a pipeline.
 */
static int
skl_tplg_init_pipe_modules(struct skl_dev *skl, struct skl_pipe *pipe)
{
	struct skl_pipe_module *w_module;
	struct snd_soc_dapm_widget *w;
	struct skl_module_cfg *mconfig;
	u8 cfg_idx;
	int ret = 0;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		w = w_module->w;
		mconfig = w->priv;

		/* check if module ids are populated */
		if (mconfig->id.module_id < 0) {
			dev_err(skl->dev,
					"module %pUL id not populated\n",
					(guid_t *)mconfig->guid);
			return -EIO;
		}

		cfg_idx = mconfig->pipe->cur_config_idx;
		mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
		mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

		if (mconfig->module->loadable && skl->dsp->fw_ops.load_mod) {
			ret = skl->dsp->fw_ops.load_mod(skl->dsp,
				mconfig->id.module_id, mconfig->guid);
			if (ret < 0)
				return ret;
		}

		/* prepare the DMA if the module is gateway cpr */
		ret = skl_tplg_module_prepare(skl, pipe, w, mconfig);
		if (ret < 0)
			return ret;

		/* update blob if blob is null for be with default value */
		skl_tplg_update_be_blob(w, skl);

		/*
		 * apply fix/conversion to module params based on
		 * FE/BE params
		 */
		skl_tplg_update_module_params(w, skl);
		uuid_mod = (guid_t *)mconfig->guid;
		mconfig->id.pvt_id = skl_get_pvt_id(skl, uuid_mod,
						mconfig->id.instance_id);
		if (mconfig->id.pvt_id < 0)
			return ret;
		skl_tplg_set_module_init_data(w);

		ret = skl_dsp_get_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			dev_err(skl->dev, "Failed to wake up core %d ret=%d\n",
						mconfig->core_id, ret);
			return ret;
		}

		ret = skl_init_module(skl, mconfig);
		if (ret < 0) {
			skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);
			goto err;
		}

		ret = skl_tplg_set_module_params(w, skl);
		if (ret < 0)
			goto err;
	}

	return 0;
err:
	skl_dsp_put_core(skl->dsp, mconfig->core_id);
	return ret;
}

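/*
 * Undo skl_tplg_init_pipe_modules(): unload any loadable modules, release
 * the private module ids and drop the core refcounts taken during init.
 */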
static int skl_tplg_unload_pipe_modules(struct skl_dev *skl,
						struct skl_pipe *pipe)
{
	int ret = 0;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *mconfig;

	list_for_each_entry(w_module, &pipe->w_list, node) {
		guid_t *uuid_mod;
		mconfig = w_module->w->priv;
		uuid_mod = (guid_t *)mconfig->guid;

		if (mconfig->module->loadable && skl->dsp->fw_ops.unload_mod) {
			ret = skl->dsp->fw_ops.unload_mod(skl->dsp,
						mconfig->id.module_id);
			if (ret < 0)
				return -EIO;
		}
		skl_put_pvt_id(skl, uuid_mod, &mconfig->id.pvt_id);

		ret = skl_dsp_put_core(skl->dsp, mconfig->core_id);
		if (ret < 0) {
			/* don't return; continue with other modules */
			dev_err(skl->dev, "Failed to sleep core %d ret=%d\n",
				mconfig->core_id, ret);
		}
	}

	/* no modules to unload in this path, so return */
	return ret;
}

static void skl_tplg_set_pipe_config_idx(struct skl_pipe *pipe, int idx)
{
	pipe->cur_config_idx = idx;
	pipe->memory_pages = pipe->configs[idx].mem_pages;
}

/*
 * Here, we select the pipe format based on the pipe type and pipe
 * direction to determine the current config index for the pipeline.
 * The config index is then used to select the proper module resources.
 * Intermediate pipes currently have a fixed format, hence we select the
 * 0th configuration by default for such pipes.
 */
static int
skl_tplg_get_pipe_config(struct skl_dev *skl, struct skl_module_cfg *mconfig)
{
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params *params = pipe->p_params;
	struct skl_path_config *pconfig = &pipe->configs[0];
	struct skl_pipe_fmt *fmt = NULL;
	bool in_fmt = false;
	int i;

	if (pipe->nr_cfgs == 0) {
		skl_tplg_set_pipe_config_idx(pipe, 0);
		return 0;
	}

	if (pipe->conn_type == SKL_PIPE_CONN_TYPE_NONE || pipe->nr_cfgs == 1) {
		dev_dbg(skl->dev, "No conn_type or just 1 pathcfg, taking 0th for %d\n",
			pipe->ppl_id);
		skl_tplg_set_pipe_config_idx(pipe, 0);
		return 0;
	}

	if ((pipe->conn_type == SKL_PIPE_CONN_TYPE_FE &&
	     pipe->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
	    (pipe->conn_type == SKL_PIPE_CONN_TYPE_BE &&
	     pipe->direction == SNDRV_PCM_STREAM_CAPTURE))
		in_fmt = true;

	for (i = 0; i < pipe->nr_cfgs; i++) {
		pconfig = &pipe->configs[i];
		if (in_fmt)
			fmt = &pconfig->in_fmt;
		else
			fmt = &pconfig->out_fmt;

		if (CHECK_HW_PARAMS(params->ch, params->s_freq, params->s_fmt,
				    fmt->channels, fmt->freq, fmt->bps)) {
			skl_tplg_set_pipe_config_idx(pipe, i);
			dev_dbg(skl->dev, "Using pipe config: %d\n", i);
			return 0;
		}
	}

	dev_err(skl->dev, "Invalid pipe config: %d %d %d for pipe: %d\n",
		params->ch, params->s_freq, params->s_fmt, pipe->ppl_id);
	return -EINVAL;
}

/*
 * A mixer module represents a pipeline, so in the Pre-PMU event of the
 * mixer we need to create the pipeline. We do the following:
 * - Create the pipeline
 * - Initialize the modules in the pipeline
 * - Finally bind all modules together
 */
static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret;
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_cfg *src_module = NULL, *dst_module, *module;
	struct skl_module_deferred_bind *modules;

	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret < 0)
		return ret;

	/*
	 * Create a list of modules for the pipe.
	 * This list contains the modules from source to sink.
	 */
	ret = skl_create_pipeline(skl, mconfig->pipe);
	if (ret < 0)
		return ret;

	/* Init all pipe modules from source to sink */
	ret = skl_tplg_init_pipe_modules(skl, s_pipe);
	if (ret < 0)
		return ret;

	/* Bind modules from source to sink */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		ret = skl_bind_modules(skl, src_module, dst_module);
		if (ret < 0)
			return ret;

		src_module = dst_module;
	}

	/*
	 * When the destination module is initialized, check for these modules
	 * in the deferred bind list. If found, bind them.
	 */
	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		list_for_each_entry(modules, &skl->bind_list, node) {
			module = w_module->w->priv;
			if (modules->dst == module)
				skl_bind_modules(skl, modules->src,
							modules->dst);
		}
	}

	return 0;
}

static int skl_fill_sink_instance_id(struct skl_dev *skl, u32 *params,
				int size, struct skl_module_cfg *mcfg)
{
	int i, pvt_id;

	if (mcfg->m_type == SKL_MODULE_TYPE_KPB) {
		struct skl_kpb_params *kpb_params =
				(struct skl_kpb_params *)params;
		struct skl_mod_inst_map *inst = kpb_params->u.map;

		for (i = 0; i < kpb_params->num_modules; i++) {
			pvt_id = skl_get_pvt_instance_id_map(skl, inst->mod_id,
								inst->inst_id);
			if (pvt_id < 0)
				return -EINVAL;

			inst->inst_id = pvt_id;
			inst++;
		}
	}

	return 0;
}
/*
 * Some modules require params to be set after the module is bound to
 * all its connected pins.
 *
 * The module provider initializes the set_param flag for such modules and
 * we send the params after binding.
 */
static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
			struct skl_module_cfg *mcfg, struct skl_dev *skl)
{
	int i, ret;
	struct skl_module_cfg *mconfig = w->priv;
	const struct snd_kcontrol_new *k;
	struct soc_bytes_ext *sb;
	struct skl_algo_data *bc;
	struct skl_specific_cfg *sp_cfg;
	u32 *params;

	/*
	 * Check that all out/in pins are in the bind state
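	 * (i.e. every pin of this module has reached SKL_PIN_BIND_DONE);
	 * otherwise return and wait for the remaining binds.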
	 * If so, set the module params.
	 */
	for (i = 0; i < mcfg->module->max_output_pins; i++) {
		if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	for (i = 0; i < mcfg->module->max_input_pins; i++) {
		if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
			return 0;
	}

	if (mconfig->formats_config[SKL_PARAM_BIND].caps_size > 0 &&
	    mconfig->formats_config[SKL_PARAM_BIND].set_params ==
								SKL_PARAM_BIND) {
		sp_cfg = &mconfig->formats_config[SKL_PARAM_BIND];
		ret = skl_set_module_params(skl, sp_cfg->caps,
					sp_cfg->caps_size,
					sp_cfg->param_id, mconfig);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < w->num_kcontrols; i++) {
		k = &w->kcontrol_news[i];
		if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
			sb = (void *) k->private_value;
			bc = (struct skl_algo_data *)sb->dobj.private;

			if (bc->set_params == SKL_PARAM_BIND) {
				params = kmemdup(bc->params, bc->max, GFP_KERNEL);
				if (!params)
					return -ENOMEM;

				skl_fill_sink_instance_id(skl, params, bc->max,
								mconfig);

				ret = skl_set_module_params(skl, params,
						bc->max, bc->param_id, mconfig);
				kfree(params);

				if (ret < 0)
					return ret;
			}
		}
	}

	return 0;
}

static int skl_get_module_id(struct skl_dev *skl, guid_t *uuid)
{
	struct uuid_module *module;

	list_for_each_entry(module, &skl->uuid_list, list) {
		if (guid_equal(uuid, &module->uuid))
			return module->id;
	}

	return -EINVAL;
}

static int skl_tplg_find_moduleid_from_uuid(struct skl_dev *skl,
					const struct snd_kcontrol_new *k)
{
	struct soc_bytes_ext *sb = (void *) k->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct skl_kpb_params *uuid_params, *params;
	struct hdac_bus *bus = skl_to_bus(skl);
	int i, size, module_id;

	if (bc->set_params == SKL_PARAM_BIND && bc->max) {
		uuid_params = (struct skl_kpb_params *)bc->params;
		size = struct_size(params, u.map, uuid_params->num_modules);

		params = devm_kzalloc(bus->dev, size, GFP_KERNEL);
		if (!params)
			return -ENOMEM;

		params->num_modules = uuid_params->num_modules;

		for (i = 0; i < uuid_params->num_modules; i++) {
			module_id = skl_get_module_id(skl,
				&uuid_params->u.map_uuid[i].mod_uuid);
			if (module_id < 0) {
				devm_kfree(bus->dev, params);
				return -EINVAL;
			}

			params->u.map[i].mod_id = module_id;
			params->u.map[i].inst_id =
				uuid_params->u.map_uuid[i].inst_id;
		}

		devm_kfree(bus->dev, bc->params);
		bc->params = (char *)params;
		bc->max = size;
	}

	return 0;
}

/*
 * Retrieve the module id from the UUID mentioned in the
 * post-bind params.
 */
void skl_tplg_add_moduleid_in_bind_params(struct skl_dev *skl,
				struct snd_soc_dapm_widget *w)
{
	struct skl_module_cfg *mconfig = w->priv;
	int i;

	/*
	 * Post-bind params are used only for KPB,
	 * to set copier instances to drain the data
	 * in fast mode.
	 */
	if (mconfig->m_type != SKL_MODULE_TYPE_KPB)
		return;

	for (i = 0; i < w->num_kcontrols; i++)
		if ((w->kcontrol_news[i].access &
			SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
			(skl_tplg_find_moduleid_from_uuid(skl,
			&w->kcontrol_news[i]) < 0))
			dev_err(skl->dev,
				"%s: invalid kpb post bind params\n",
				__func__);
}

static int skl_tplg_module_add_deferred_bind(struct skl_dev *skl,
			struct skl_module_cfg *src, struct skl_module_cfg *dst)
{
	struct skl_module_deferred_bind *m_list, *modules;
	int i;

	/* only supported for module with static pin connection */
	for (i = 0; i < dst->module->max_input_pins; i++) {
		struct skl_module_pin *pin = &dst->m_in_pin[i];

		if (pin->is_dynamic)
			continue;

		if ((pin->id.module_id == src->id.module_id) &&
			(pin->id.instance_id == src->id.instance_id)) {

			if (!list_empty(&skl->bind_list)) {
				list_for_each_entry(modules, &skl->bind_list, node) {
					if (modules->src == src && modules->dst == dst)
						return 0;
				}
			}

			m_list = kzalloc(sizeof(*m_list), GFP_KERNEL);
			if (!m_list)
				return -ENOMEM;

			m_list->src = src;
			m_list->dst = dst;

			list_add(&m_list->node, &skl->bind_list);
		}
	}

	return 0;
}

static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
				struct skl_dev *skl,
				struct snd_soc_dapm_widget *src_w,
				struct skl_module_cfg *src_mconfig)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *sink = NULL, *next_sink = NULL;
	struct skl_module_cfg *sink_mconfig;
	int ret;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (!p->connect)
			continue;

		dev_dbg(skl->dev,
			"%s: src widget=%s\n", __func__, w->name);
		dev_dbg(skl->dev,
			"%s: sink widget=%s\n", __func__, p->sink->name);

		next_sink = p->sink;

		if (!is_skl_dsp_widget_type(p->sink, skl->dev))
			return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);

		/*
		 * Here we check the widgets in the sink pipelines. They can
		 * be of any widget type, and we are only interested in the
		 * ones used by SKL, so check that first.
		 */
		if ((p->sink->priv != NULL) &&
		    is_skl_dsp_widget_type(p->sink, skl->dev)) {

			sink = p->sink;
			sink_mconfig = sink->priv;

			/*
			 * Modules other than the PGA leaf can be connected
			 * directly or via a switch to a module in another
			 * pipeline, e.g. a reference path.
			 * When the path is enabled, the dst module that needs
			 * to be bound may not be initialized yet. If the
			 * module is not initialized, add it to the deferred
			 * bind list, and when the dst module is initialized,
			 * bind this module to the dst module from the
			 * deferred list.
			 */
			if (((src_mconfig->m_state == SKL_MODULE_INIT_DONE)
				&& (sink_mconfig->m_state == SKL_MODULE_UNINIT))) {

				ret = skl_tplg_module_add_deferred_bind(skl,
						src_mconfig, sink_mconfig);

				if (ret < 0)
					return ret;

			}


			if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
				sink_mconfig->m_state == SKL_MODULE_UNINIT)
				continue;

			/* Bind source to sink, mixin is always source */
			ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
			if (ret)
				return ret;

			/* set module params after bind */
			skl_tplg_set_module_bind_params(src_w,
					src_mconfig, skl);
			skl_tplg_set_module_bind_params(sink,
					sink_mconfig, skl);

			/* Start sinks pipe first */
			if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
				if (sink_mconfig->pipe->conn_type !=
							SKL_PIPE_CONN_TYPE_FE)
					ret = skl_run_pipe(skl,
							sink_mconfig->pipe);
				if (ret)
					return ret;
			}
		}
	}

	if (!sink && next_sink)
		return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);

	return 0;
}

/*
 * A PGA represents a module in a pipeline.
 * So in the Pre-PMU event of a PGA we need to do the following:
 * - Bind to the sink pipeline
 *   Since the sink pipes can be running and we don't get a mixer event on
 *   connect for an already running mixer, we need to find the sink pipes
 *   here and bind to them. This way dynamic connect works.
 * - Start the sink pipeline, if not running
 * - Then run the current pipe
 */
static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig;
	int ret = 0;

	src_mconfig = w->priv;

	/*
	 * find which sink it is connected to, bind with the sink,
	 * if sink is not started, start sink pipe first, then start
	 * this pipe
	 */
	ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
	if (ret)
		return ret;

	/* Start source pipe last after starting all sinks */
	if (src_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
		return skl_run_pipe(skl, src_mconfig->pipe);

	return 0;
}

static struct snd_soc_dapm_widget *skl_get_src_dsp_widget(
		struct snd_soc_dapm_widget *w, struct skl_dev *skl)
{
	struct snd_soc_dapm_path *p;
	struct snd_soc_dapm_widget *src_w = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		src_w = p->source;
		if (!p->connect)
			continue;

		dev_dbg(skl->dev, "sink widget=%s\n", w->name);
		dev_dbg(skl->dev, "src widget=%s\n", p->source->name);

		/*
		 * Here we check the widgets in the source pipelines. They can
		 * be of any widget type, and we are only interested in the
		 * ones used by SKL, so check that first.
		 */
		if ((p->source->priv != NULL) &&
		    is_skl_dsp_widget_type(p->source, skl->dev)) {
			return p->source;
		}
	}

	if (src_w != NULL)
		return skl_get_src_dsp_widget(src_w, skl);

	return NULL;
}

/*
 * In the Post-PMU event of the mixer we need to do the following:
 * - Check if this pipe is running
 * - If not, then:
 *   - bind this pipeline to its source pipeline
 *     if the source pipe is already running, this means it is a dynamic
 *     connection and we need to bind only to that pipe
 *   - start this pipeline
 */
static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	int ret = 0;
	struct snd_soc_dapm_widget *source, *sink;
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int src_pipe_started = 0;

	sink = w;
	sink_mconfig = sink->priv;

	/*
	 * If the source pipe is already started, the source is driving
	 * another sink before this sink got connected. Since the source is
	 * started, bind this sink to the source and start this pipe.
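	 * If the source pipe is not started yet, the bind is expected to
	 * happen from the source pipe's own power-up handling instead.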
	 */
	source = skl_get_src_dsp_widget(w, skl);
	if (source != NULL) {
		src_mconfig = source->priv;
		sink_mconfig = sink->priv;
		src_pipe_started = 1;

		/*
		 * Check the pipe state: if the source pipe is not started,
		 * there is no need to bind or start the pipe here.
		 */
		if (src_mconfig->pipe->state != SKL_PIPE_STARTED)
			src_pipe_started = 0;
	}

	if (src_pipe_started) {
		ret = skl_bind_modules(skl, src_mconfig, sink_mconfig);
		if (ret)
			return ret;

		/* set module params after bind */
		skl_tplg_set_module_bind_params(source, src_mconfig, skl);
		skl_tplg_set_module_bind_params(sink, sink_mconfig, skl);

		if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
			ret = skl_run_pipe(skl, sink_mconfig->pipe);
	}

	return ret;
}

/*
 * In the Pre-PMD event of the mixer we need to do the following:
 * - Stop the pipe
 * - Find the source connections and remove them from the dapm_path_list
 * - Unbind from the source pipelines if still connected
 */
static int skl_tplg_mixer_dapm_pre_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	sink_mconfig = w->priv;

	/* Stop the pipe */
	ret = skl_stop_pipe(skl, sink_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < sink_mconfig->module->max_input_pins; i++) {
		if (sink_mconfig->m_in_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			src_mconfig = sink_mconfig->m_in_pin[i].tgt_mcfg;
			if (!src_mconfig)
				continue;

			ret = skl_unbind_modules(skl,
						src_mconfig, sink_mconfig);
		}
	}

	return ret;
}

/*
 * In the Post-PMD event of the mixer we need to do the following:
 * - Unbind the modules within the pipeline
 * - Delete the pipeline (the modules do not need to be explicitly
 *   deleted, deleting the pipeline is enough here)
 */
static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_pipe_module *w_module;
	struct skl_module_cfg *src_module = NULL, *dst_module;
	struct skl_pipe *s_pipe = mconfig->pipe;
	struct skl_module_deferred_bind *modules, *tmp;

	if (s_pipe->state == SKL_PIPE_INVALID)
		return -EINVAL;

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		if (list_empty(&skl->bind_list))
			break;

		src_module = w_module->w->priv;

		list_for_each_entry_safe(modules, tmp, &skl->bind_list, node) {
			/*
			 * When the destination module is deleted, unbind the
			 * modules from the deferred bind list.
			 */
			if (modules->dst == src_module) {
				skl_unbind_modules(skl, modules->src,
						modules->dst);
			}

			/*
			 * When the source module is deleted, remove this
			 * entry from the deferred bind list.
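			 * No unbind is issued here; only the bookkeeping
			 * entry is freed.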
			 */
			if (modules->src == src_module) {
				list_del(&modules->node);
				modules->src = NULL;
				modules->dst = NULL;
				kfree(modules);
			}
		}
	}

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		dst_module = w_module->w->priv;

		if (src_module == NULL) {
			src_module = dst_module;
			continue;
		}

		skl_unbind_modules(skl, src_module, dst_module);
		src_module = dst_module;
	}

	skl_delete_pipe(skl, mconfig->pipe);

	list_for_each_entry(w_module, &s_pipe->w_list, node) {
		src_module = w_module->w->priv;
		src_module->m_state = SKL_MODULE_UNINIT;
	}

	return skl_tplg_unload_pipe_modules(skl, s_pipe);
}

/*
 * In the Post-PMD event of the PGA we need to do the following:
 * - Stop the pipeline
 * - If the source pipe is connected, unbind from the source pipelines
 */
static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
							struct skl_dev *skl)
{
	struct skl_module_cfg *src_mconfig, *sink_mconfig;
	int ret = 0, i;

	src_mconfig = w->priv;

	/* Stop the pipe since this is a mixin module */
	ret = skl_stop_pipe(skl, src_mconfig->pipe);
	if (ret)
		return ret;

	for (i = 0; i < src_mconfig->module->max_output_pins; i++) {
		if (src_mconfig->m_out_pin[i].pin_state == SKL_PIN_BIND_DONE) {
			sink_mconfig = src_mconfig->m_out_pin[i].tgt_mcfg;
			if (!sink_mconfig)
				continue;
			/*
			 * This is a connector, and if a path is found it
			 * means the unbind between source and sink has not
			 * happened yet.
			 */
			ret = skl_unbind_modules(skl, src_mconfig,
						sink_mconfig);
		}
	}

	return ret;
}

/*
 * In modelling, we assume there will be ONLY one mixer in a pipeline. If a
 * second one is required, it is created as another pipe entity.
 * The mixer is responsible for pipe management and represents a pipeline
 * instance.
 */
static int skl_tplg_mixer_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *k, int event)
{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMU:
		return skl_tplg_mixer_dapm_post_pmu_event(w, skl);

	case SND_SOC_DAPM_PRE_PMD:
		return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

/*
 * In modelling, we assume the rest of the modules in a pipeline are PGAs.
 * But we are only interested in the last PGA (leaf PGA) in a pipeline, to
 * disconnect it from the sink while the sink is running (two FEs to one BE
 * or one FE to two BEs scenarios).
 */
static int skl_tplg_pga_event(struct snd_soc_dapm_widget *w,
			struct snd_kcontrol *k, int event)

{
	struct snd_soc_dapm_context *dapm = w->dapm;
	struct skl_dev *skl = get_skl_ctx(dapm->dev);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		return skl_tplg_pga_dapm_pre_pmu_event(w, skl);

	case SND_SOC_DAPM_POST_PMD:
		return skl_tplg_pga_dapm_post_pmd_event(w, skl);
	}

	return 0;
}

static int skl_tplg_multi_config_set_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol,
					 bool is_set)
{
	struct snd_soc_component *component =
		snd_soc_kcontrol_component(kcontrol);
	struct hdac_bus *bus = snd_soc_component_get_drvdata(component);
	struct skl_dev *skl = bus_to_skl(bus);
	struct skl_pipeline *ppl;
	struct skl_pipe *pipe = NULL;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 *pipe_id;

	if (!ec)
		return -EINVAL;

	if (is_set && ucontrol->value.enumerated.item[0] > ec->items)
		return -EINVAL;

	pipe_id = ec->dobj.private;

	list_for_each_entry(ppl, &skl->ppl_list, node) {
		if (ppl->pipe->ppl_id == *pipe_id) {
			pipe = ppl->pipe;
			break;
		}
	}
	if (!pipe)
		return -EIO;

	if (is_set)
		skl_tplg_set_pipe_config_idx(pipe, ucontrol->value.enumerated.item[0]);
	else
		ucontrol->value.enumerated.item[0] = pipe->cur_config_idx;

	return 0;
}

static int skl_tplg_multi_config_get(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}

static int skl_tplg_multi_config_set(struct snd_kcontrol *kcontrol,
				     struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}

static int skl_tplg_multi_config_get_dmic(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, false);
}

static int skl_tplg_multi_config_set_dmic(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	return skl_tplg_multi_config_set_get(kcontrol, ucontrol, true);
}

static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
			unsigned int __user *data, unsigned int size)
{
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *bc = (struct skl_algo_data *)sb->dobj.private;
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (w->power)
		skl_get_module_params(skl, (u32 *)bc->params,
				      bc->size, bc->param_id, mconfig);

	/* decrement size for TLV header */
	size -= 2 * sizeof(u32);

	/* check size as we don't want to send kernel data */
	if (size > bc->max)
		size = bc->max;

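	/*
	 * Returned TLV layout: a u32 param id, a u32 payload size, then the
	 * payload itself.
	 */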
	if (bc->params) {
		if (copy_to_user(data, &bc->param_id, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 1, &size, sizeof(u32)))
			return -EFAULT;
		if (copy_to_user(data + 2, bc->params, size))
			return -EFAULT;
	}

	return 0;
}

#define SKL_PARAM_VENDOR_ID 0xff

static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
			const unsigned int __user *data, unsigned int size)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_bytes_ext *sb =
			(struct soc_bytes_ext *)kcontrol->private_value;
	struct skl_algo_data *ac = (struct skl_algo_data *)sb->dobj.private;
	struct skl_dev *skl = get_skl_ctx(w->dapm->dev);

	if (ac->params) {
		if (size > ac->max)
			return -EINVAL;
		ac->size = size;

		if (copy_from_user(ac->params, data, size))
			return -EFAULT;

		if (w->power)
			return skl_set_module_params(skl,
						(u32 *)ac->params, ac->size,
						ac->param_id, mconfig);
	}

	return 0;
}

static int skl_tplg_mic_control_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);

	if (mconfig->dmic_ch_type == ch_type)
		ucontrol->value.enumerated.item[0] =
					mconfig->dmic_ch_combo_index;
	else
		ucontrol->value.enumerated.item[0] = 0;

	return 0;
}

static int skl_fill_mic_sel_params(struct skl_module_cfg *mconfig,
	struct skl_mic_sel_config *mic_cfg, struct device *dev)
{
	struct skl_specific_cfg *sp_cfg =
				&mconfig->formats_config[SKL_PARAM_INIT];

	sp_cfg->caps_size = sizeof(struct skl_mic_sel_config);
	sp_cfg->set_params = SKL_PARAM_SET;
	sp_cfg->param_id = 0x00;
	if (!sp_cfg->caps) {
		sp_cfg->caps = devm_kzalloc(dev, sp_cfg->caps_size, GFP_KERNEL);
		if (!sp_cfg->caps)
			return -ENOMEM;
	}

	mic_cfg->mic_switch = SKL_MIC_SEL_SWITCH;
	mic_cfg->flags = 0;
	memcpy(sp_cfg->caps, mic_cfg, sp_cfg->caps_size);

	return 0;
}

static int skl_tplg_mic_control_set(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dapm_widget *w = snd_soc_dapm_kcontrol_widget(kcontrol);
	struct skl_module_cfg *mconfig = w->priv;
	struct skl_mic_sel_config mic_cfg = {0};
	struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
	u32 ch_type = *((u32 *)ec->dobj.private);
	const int *list;
	u8 in_ch, out_ch, index;

	mconfig->dmic_ch_type = ch_type;
	mconfig->dmic_ch_combo_index = ucontrol->value.enumerated.item[0];

	/* enum control index 0 is INVALID, so no channels to be set */
	if (mconfig->dmic_ch_combo_index == 0)
		return 0;

	/* No valid channel selection map for index 0, so offset by 1 */
	index = mconfig->dmic_ch_combo_index - 1;

	switch (ch_type) {
	case SKL_CH_MONO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_mono_list))
			return -EINVAL;

		list = &mic_mono_list[index];
		break;

	case SKL_CH_STEREO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_stereo_list))
			return -EINVAL;

		list = mic_stereo_list[index];
		break;

	case SKL_CH_TRIO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_trio_list))
			return -EINVAL;

		list = mic_trio_list[index];
		break;

	case SKL_CH_QUATRO:
		if (mconfig->dmic_ch_combo_index > ARRAY_SIZE(mic_quatro_list))
			return -EINVAL;

		list = mic_quatro_list[index];
		break;

	default:
		dev_err(w->dapm->dev,
				"Invalid channel %d for mic_select module\n",
				ch_type);
		return -EINVAL;

	}

	/* The channel type enum maps to the number of channels of that type */
	for (out_ch = 0; out_ch < ch_type; out_ch++) {
		in_ch = list[out_ch];
		mic_cfg.blob[out_ch][in_ch] = SKL_DEFAULT_MIC_SEL_GAIN;
	}

	return skl_fill_mic_sel_params(mconfig, &mic_cfg, w->dapm->dev);
}

/*
 * Fill the DMA id for host and link. In case of a passthrough
 * pipeline, both the host and the link are in the same
 * pipeline, so we need to copy the link or host params based on dev_type.
 */
static void skl_tplg_fill_dma_id(struct skl_module_cfg *mcfg,
				struct skl_pipe_params *params)
{
	struct skl_pipe *pipe = mcfg->pipe;

	if (pipe->passthru) {
		switch (mcfg->dev_type) {
		case SKL_DEVICE_HDALINK:
			pipe->p_params->link_dma_id = params->link_dma_id;
			pipe->p_params->link_index = params->link_index;
			pipe->p_params->link_bps = params->link_bps;
			break;

		case SKL_DEVICE_HDAHOST:
			pipe->p_params->host_dma_id = params->host_dma_id;
			pipe->p_params->host_bps = params->host_bps;
			break;

		default:
			break;
		}
		pipe->p_params->s_fmt = params->s_fmt;
		pipe->p_params->ch = params->ch;
		pipe->p_params->s_freq = params->s_freq;
		pipe->p_params->stream = params->stream;
		pipe->p_params->format = params->format;

	} else {
		memcpy(pipe->p_params, params, sizeof(*params));
	}
}

/*
 * The FE params are passed in the DAI's hw_params.
 * On hw_params, the params are stored in the gateway module of the FE, and
 * we need to calculate the format for the DSP module configuration; that
 * conversion is done here.
 */
int skl_tplg_update_pipe_params(struct device *dev,
			struct skl_module_cfg *mconfig,
			struct skl_pipe_params *params)
{
	struct skl_module_res *res;
	struct skl_dev *skl = get_skl_ctx(dev);
	struct skl_module_fmt *format = NULL;
	u8 cfg_idx = mconfig->pipe->cur_config_idx;

	res = &mconfig->module->resources[mconfig->res_idx];
	skl_tplg_fill_dma_id(mconfig, params);
	mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
	mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;

	if (skl->nr_modules)
		return 0;

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
		format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
	else
		format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;

	/* set the hw_params */
	format->s_freq = params->s_freq;
	format->channels = params->ch;
	format->valid_bit_depth = skl_get_bit_depth(params->s_fmt);

	/*
	 * 16 bit is a 16 bit container whereas 24 bit is in a 32 bit
	 * container, so update the bit depth accordingly.
	 */
	switch (format->valid_bit_depth) {
	case SKL_DEPTH_16BIT:
		format->bit_depth = format->valid_bit_depth;
		break;

	case SKL_DEPTH_24BIT:
	case SKL_DEPTH_32BIT:
		format->bit_depth = SKL_DEPTH_32BIT;
		break;

	default:
		dev_err(dev, "Invalid bit depth %x for pipe\n",
			format->valid_bit_depth);
		return -EINVAL;
	}

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		res->ibs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	} else {
		res->obs = (format->s_freq / 1000) *
				(format->channels) *
				(format->bit_depth >> 3);
	}

	return 0;
}

/*
 * Query the module config for the FE DAI.
 * This is used to find the hw_params set for that DAI and apply them to
 * the FE pipeline.
 */
struct skl_module_cfg *
skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, stream);
	struct snd_soc_dapm_path *p = NULL;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		snd_soc_dapm_widget_for_each_sink_path(w, p) {
			if (p->connect && p->sink->power &&
			    !is_skl_dsp_widget_type(p->sink, dai->dev))
				continue;

			if (p->sink->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->sink->name);
				return p->sink->priv;
			}
		}
	} else {
		snd_soc_dapm_widget_for_each_source_path(w, p) {
			if (p->connect && p->source->power &&
			    !is_skl_dsp_widget_type(p->source, dai->dev))
				continue;

			if (p->source->priv) {
				dev_dbg(dai->dev, "set params for %s\n",
						p->source->name);
				return p->source->priv;
			}
		}
	}

	return NULL;
}

static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
			if (p->connect &&
			    (p->sink->id == snd_soc_dapm_aif_out) &&
			    p->source->priv) {
				mconfig = p->source->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
		struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
{
	struct snd_soc_dapm_path *p;
	struct skl_module_cfg *mconfig = NULL;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
			if (p->connect &&
			    (p->source->id == snd_soc_dapm_aif_in) &&
			    p->sink->priv) {
				mconfig = p->sink->priv;
				return mconfig;
			}
			mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
			if (mconfig)
				return mconfig;
		}
	}
	return mconfig;
}

struct skl_module_cfg *
skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
{
	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, stream);
	struct skl_module_cfg *mconfig;

	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mconfig = skl_get_mconfig_pb_cpr(dai, w);
	} else {
		mconfig = skl_get_mconfig_cap_cpr(dai, w);
	}
	return mconfig;
}

static u8 skl_tplg_be_link_type(int dev_type)
{
	int ret;

	switch (dev_type) {
	case SKL_DEVICE_BT:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_DMIC:
		ret = NHLT_LINK_DMIC;
		break;

	case SKL_DEVICE_I2S:
		ret = NHLT_LINK_SSP;
		break;

	case SKL_DEVICE_HDALINK:
		ret = NHLT_LINK_HDA;
		break;

	default:
		ret = NHLT_LINK_INVALID;
		break;
	}

	return ret;
}

/*
 * Fill the BE gateway parameters.
 * The BE gateway expects a blob of parameters which is kept in the ACPI
 * NHLT table, so query the blob for the interface type (i2s/pdm) and
 * instance.
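 * The blob is looked up by virtual bus id, link type, format and stream
 * direction.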
 * The port can have multiple settings, so pick the one matching the
 * pipeline parameters.
 */
static int skl_tplg_be_fill_pipe_params(struct snd_soc_dai *dai,
				struct skl_module_cfg *mconfig,
				struct skl_pipe_params *params)
{
	struct nhlt_specific_cfg *cfg;
	struct skl_pipe *pipe = mconfig->pipe;
	struct skl_pipe_params save = *pipe->p_params;
	struct skl_pipe_fmt *pipe_fmt;
	struct skl_dev *skl = get_skl_ctx(dai->dev);
	int link_type = skl_tplg_be_link_type(mconfig->dev_type);
	u8 dev_type = skl_tplg_be_dev_type(mconfig->dev_type);
	int ret;

	skl_tplg_fill_dma_id(mconfig, params);

	if (link_type == NHLT_LINK_HDA)
		return 0;

	*pipe->p_params = *params;
	ret = skl_tplg_get_pipe_config(skl, mconfig);
	if (ret)
		goto err;

	dev_dbg(skl->dev, "%s using pipe config: %d\n", __func__, pipe->cur_config_idx);
	if (pipe->direction == SNDRV_PCM_STREAM_PLAYBACK)
		pipe_fmt = &pipe->configs[pipe->cur_config_idx].out_fmt;
	else
		pipe_fmt = &pipe->configs[pipe->cur_config_idx].in_fmt;

	/* update the blob based on virtual bus_id */
	cfg = intel_nhlt_get_endpoint_blob(dai->dev, skl->nhlt,
					mconfig->vbus_id, link_type,
					pipe_fmt->bps, params->s_cont,
					pipe_fmt->channels, pipe_fmt->freq,
					pipe->direction, dev_type);
	if (cfg) {
		mconfig->formats_config[SKL_PARAM_INIT].caps_size = cfg->size;
		mconfig->formats_config[SKL_PARAM_INIT].caps = (u32 *)&cfg->caps;
	} else {
		dev_err(dai->dev, "Blob NULL for id:%d type:%d dirn:%d ch:%d, freq:%d, fmt:%d\n",
			mconfig->vbus_id, link_type, params->stream,
			params->ch, params->s_freq, params->s_fmt);
		ret = -EINVAL;
		goto err;
	}

	return 0;

err:
	*pipe->p_params = save;
	return ret;
}

static int skl_tplg_be_set_src_pipe_params(struct snd_soc_dai *dai,
				struct snd_soc_dapm_widget *w,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_source_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->source, dai->dev) &&
		    p->source->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->source->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_src_pipe_params(dai,
						p->source, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

static int skl_tplg_be_set_sink_pipe_params(struct snd_soc_dai *dai,
	struct snd_soc_dapm_widget *w, struct skl_pipe_params *params)
{
	struct snd_soc_dapm_path *p;
	int ret = -EIO;

	snd_soc_dapm_widget_for_each_sink_path(w, p) {
		if (p->connect && is_skl_dsp_widget_type(p->sink, dai->dev) &&
		    p->sink->priv) {

			ret = skl_tplg_be_fill_pipe_params(dai,
						p->sink->priv, params);
			if (ret < 0)
				return ret;
		} else {
			ret = skl_tplg_be_set_sink_pipe_params(
						dai, p->sink, params);
			if (ret < 0)
				return ret;
		}
	}

	return ret;
}

/*
 * BE hw_params can be source parameters (capture) or sink parameters
 * (playback).
 * Based on the direction, find either the source list or the sink list
 * and set the pipeline parameters.
 */
int skl_tplg_be_update_params(struct snd_soc_dai *dai,
				struct skl_pipe_params *params)
{
	struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(dai, params->stream);

	if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		return skl_tplg_be_set_src_pipe_params(dai, w, params);
	} else {
		return skl_tplg_be_set_sink_pipe_params(dai, w, params);
	}
}

static const struct snd_soc_tplg_widget_events skl_tplg_widget_ops[] = {
	{SKL_MIXER_EVENT, skl_tplg_mixer_event},
	{SKL_VMIXER_EVENT, skl_tplg_mixer_event},
	{SKL_PGA_EVENT, skl_tplg_pga_event},
};

static const struct snd_soc_tplg_bytes_ext_ops skl_tlv_ops[] = {
	{SKL_CONTROL_TYPE_BYTE_TLV, skl_tplg_tlv_control_get,
					skl_tplg_tlv_control_set},
};

static const struct snd_soc_tplg_kcontrol_ops skl_tplg_kcontrol_ops[] = {
	{
		.id = SKL_CONTROL_TYPE_MIC_SELECT,
		.get = skl_tplg_mic_control_get,
		.put = skl_tplg_mic_control_set,
	},
	{
		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT,
		.get = skl_tplg_multi_config_get,
		.put = skl_tplg_multi_config_set,
	},
	{
		.id = SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC,
		.get = skl_tplg_multi_config_get_dmic,
		.put = skl_tplg_multi_config_set_dmic,
	}
};

static int skl_tplg_fill_pipe_cfg(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val, int conf_idx, int dir)
{
	struct skl_pipe_fmt *fmt;
	struct skl_path_config *config;

	switch (dir) {
	case SKL_DIR_IN:
		fmt = &pipe->configs[conf_idx].in_fmt;
		break;

	case SKL_DIR_OUT:
		fmt = &pipe->configs[conf_idx].out_fmt;
		break;

	default:
		dev_err(dev, "Invalid direction: %d\n", dir);
		return -EINVAL;
	}

	config = &pipe->configs[conf_idx];

	switch (tkn) {
	case SKL_TKN_U32_CFG_FREQ:
		fmt->freq = tkn_val;
		break;

	case SKL_TKN_U8_CFG_CHAN:
		fmt->channels = tkn_val;
		break;

	case SKL_TKN_U8_CFG_BPS:
		fmt->bps = tkn_val;
		break;

	case SKL_TKN_U32_PATH_MEM_PGS:
		config->mem_pages = tkn_val;
		break;

	default:
		dev_err(dev, "Invalid token config: %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

static int skl_tplg_fill_pipe_tkn(struct device *dev,
			struct skl_pipe *pipe, u32 tkn,
			u32 tkn_val)
{

	switch (tkn) {
	case SKL_TKN_U32_PIPE_CONN_TYPE:
		pipe->conn_type = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_PRIORITY:
		pipe->pipe_priority = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_MEM_PGS:
		pipe->memory_pages = tkn_val;
		break;

	case SKL_TKN_U32_PMODE:
		pipe->lp_mode = tkn_val;
		break;

	case SKL_TKN_U32_PIPE_DIRECTION:
		pipe->direction = tkn_val;
		break;

	case SKL_TKN_U32_NUM_CONFIGS:
		pipe->nr_cfgs = tkn_val;
		break;

	default:
		dev_err(dev, "Token not handled %d\n", tkn);
		return -EINVAL;
	}

	return 0;
}

/*
 * Add a pipeline by parsing the relevant tokens.
 * Return an existing pipe if the pipe already exists.
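 * (-EEXIST is returned in that case so the caller knows the pipe was not
 * newly created; mconfig->pipe still points to the existing pipe.)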
2031 */ 2032 static int skl_tplg_add_pipe(struct device *dev, 2033 struct skl_module_cfg *mconfig, struct skl_dev *skl, 2034 struct snd_soc_tplg_vendor_value_elem *tkn_elem) 2035 { 2036 struct skl_pipeline *ppl; 2037 struct skl_pipe *pipe; 2038 struct skl_pipe_params *params; 2039 2040 list_for_each_entry(ppl, &skl->ppl_list, node) { 2041 if (ppl->pipe->ppl_id == tkn_elem->value) { 2042 mconfig->pipe = ppl->pipe; 2043 return -EEXIST; 2044 } 2045 } 2046 2047 ppl = devm_kzalloc(dev, sizeof(*ppl), GFP_KERNEL); 2048 if (!ppl) 2049 return -ENOMEM; 2050 2051 pipe = devm_kzalloc(dev, sizeof(*pipe), GFP_KERNEL); 2052 if (!pipe) 2053 return -ENOMEM; 2054 2055 params = devm_kzalloc(dev, sizeof(*params), GFP_KERNEL); 2056 if (!params) 2057 return -ENOMEM; 2058 2059 pipe->p_params = params; 2060 pipe->ppl_id = tkn_elem->value; 2061 INIT_LIST_HEAD(&pipe->w_list); 2062 2063 ppl->pipe = pipe; 2064 list_add(&ppl->node, &skl->ppl_list); 2065 2066 mconfig->pipe = pipe; 2067 mconfig->pipe->state = SKL_PIPE_INVALID; 2068 2069 return 0; 2070 } 2071 2072 static int skl_tplg_get_uuid(struct device *dev, guid_t *guid, 2073 struct snd_soc_tplg_vendor_uuid_elem *uuid_tkn) 2074 { 2075 if (uuid_tkn->token == SKL_TKN_UUID) { 2076 import_guid(guid, uuid_tkn->uuid); 2077 return 0; 2078 } 2079 2080 dev_err(dev, "Not an UUID token %d\n", uuid_tkn->token); 2081 2082 return -EINVAL; 2083 } 2084 2085 static int skl_tplg_fill_pin(struct device *dev, 2086 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2087 struct skl_module_pin *m_pin, 2088 int pin_index) 2089 { 2090 int ret; 2091 2092 switch (tkn_elem->token) { 2093 case SKL_TKN_U32_PIN_MOD_ID: 2094 m_pin[pin_index].id.module_id = tkn_elem->value; 2095 break; 2096 2097 case SKL_TKN_U32_PIN_INST_ID: 2098 m_pin[pin_index].id.instance_id = tkn_elem->value; 2099 break; 2100 2101 case SKL_TKN_UUID: 2102 ret = skl_tplg_get_uuid(dev, &m_pin[pin_index].id.mod_uuid, 2103 (struct snd_soc_tplg_vendor_uuid_elem *)tkn_elem); 2104 if (ret < 0) 2105 return ret; 2106 2107 break; 2108 2109 default: 2110 dev_err(dev, "%d Not a pin token\n", tkn_elem->token); 2111 return -EINVAL; 2112 } 2113 2114 return 0; 2115 } 2116 2117 /* 2118 * Parse for pin config specific tokens to fill up the 2119 * module private data 2120 */ 2121 static int skl_tplg_fill_pins_info(struct device *dev, 2122 struct skl_module_cfg *mconfig, 2123 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2124 int dir, int pin_count) 2125 { 2126 int ret; 2127 struct skl_module_pin *m_pin; 2128 2129 switch (dir) { 2130 case SKL_DIR_IN: 2131 m_pin = mconfig->m_in_pin; 2132 break; 2133 2134 case SKL_DIR_OUT: 2135 m_pin = mconfig->m_out_pin; 2136 break; 2137 2138 default: 2139 dev_err(dev, "Invalid direction value\n"); 2140 return -EINVAL; 2141 } 2142 2143 ret = skl_tplg_fill_pin(dev, tkn_elem, m_pin, pin_count); 2144 if (ret < 0) 2145 return ret; 2146 2147 m_pin[pin_count].in_use = false; 2148 m_pin[pin_count].pin_state = SKL_PIN_UNBIND; 2149 2150 return 0; 2151 } 2152 2153 /* 2154 * Fill up input/output module config format based 2155 * on the direction 2156 */ 2157 static int skl_tplg_fill_fmt(struct device *dev, 2158 struct skl_module_fmt *dst_fmt, 2159 u32 tkn, u32 value) 2160 { 2161 switch (tkn) { 2162 case SKL_TKN_U32_FMT_CH: 2163 dst_fmt->channels = value; 2164 break; 2165 2166 case SKL_TKN_U32_FMT_FREQ: 2167 dst_fmt->s_freq = value; 2168 break; 2169 2170 case SKL_TKN_U32_FMT_BIT_DEPTH: 2171 dst_fmt->bit_depth = value; 2172 break; 2173 2174 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2175 dst_fmt->valid_bit_depth = value; 2176 break; 
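/*
 * SAMPLE_SIZE is the number of valid audio bits, whereas BIT_DEPTH above
 * is the size of the container carrying them, e.g. 24 valid bits are
 * carried in a 32 bit container.
 */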
2177 2178 case SKL_TKN_U32_FMT_CH_CONFIG: 2179 dst_fmt->ch_cfg = value; 2180 break; 2181 2182 case SKL_TKN_U32_FMT_INTERLEAVE: 2183 dst_fmt->interleaving_style = value; 2184 break; 2185 2186 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2187 dst_fmt->sample_type = value; 2188 break; 2189 2190 case SKL_TKN_U32_FMT_CH_MAP: 2191 dst_fmt->ch_map = value; 2192 break; 2193 2194 default: 2195 dev_err(dev, "Invalid token %d\n", tkn); 2196 return -EINVAL; 2197 } 2198 2199 return 0; 2200 } 2201 2202 static int skl_tplg_widget_fill_fmt(struct device *dev, 2203 struct skl_module_iface *fmt, 2204 u32 tkn, u32 val, u32 dir, int fmt_idx) 2205 { 2206 struct skl_module_fmt *dst_fmt; 2207 2208 if (!fmt) 2209 return -EINVAL; 2210 2211 switch (dir) { 2212 case SKL_DIR_IN: 2213 dst_fmt = &fmt->inputs[fmt_idx].fmt; 2214 break; 2215 2216 case SKL_DIR_OUT: 2217 dst_fmt = &fmt->outputs[fmt_idx].fmt; 2218 break; 2219 2220 default: 2221 dev_err(dev, "Invalid direction: %d\n", dir); 2222 return -EINVAL; 2223 } 2224 2225 return skl_tplg_fill_fmt(dev, dst_fmt, tkn, val); 2226 } 2227 2228 static void skl_tplg_fill_pin_dynamic_val( 2229 struct skl_module_pin *mpin, u32 pin_count, u32 value) 2230 { 2231 int i; 2232 2233 for (i = 0; i < pin_count; i++) 2234 mpin[i].is_dynamic = value; 2235 } 2236 2237 /* 2238 * Resource table in the manifest has pin specific resources 2239 * like pin and pin buffer size 2240 */ 2241 static int skl_tplg_manifest_pin_res_tkn(struct device *dev, 2242 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2243 struct skl_module_res *res, int pin_idx, int dir) 2244 { 2245 struct skl_module_pin_resources *m_pin; 2246 2247 switch (dir) { 2248 case SKL_DIR_IN: 2249 m_pin = &res->input[pin_idx]; 2250 break; 2251 2252 case SKL_DIR_OUT: 2253 m_pin = &res->output[pin_idx]; 2254 break; 2255 2256 default: 2257 dev_err(dev, "Invalid pin direction: %d\n", dir); 2258 return -EINVAL; 2259 } 2260 2261 switch (tkn_elem->token) { 2262 case SKL_TKN_MM_U32_RES_PIN_ID: 2263 m_pin->pin_index = tkn_elem->value; 2264 break; 2265 2266 case SKL_TKN_MM_U32_PIN_BUF: 2267 m_pin->buf_size = tkn_elem->value; 2268 break; 2269 2270 default: 2271 dev_err(dev, "Invalid token: %d\n", tkn_elem->token); 2272 return -EINVAL; 2273 } 2274 2275 return 0; 2276 } 2277 2278 /* 2279 * Fill module specific resources from the manifest's resource 2280 * table like CPS, DMA size, mem_pages. 
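 * Pin specific resource tokens are handed over to
 * skl_tplg_manifest_pin_res_tkn() above.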
2281 */ 2282 static int skl_tplg_fill_res_tkn(struct device *dev, 2283 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2284 struct skl_module_res *res, 2285 int pin_idx, int dir) 2286 { 2287 int ret, tkn_count = 0; 2288 2289 if (!res) 2290 return -EINVAL; 2291 2292 switch (tkn_elem->token) { 2293 case SKL_TKN_MM_U32_DMA_SIZE: 2294 res->dma_buffer_size = tkn_elem->value; 2295 break; 2296 2297 case SKL_TKN_MM_U32_CPC: 2298 res->cpc = tkn_elem->value; 2299 break; 2300 2301 case SKL_TKN_U32_MEM_PAGES: 2302 res->is_pages = tkn_elem->value; 2303 break; 2304 2305 case SKL_TKN_U32_OBS: 2306 res->obs = tkn_elem->value; 2307 break; 2308 2309 case SKL_TKN_U32_IBS: 2310 res->ibs = tkn_elem->value; 2311 break; 2312 2313 case SKL_TKN_MM_U32_RES_PIN_ID: 2314 case SKL_TKN_MM_U32_PIN_BUF: 2315 ret = skl_tplg_manifest_pin_res_tkn(dev, tkn_elem, res, 2316 pin_idx, dir); 2317 if (ret < 0) 2318 return ret; 2319 break; 2320 2321 case SKL_TKN_MM_U32_CPS: 2322 case SKL_TKN_U32_MAX_MCPS: 2323 /* ignore unused tokens */ 2324 break; 2325 2326 default: 2327 dev_err(dev, "Not a res type token: %d", tkn_elem->token); 2328 return -EINVAL; 2329 2330 } 2331 tkn_count++; 2332 2333 return tkn_count; 2334 } 2335 2336 /* 2337 * Parse tokens to fill up the module private data 2338 */ 2339 static int skl_tplg_get_token(struct device *dev, 2340 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 2341 struct skl_dev *skl, struct skl_module_cfg *mconfig) 2342 { 2343 int tkn_count = 0; 2344 int ret; 2345 static int is_pipe_exists; 2346 static int pin_index, dir, conf_idx; 2347 struct skl_module_iface *iface = NULL; 2348 struct skl_module_res *res = NULL; 2349 int res_idx = mconfig->res_idx; 2350 int fmt_idx = mconfig->fmt_idx; 2351 2352 /* 2353 * If the manifest structure contains no modules, fill all 2354 * the module data to 0th index. 2355 * res_idx and fmt_idx are default set to 0. 
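 * When the manifest does describe modules, res and iface remain NULL here
 * and the resource/format tables are populated from the manifest tokens
 * instead.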
2356 */ 2357 if (skl->nr_modules == 0) { 2358 res = &mconfig->module->resources[res_idx]; 2359 iface = &mconfig->module->formats[fmt_idx]; 2360 } 2361 2362 if (tkn_elem->token > SKL_TKN_MAX) 2363 return -EINVAL; 2364 2365 switch (tkn_elem->token) { 2366 case SKL_TKN_U8_IN_QUEUE_COUNT: 2367 mconfig->module->max_input_pins = tkn_elem->value; 2368 break; 2369 2370 case SKL_TKN_U8_OUT_QUEUE_COUNT: 2371 mconfig->module->max_output_pins = tkn_elem->value; 2372 break; 2373 2374 case SKL_TKN_U8_DYN_IN_PIN: 2375 if (!mconfig->m_in_pin) 2376 mconfig->m_in_pin = 2377 devm_kcalloc(dev, MAX_IN_QUEUE, 2378 sizeof(*mconfig->m_in_pin), 2379 GFP_KERNEL); 2380 if (!mconfig->m_in_pin) 2381 return -ENOMEM; 2382 2383 skl_tplg_fill_pin_dynamic_val(mconfig->m_in_pin, MAX_IN_QUEUE, 2384 tkn_elem->value); 2385 break; 2386 2387 case SKL_TKN_U8_DYN_OUT_PIN: 2388 if (!mconfig->m_out_pin) 2389 mconfig->m_out_pin = 2390 devm_kcalloc(dev, MAX_IN_QUEUE, 2391 sizeof(*mconfig->m_in_pin), 2392 GFP_KERNEL); 2393 if (!mconfig->m_out_pin) 2394 return -ENOMEM; 2395 2396 skl_tplg_fill_pin_dynamic_val(mconfig->m_out_pin, MAX_OUT_QUEUE, 2397 tkn_elem->value); 2398 break; 2399 2400 case SKL_TKN_U8_TIME_SLOT: 2401 mconfig->time_slot = tkn_elem->value; 2402 break; 2403 2404 case SKL_TKN_U8_CORE_ID: 2405 mconfig->core_id = tkn_elem->value; 2406 break; 2407 2408 case SKL_TKN_U8_MOD_TYPE: 2409 mconfig->m_type = tkn_elem->value; 2410 break; 2411 2412 case SKL_TKN_U8_DEV_TYPE: 2413 mconfig->dev_type = tkn_elem->value; 2414 break; 2415 2416 case SKL_TKN_U8_HW_CONN_TYPE: 2417 mconfig->hw_conn_type = tkn_elem->value; 2418 break; 2419 2420 case SKL_TKN_U16_MOD_INST_ID: 2421 mconfig->id.instance_id = 2422 tkn_elem->value; 2423 break; 2424 2425 case SKL_TKN_U32_MEM_PAGES: 2426 case SKL_TKN_U32_MAX_MCPS: 2427 case SKL_TKN_U32_OBS: 2428 case SKL_TKN_U32_IBS: 2429 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_index, dir); 2430 if (ret < 0) 2431 return ret; 2432 2433 break; 2434 2435 case SKL_TKN_U32_VBUS_ID: 2436 mconfig->vbus_id = tkn_elem->value; 2437 break; 2438 2439 case SKL_TKN_U32_PARAMS_FIXUP: 2440 mconfig->params_fixup = tkn_elem->value; 2441 break; 2442 2443 case SKL_TKN_U32_CONVERTER: 2444 mconfig->converter = tkn_elem->value; 2445 break; 2446 2447 case SKL_TKN_U32_D0I3_CAPS: 2448 mconfig->d0i3_caps = tkn_elem->value; 2449 break; 2450 2451 case SKL_TKN_U32_PIPE_ID: 2452 ret = skl_tplg_add_pipe(dev, 2453 mconfig, skl, tkn_elem); 2454 2455 if (ret < 0) { 2456 if (ret == -EEXIST) { 2457 is_pipe_exists = 1; 2458 break; 2459 } 2460 return is_pipe_exists; 2461 } 2462 2463 break; 2464 2465 case SKL_TKN_U32_PIPE_CONFIG_ID: 2466 conf_idx = tkn_elem->value; 2467 break; 2468 2469 case SKL_TKN_U32_PIPE_CONN_TYPE: 2470 case SKL_TKN_U32_PIPE_PRIORITY: 2471 case SKL_TKN_U32_PIPE_MEM_PGS: 2472 case SKL_TKN_U32_PMODE: 2473 case SKL_TKN_U32_PIPE_DIRECTION: 2474 case SKL_TKN_U32_NUM_CONFIGS: 2475 if (is_pipe_exists) { 2476 ret = skl_tplg_fill_pipe_tkn(dev, mconfig->pipe, 2477 tkn_elem->token, tkn_elem->value); 2478 if (ret < 0) 2479 return ret; 2480 } 2481 2482 break; 2483 2484 case SKL_TKN_U32_PATH_MEM_PGS: 2485 case SKL_TKN_U32_CFG_FREQ: 2486 case SKL_TKN_U8_CFG_CHAN: 2487 case SKL_TKN_U8_CFG_BPS: 2488 if (mconfig->pipe->nr_cfgs) { 2489 ret = skl_tplg_fill_pipe_cfg(dev, mconfig->pipe, 2490 tkn_elem->token, tkn_elem->value, 2491 conf_idx, dir); 2492 if (ret < 0) 2493 return ret; 2494 } 2495 break; 2496 2497 case SKL_TKN_CFG_MOD_RES_ID: 2498 mconfig->mod_cfg[conf_idx].res_idx = tkn_elem->value; 2499 break; 2500 2501 case SKL_TKN_CFG_MOD_FMT_ID: 2502 
mconfig->mod_cfg[conf_idx].fmt_idx = tkn_elem->value; 2503 break; 2504 2505 /* 2506 * SKL_TKN_U32_DIR_PIN_COUNT token has the value for both 2507 * direction and the pin count. The first four bits represent 2508 * direction and next four the pin count. 2509 */ 2510 case SKL_TKN_U32_DIR_PIN_COUNT: 2511 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 2512 pin_index = (tkn_elem->value & 2513 SKL_PIN_COUNT_MASK) >> 4; 2514 2515 break; 2516 2517 case SKL_TKN_U32_FMT_CH: 2518 case SKL_TKN_U32_FMT_FREQ: 2519 case SKL_TKN_U32_FMT_BIT_DEPTH: 2520 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 2521 case SKL_TKN_U32_FMT_CH_CONFIG: 2522 case SKL_TKN_U32_FMT_INTERLEAVE: 2523 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 2524 case SKL_TKN_U32_FMT_CH_MAP: 2525 ret = skl_tplg_widget_fill_fmt(dev, iface, tkn_elem->token, 2526 tkn_elem->value, dir, pin_index); 2527 2528 if (ret < 0) 2529 return ret; 2530 2531 break; 2532 2533 case SKL_TKN_U32_PIN_MOD_ID: 2534 case SKL_TKN_U32_PIN_INST_ID: 2535 case SKL_TKN_UUID: 2536 ret = skl_tplg_fill_pins_info(dev, 2537 mconfig, tkn_elem, dir, 2538 pin_index); 2539 if (ret < 0) 2540 return ret; 2541 2542 break; 2543 2544 case SKL_TKN_U32_FMT_CFG_IDX: 2545 if (tkn_elem->value > SKL_MAX_PARAMS_TYPES) 2546 return -EINVAL; 2547 2548 mconfig->fmt_cfg_idx = tkn_elem->value; 2549 break; 2550 2551 case SKL_TKN_U32_CAPS_SIZE: 2552 mconfig->formats_config[mconfig->fmt_cfg_idx].caps_size = 2553 tkn_elem->value; 2554 2555 break; 2556 2557 case SKL_TKN_U32_CAPS_SET_PARAMS: 2558 mconfig->formats_config[mconfig->fmt_cfg_idx].set_params = 2559 tkn_elem->value; 2560 break; 2561 2562 case SKL_TKN_U32_CAPS_PARAMS_ID: 2563 mconfig->formats_config[mconfig->fmt_cfg_idx].param_id = 2564 tkn_elem->value; 2565 break; 2566 2567 case SKL_TKN_U32_PROC_DOMAIN: 2568 mconfig->domain = 2569 tkn_elem->value; 2570 2571 break; 2572 2573 case SKL_TKN_U32_DMA_BUF_SIZE: 2574 mconfig->dma_buffer_size = tkn_elem->value; 2575 break; 2576 2577 case SKL_TKN_U8_IN_PIN_TYPE: 2578 case SKL_TKN_U8_OUT_PIN_TYPE: 2579 case SKL_TKN_U8_CONN_TYPE: 2580 break; 2581 2582 default: 2583 dev_err(dev, "Token %d not handled\n", 2584 tkn_elem->token); 2585 return -EINVAL; 2586 } 2587 2588 tkn_count++; 2589 2590 return tkn_count; 2591 } 2592 2593 /* 2594 * Parse the vendor array for specific tokens to construct 2595 * module private data 2596 */ 2597 static int skl_tplg_get_tokens(struct device *dev, 2598 char *pvt_data, struct skl_dev *skl, 2599 struct skl_module_cfg *mconfig, int block_size) 2600 { 2601 struct snd_soc_tplg_vendor_array *array; 2602 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2603 int tkn_count = 0, ret; 2604 int off = 0, tuple_size = 0; 2605 bool is_module_guid = true; 2606 2607 if (block_size <= 0) 2608 return -EINVAL; 2609 2610 while (tuple_size < block_size) { 2611 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 2612 2613 off += array->size; 2614 2615 switch (array->type) { 2616 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 2617 dev_warn(dev, "no string tokens expected for skl tplg\n"); 2618 continue; 2619 2620 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 2621 if (is_module_guid) { 2622 ret = skl_tplg_get_uuid(dev, (guid_t *)mconfig->guid, 2623 array->uuid); 2624 is_module_guid = false; 2625 } else { 2626 ret = skl_tplg_get_token(dev, array->value, skl, 2627 mconfig); 2628 } 2629 2630 if (ret < 0) 2631 return ret; 2632 2633 tuple_size += sizeof(*array->uuid); 2634 2635 continue; 2636 2637 default: 2638 tkn_elem = array->value; 2639 tkn_count = 0; 2640 break; 2641 } 2642 2643 while (tkn_count <= (array->num_elems - 1)) { 2644 ret = 
skl_tplg_get_token(dev, tkn_elem, 2645 skl, mconfig); 2646 2647 if (ret < 0) 2648 return ret; 2649 2650 tkn_count = tkn_count + ret; 2651 tkn_elem++; 2652 } 2653 2654 tuple_size += tkn_count * sizeof(*tkn_elem); 2655 } 2656 2657 return off; 2658 } 2659 2660 /* 2661 * Every data block is preceded by a descriptor to read the number 2662 * of data blocks, they type of the block and it's size 2663 */ 2664 static int skl_tplg_get_desc_blocks(struct device *dev, 2665 struct snd_soc_tplg_vendor_array *array) 2666 { 2667 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 2668 2669 tkn_elem = array->value; 2670 2671 switch (tkn_elem->token) { 2672 case SKL_TKN_U8_NUM_BLOCKS: 2673 case SKL_TKN_U8_BLOCK_TYPE: 2674 case SKL_TKN_U16_BLOCK_SIZE: 2675 return tkn_elem->value; 2676 2677 default: 2678 dev_err(dev, "Invalid descriptor token %d\n", tkn_elem->token); 2679 break; 2680 } 2681 2682 return -EINVAL; 2683 } 2684 2685 static int skl_tplg_get_caps_data(struct device *dev, char *data, 2686 struct skl_module_cfg *mconfig) 2687 { 2688 int idx = mconfig->fmt_cfg_idx; 2689 2690 if (mconfig->formats_config[idx].caps_size > 0) { 2691 mconfig->formats_config[idx].caps = 2692 devm_kzalloc(dev, mconfig->formats_config[idx].caps_size, 2693 GFP_KERNEL); 2694 if (!mconfig->formats_config[idx].caps) 2695 return -ENOMEM; 2696 memcpy(mconfig->formats_config[idx].caps, data, 2697 mconfig->formats_config[idx].caps_size); 2698 } 2699 2700 return mconfig->formats_config[idx].caps_size; 2701 } 2702 2703 /* 2704 * Parse the private data for the token and corresponding value. 2705 * The private data can have multiple data blocks. So, a data block 2706 * is preceded by a descriptor for number of blocks and a descriptor 2707 * for the type and size of the suceeding data block. 2708 */ 2709 static int skl_tplg_get_pvt_data(struct snd_soc_tplg_dapm_widget *tplg_w, 2710 struct skl_dev *skl, struct device *dev, 2711 struct skl_module_cfg *mconfig) 2712 { 2713 struct snd_soc_tplg_vendor_array *array; 2714 int num_blocks, block_size, block_type, off = 0; 2715 char *data; 2716 int ret; 2717 2718 /* Read the NUM_DATA_BLOCKS descriptor */ 2719 array = (struct snd_soc_tplg_vendor_array *)tplg_w->priv.data; 2720 ret = skl_tplg_get_desc_blocks(dev, array); 2721 if (ret < 0) 2722 return ret; 2723 num_blocks = ret; 2724 2725 off += array->size; 2726 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 2727 while (num_blocks > 0) { 2728 array = (struct snd_soc_tplg_vendor_array *) 2729 (tplg_w->priv.data + off); 2730 2731 ret = skl_tplg_get_desc_blocks(dev, array); 2732 2733 if (ret < 0) 2734 return ret; 2735 block_type = ret; 2736 off += array->size; 2737 2738 array = (struct snd_soc_tplg_vendor_array *) 2739 (tplg_w->priv.data + off); 2740 2741 ret = skl_tplg_get_desc_blocks(dev, array); 2742 2743 if (ret < 0) 2744 return ret; 2745 block_size = ret; 2746 off += array->size; 2747 2748 data = (tplg_w->priv.data + off); 2749 2750 if (block_type == SKL_TYPE_TUPLE) { 2751 ret = skl_tplg_get_tokens(dev, data, 2752 skl, mconfig, block_size); 2753 } else { 2754 ret = skl_tplg_get_caps_data(dev, data, mconfig); 2755 } 2756 2757 if (ret < 0) 2758 return ret; 2759 2760 --num_blocks; 2761 off += ret; 2762 } 2763 2764 return 0; 2765 } 2766 2767 static void skl_clear_pin_config(struct snd_soc_component *component, 2768 struct snd_soc_dapm_widget *w) 2769 { 2770 int i; 2771 struct skl_module_cfg *mconfig; 2772 struct skl_pipe *pipe; 2773 2774 if (!strncmp(w->dapm->component->name, component->name, 2775 strlen(component->name))) { 2776 mconfig = w->priv; 
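/* mark all pins of this widget as unused and reset the pipe and module state */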
2777 pipe = mconfig->pipe; 2778 for (i = 0; i < mconfig->module->max_input_pins; i++) { 2779 mconfig->m_in_pin[i].in_use = false; 2780 mconfig->m_in_pin[i].pin_state = SKL_PIN_UNBIND; 2781 } 2782 for (i = 0; i < mconfig->module->max_output_pins; i++) { 2783 mconfig->m_out_pin[i].in_use = false; 2784 mconfig->m_out_pin[i].pin_state = SKL_PIN_UNBIND; 2785 } 2786 pipe->state = SKL_PIPE_INVALID; 2787 mconfig->m_state = SKL_MODULE_UNINIT; 2788 } 2789 } 2790 2791 void skl_cleanup_resources(struct skl_dev *skl) 2792 { 2793 struct snd_soc_component *soc_component = skl->component; 2794 struct snd_soc_dapm_widget *w; 2795 struct snd_soc_card *card; 2796 2797 if (soc_component == NULL) 2798 return; 2799 2800 card = soc_component->card; 2801 if (!snd_soc_card_is_instantiated(card)) 2802 return; 2803 2804 list_for_each_entry(w, &card->widgets, list) { 2805 if (is_skl_dsp_widget_type(w, skl->dev) && w->priv != NULL) 2806 skl_clear_pin_config(soc_component, w); 2807 } 2808 2809 skl_clear_module_cnt(skl->dsp); 2810 } 2811 2812 /* 2813 * Topology core widget load callback 2814 * 2815 * This is used to save the private data for each widget which gives 2816 * information to the driver about module and pipeline parameters which DSP 2817 * FW expects like ids, resource values, formats etc 2818 */ 2819 static int skl_tplg_widget_load(struct snd_soc_component *cmpnt, int index, 2820 struct snd_soc_dapm_widget *w, 2821 struct snd_soc_tplg_dapm_widget *tplg_w) 2822 { 2823 int ret; 2824 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 2825 struct skl_dev *skl = bus_to_skl(bus); 2826 struct skl_module_cfg *mconfig; 2827 2828 if (!tplg_w->priv.size) 2829 goto bind_event; 2830 2831 mconfig = devm_kzalloc(bus->dev, sizeof(*mconfig), GFP_KERNEL); 2832 2833 if (!mconfig) 2834 return -ENOMEM; 2835 2836 if (skl->nr_modules == 0) { 2837 mconfig->module = devm_kzalloc(bus->dev, 2838 sizeof(*mconfig->module), GFP_KERNEL); 2839 if (!mconfig->module) 2840 return -ENOMEM; 2841 } 2842 2843 w->priv = mconfig; 2844 2845 /* 2846 * module binary can be loaded later, so set it to query when 2847 * module is load for a use case 2848 */ 2849 mconfig->id.module_id = -1; 2850 2851 /* To provide backward compatibility, set default as SKL_PARAM_INIT */ 2852 mconfig->fmt_cfg_idx = SKL_PARAM_INIT; 2853 2854 /* Parse private data for tuples */ 2855 ret = skl_tplg_get_pvt_data(tplg_w, skl, bus->dev, mconfig); 2856 if (ret < 0) 2857 return ret; 2858 2859 skl_debug_init_module(skl->debugfs, w, mconfig); 2860 2861 bind_event: 2862 if (tplg_w->event_type == 0) { 2863 dev_dbg(bus->dev, "ASoC: No event handler required\n"); 2864 return 0; 2865 } 2866 2867 ret = snd_soc_tplg_widget_bind_event(w, skl_tplg_widget_ops, 2868 ARRAY_SIZE(skl_tplg_widget_ops), 2869 tplg_w->event_type); 2870 2871 if (ret) { 2872 dev_err(bus->dev, "%s: No matching event handlers found for %d\n", 2873 __func__, tplg_w->event_type); 2874 return -EINVAL; 2875 } 2876 2877 return 0; 2878 } 2879 2880 static int skl_init_algo_data(struct device *dev, struct soc_bytes_ext *be, 2881 struct snd_soc_tplg_bytes_control *bc) 2882 { 2883 struct skl_algo_data *ac; 2884 struct skl_dfw_algo_data *dfw_ac = 2885 (struct skl_dfw_algo_data *)bc->priv.data; 2886 2887 ac = devm_kzalloc(dev, sizeof(*ac), GFP_KERNEL); 2888 if (!ac) 2889 return -ENOMEM; 2890 2891 /* Fill private data */ 2892 ac->max = dfw_ac->max; 2893 ac->param_id = dfw_ac->param_id; 2894 ac->set_params = dfw_ac->set_params; 2895 ac->size = dfw_ac->max; 2896 2897 if (ac->max) { 2898 ac->params = devm_kzalloc(dev, ac->max, 
GFP_KERNEL); 2899 if (!ac->params) 2900 return -ENOMEM; 2901 2902 memcpy(ac->params, dfw_ac->params, ac->max); 2903 } 2904 2905 be->dobj.private = ac; 2906 return 0; 2907 } 2908 2909 static int skl_init_enum_data(struct device *dev, struct soc_enum *se, 2910 struct snd_soc_tplg_enum_control *ec) 2911 { 2912 2913 void *data; 2914 2915 if (ec->priv.size) { 2916 data = devm_kzalloc(dev, sizeof(ec->priv.size), GFP_KERNEL); 2917 if (!data) 2918 return -ENOMEM; 2919 memcpy(data, ec->priv.data, ec->priv.size); 2920 se->dobj.private = data; 2921 } 2922 2923 return 0; 2924 2925 } 2926 2927 static int skl_tplg_control_load(struct snd_soc_component *cmpnt, 2928 int index, 2929 struct snd_kcontrol_new *kctl, 2930 struct snd_soc_tplg_ctl_hdr *hdr) 2931 { 2932 struct soc_bytes_ext *sb; 2933 struct snd_soc_tplg_bytes_control *tplg_bc; 2934 struct snd_soc_tplg_enum_control *tplg_ec; 2935 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 2936 struct soc_enum *se; 2937 2938 switch (hdr->ops.info) { 2939 case SND_SOC_TPLG_CTL_BYTES: 2940 tplg_bc = container_of(hdr, 2941 struct snd_soc_tplg_bytes_control, hdr); 2942 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) { 2943 sb = (struct soc_bytes_ext *)kctl->private_value; 2944 if (tplg_bc->priv.size) 2945 return skl_init_algo_data( 2946 bus->dev, sb, tplg_bc); 2947 } 2948 break; 2949 2950 case SND_SOC_TPLG_CTL_ENUM: 2951 tplg_ec = container_of(hdr, 2952 struct snd_soc_tplg_enum_control, hdr); 2953 if (kctl->access & SNDRV_CTL_ELEM_ACCESS_READ) { 2954 se = (struct soc_enum *)kctl->private_value; 2955 if (tplg_ec->priv.size) 2956 skl_init_enum_data(bus->dev, se, tplg_ec); 2957 } 2958 2959 /* 2960 * now that the control initializations are done, remove 2961 * write permission for the DMIC configuration enums to 2962 * avoid conflicts between NHLT settings and user interaction 2963 */ 2964 2965 if (hdr->ops.get == SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC) 2966 kctl->access = SNDRV_CTL_ELEM_ACCESS_READ; 2967 2968 break; 2969 2970 default: 2971 dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n", 2972 hdr->ops.get, hdr->ops.put, hdr->ops.info); 2973 break; 2974 } 2975 2976 return 0; 2977 } 2978 2979 static int skl_tplg_fill_str_mfest_tkn(struct device *dev, 2980 struct snd_soc_tplg_vendor_string_elem *str_elem, 2981 struct skl_dev *skl) 2982 { 2983 int tkn_count = 0; 2984 static int ref_count; 2985 2986 switch (str_elem->token) { 2987 case SKL_TKN_STR_LIB_NAME: 2988 if (ref_count > skl->lib_count - 1) { 2989 ref_count = 0; 2990 return -EINVAL; 2991 } 2992 2993 strncpy(skl->lib_info[ref_count].name, 2994 str_elem->string, 2995 ARRAY_SIZE(skl->lib_info[ref_count].name)); 2996 ref_count++; 2997 break; 2998 2999 default: 3000 dev_err(dev, "Not a string token %d\n", str_elem->token); 3001 break; 3002 } 3003 tkn_count++; 3004 3005 return tkn_count; 3006 } 3007 3008 static int skl_tplg_get_str_tkn(struct device *dev, 3009 struct snd_soc_tplg_vendor_array *array, 3010 struct skl_dev *skl) 3011 { 3012 int tkn_count = 0, ret; 3013 struct snd_soc_tplg_vendor_string_elem *str_elem; 3014 3015 str_elem = (struct snd_soc_tplg_vendor_string_elem *)array->value; 3016 while (tkn_count < array->num_elems) { 3017 ret = skl_tplg_fill_str_mfest_tkn(dev, str_elem, skl); 3018 str_elem++; 3019 3020 if (ret < 0) 3021 return ret; 3022 3023 tkn_count = tkn_count + ret; 3024 } 3025 3026 return tkn_count; 3027 } 3028 3029 static int skl_tplg_manifest_fill_fmt(struct device *dev, 3030 struct skl_module_iface *fmt, 3031 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3032 u32 
dir, int fmt_idx) 3033 { 3034 struct skl_module_pin_fmt *dst_fmt; 3035 struct skl_module_fmt *mod_fmt; 3036 int ret; 3037 3038 if (!fmt) 3039 return -EINVAL; 3040 3041 switch (dir) { 3042 case SKL_DIR_IN: 3043 dst_fmt = &fmt->inputs[fmt_idx]; 3044 break; 3045 3046 case SKL_DIR_OUT: 3047 dst_fmt = &fmt->outputs[fmt_idx]; 3048 break; 3049 3050 default: 3051 dev_err(dev, "Invalid direction: %d\n", dir); 3052 return -EINVAL; 3053 } 3054 3055 mod_fmt = &dst_fmt->fmt; 3056 3057 switch (tkn_elem->token) { 3058 case SKL_TKN_MM_U32_INTF_PIN_ID: 3059 dst_fmt->id = tkn_elem->value; 3060 break; 3061 3062 default: 3063 ret = skl_tplg_fill_fmt(dev, mod_fmt, tkn_elem->token, 3064 tkn_elem->value); 3065 if (ret < 0) 3066 return ret; 3067 break; 3068 } 3069 3070 return 0; 3071 } 3072 3073 static int skl_tplg_fill_mod_info(struct device *dev, 3074 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3075 struct skl_module *mod) 3076 { 3077 3078 if (!mod) 3079 return -EINVAL; 3080 3081 switch (tkn_elem->token) { 3082 case SKL_TKN_U8_IN_PIN_TYPE: 3083 mod->input_pin_type = tkn_elem->value; 3084 break; 3085 3086 case SKL_TKN_U8_OUT_PIN_TYPE: 3087 mod->output_pin_type = tkn_elem->value; 3088 break; 3089 3090 case SKL_TKN_U8_IN_QUEUE_COUNT: 3091 mod->max_input_pins = tkn_elem->value; 3092 break; 3093 3094 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3095 mod->max_output_pins = tkn_elem->value; 3096 break; 3097 3098 case SKL_TKN_MM_U8_NUM_RES: 3099 mod->nr_resources = tkn_elem->value; 3100 break; 3101 3102 case SKL_TKN_MM_U8_NUM_INTF: 3103 mod->nr_interfaces = tkn_elem->value; 3104 break; 3105 3106 default: 3107 dev_err(dev, "Invalid mod info token %d", tkn_elem->token); 3108 return -EINVAL; 3109 } 3110 3111 return 0; 3112 } 3113 3114 3115 static int skl_tplg_get_int_tkn(struct device *dev, 3116 struct snd_soc_tplg_vendor_value_elem *tkn_elem, 3117 struct skl_dev *skl) 3118 { 3119 int tkn_count = 0, ret; 3120 static int mod_idx, res_val_idx, intf_val_idx, dir, pin_idx; 3121 struct skl_module_res *res = NULL; 3122 struct skl_module_iface *fmt = NULL; 3123 struct skl_module *mod = NULL; 3124 static struct skl_astate_param *astate_table; 3125 static int astate_cfg_idx, count; 3126 int i; 3127 size_t size; 3128 3129 if (skl->modules) { 3130 mod = skl->modules[mod_idx]; 3131 res = &mod->resources[res_val_idx]; 3132 fmt = &mod->formats[intf_val_idx]; 3133 } 3134 3135 switch (tkn_elem->token) { 3136 case SKL_TKN_U32_LIB_COUNT: 3137 skl->lib_count = tkn_elem->value; 3138 break; 3139 3140 case SKL_TKN_U8_NUM_MOD: 3141 skl->nr_modules = tkn_elem->value; 3142 skl->modules = devm_kcalloc(dev, skl->nr_modules, 3143 sizeof(*skl->modules), GFP_KERNEL); 3144 if (!skl->modules) 3145 return -ENOMEM; 3146 3147 for (i = 0; i < skl->nr_modules; i++) { 3148 skl->modules[i] = devm_kzalloc(dev, 3149 sizeof(struct skl_module), GFP_KERNEL); 3150 if (!skl->modules[i]) 3151 return -ENOMEM; 3152 } 3153 break; 3154 3155 case SKL_TKN_MM_U8_MOD_IDX: 3156 mod_idx = tkn_elem->value; 3157 break; 3158 3159 case SKL_TKN_U32_ASTATE_COUNT: 3160 if (astate_table != NULL) { 3161 dev_err(dev, "More than one entry for A-State count"); 3162 return -EINVAL; 3163 } 3164 3165 if (tkn_elem->value > SKL_MAX_ASTATE_CFG) { 3166 dev_err(dev, "Invalid A-State count %d\n", 3167 tkn_elem->value); 3168 return -EINVAL; 3169 } 3170 3171 size = struct_size(skl->cfg.astate_cfg, astate_table, 3172 tkn_elem->value); 3173 skl->cfg.astate_cfg = devm_kzalloc(dev, size, GFP_KERNEL); 3174 if (!skl->cfg.astate_cfg) 3175 return -ENOMEM; 3176 3177 astate_table = skl->cfg.astate_cfg->astate_table; 
3178 count = skl->cfg.astate_cfg->count = tkn_elem->value; 3179 break; 3180 3181 case SKL_TKN_U32_ASTATE_IDX: 3182 if (tkn_elem->value >= count) { 3183 dev_err(dev, "Invalid A-State index %d\n", 3184 tkn_elem->value); 3185 return -EINVAL; 3186 } 3187 3188 astate_cfg_idx = tkn_elem->value; 3189 break; 3190 3191 case SKL_TKN_U32_ASTATE_KCPS: 3192 astate_table[astate_cfg_idx].kcps = tkn_elem->value; 3193 break; 3194 3195 case SKL_TKN_U32_ASTATE_CLK_SRC: 3196 astate_table[astate_cfg_idx].clk_src = tkn_elem->value; 3197 break; 3198 3199 case SKL_TKN_U8_IN_PIN_TYPE: 3200 case SKL_TKN_U8_OUT_PIN_TYPE: 3201 case SKL_TKN_U8_IN_QUEUE_COUNT: 3202 case SKL_TKN_U8_OUT_QUEUE_COUNT: 3203 case SKL_TKN_MM_U8_NUM_RES: 3204 case SKL_TKN_MM_U8_NUM_INTF: 3205 ret = skl_tplg_fill_mod_info(dev, tkn_elem, mod); 3206 if (ret < 0) 3207 return ret; 3208 break; 3209 3210 case SKL_TKN_U32_DIR_PIN_COUNT: 3211 dir = tkn_elem->value & SKL_IN_DIR_BIT_MASK; 3212 pin_idx = (tkn_elem->value & SKL_PIN_COUNT_MASK) >> 4; 3213 break; 3214 3215 case SKL_TKN_MM_U32_RES_ID: 3216 if (!res) 3217 return -EINVAL; 3218 3219 res->id = tkn_elem->value; 3220 res_val_idx = tkn_elem->value; 3221 break; 3222 3223 case SKL_TKN_MM_U32_FMT_ID: 3224 if (!fmt) 3225 return -EINVAL; 3226 3227 fmt->fmt_idx = tkn_elem->value; 3228 intf_val_idx = tkn_elem->value; 3229 break; 3230 3231 case SKL_TKN_MM_U32_CPS: 3232 case SKL_TKN_MM_U32_DMA_SIZE: 3233 case SKL_TKN_MM_U32_CPC: 3234 case SKL_TKN_U32_MEM_PAGES: 3235 case SKL_TKN_U32_OBS: 3236 case SKL_TKN_U32_IBS: 3237 case SKL_TKN_MM_U32_RES_PIN_ID: 3238 case SKL_TKN_MM_U32_PIN_BUF: 3239 ret = skl_tplg_fill_res_tkn(dev, tkn_elem, res, pin_idx, dir); 3240 if (ret < 0) 3241 return ret; 3242 3243 break; 3244 3245 case SKL_TKN_MM_U32_NUM_IN_FMT: 3246 if (!fmt) 3247 return -EINVAL; 3248 3249 res->nr_input_pins = tkn_elem->value; 3250 break; 3251 3252 case SKL_TKN_MM_U32_NUM_OUT_FMT: 3253 if (!fmt) 3254 return -EINVAL; 3255 3256 res->nr_output_pins = tkn_elem->value; 3257 break; 3258 3259 case SKL_TKN_U32_FMT_CH: 3260 case SKL_TKN_U32_FMT_FREQ: 3261 case SKL_TKN_U32_FMT_BIT_DEPTH: 3262 case SKL_TKN_U32_FMT_SAMPLE_SIZE: 3263 case SKL_TKN_U32_FMT_CH_CONFIG: 3264 case SKL_TKN_U32_FMT_INTERLEAVE: 3265 case SKL_TKN_U32_FMT_SAMPLE_TYPE: 3266 case SKL_TKN_U32_FMT_CH_MAP: 3267 case SKL_TKN_MM_U32_INTF_PIN_ID: 3268 ret = skl_tplg_manifest_fill_fmt(dev, fmt, tkn_elem, 3269 dir, pin_idx); 3270 if (ret < 0) 3271 return ret; 3272 break; 3273 3274 default: 3275 dev_err(dev, "Not a manifest token %d\n", tkn_elem->token); 3276 return -EINVAL; 3277 } 3278 tkn_count++; 3279 3280 return tkn_count; 3281 } 3282 3283 /* 3284 * Fill the manifest structure by parsing the tokens based on the 3285 * type. 
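 * String, UUID and integer value tuples each take a separate path below.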
3286 */ 3287 static int skl_tplg_get_manifest_tkn(struct device *dev, 3288 char *pvt_data, struct skl_dev *skl, 3289 int block_size) 3290 { 3291 int tkn_count = 0, ret; 3292 int off = 0, tuple_size = 0; 3293 u8 uuid_index = 0; 3294 struct snd_soc_tplg_vendor_array *array; 3295 struct snd_soc_tplg_vendor_value_elem *tkn_elem; 3296 3297 if (block_size <= 0) 3298 return -EINVAL; 3299 3300 while (tuple_size < block_size) { 3301 array = (struct snd_soc_tplg_vendor_array *)(pvt_data + off); 3302 off += array->size; 3303 switch (array->type) { 3304 case SND_SOC_TPLG_TUPLE_TYPE_STRING: 3305 ret = skl_tplg_get_str_tkn(dev, array, skl); 3306 3307 if (ret < 0) 3308 return ret; 3309 tkn_count = ret; 3310 3311 tuple_size += tkn_count * 3312 sizeof(struct snd_soc_tplg_vendor_string_elem); 3313 continue; 3314 3315 case SND_SOC_TPLG_TUPLE_TYPE_UUID: 3316 if (array->uuid->token != SKL_TKN_UUID) { 3317 dev_err(dev, "Not an UUID token: %d\n", 3318 array->uuid->token); 3319 return -EINVAL; 3320 } 3321 if (uuid_index >= skl->nr_modules) { 3322 dev_err(dev, "Too many UUID tokens\n"); 3323 return -EINVAL; 3324 } 3325 import_guid(&skl->modules[uuid_index++]->uuid, 3326 array->uuid->uuid); 3327 3328 tuple_size += sizeof(*array->uuid); 3329 continue; 3330 3331 default: 3332 tkn_elem = array->value; 3333 tkn_count = 0; 3334 break; 3335 } 3336 3337 while (tkn_count <= array->num_elems - 1) { 3338 ret = skl_tplg_get_int_tkn(dev, 3339 tkn_elem, skl); 3340 if (ret < 0) 3341 return ret; 3342 3343 tkn_count = tkn_count + ret; 3344 tkn_elem++; 3345 } 3346 tuple_size += (tkn_count * sizeof(*tkn_elem)); 3347 tkn_count = 0; 3348 } 3349 3350 return off; 3351 } 3352 3353 /* 3354 * Parse manifest private data for tokens. The private data block is 3355 * preceded by descriptors for type and size of data block. 
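 * Unlike widget private data, only SKL_TYPE_TUPLE blocks are valid here;
 * any other block type is rejected with -EINVAL.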
3356 */ 3357 static int skl_tplg_get_manifest_data(struct snd_soc_tplg_manifest *manifest, 3358 struct device *dev, struct skl_dev *skl) 3359 { 3360 struct snd_soc_tplg_vendor_array *array; 3361 int num_blocks, block_size = 0, block_type, off = 0; 3362 char *data; 3363 int ret; 3364 3365 /* Read the NUM_DATA_BLOCKS descriptor */ 3366 array = (struct snd_soc_tplg_vendor_array *)manifest->priv.data; 3367 ret = skl_tplg_get_desc_blocks(dev, array); 3368 if (ret < 0) 3369 return ret; 3370 num_blocks = ret; 3371 3372 off += array->size; 3373 /* Read the BLOCK_TYPE and BLOCK_SIZE descriptor */ 3374 while (num_blocks > 0) { 3375 array = (struct snd_soc_tplg_vendor_array *) 3376 (manifest->priv.data + off); 3377 ret = skl_tplg_get_desc_blocks(dev, array); 3378 3379 if (ret < 0) 3380 return ret; 3381 block_type = ret; 3382 off += array->size; 3383 3384 array = (struct snd_soc_tplg_vendor_array *) 3385 (manifest->priv.data + off); 3386 3387 ret = skl_tplg_get_desc_blocks(dev, array); 3388 3389 if (ret < 0) 3390 return ret; 3391 block_size = ret; 3392 off += array->size; 3393 3394 data = (manifest->priv.data + off); 3395 3396 if (block_type == SKL_TYPE_TUPLE) { 3397 ret = skl_tplg_get_manifest_tkn(dev, data, skl, 3398 block_size); 3399 3400 if (ret < 0) 3401 return ret; 3402 3403 --num_blocks; 3404 } else { 3405 return -EINVAL; 3406 } 3407 off += ret; 3408 } 3409 3410 return 0; 3411 } 3412 3413 static int skl_manifest_load(struct snd_soc_component *cmpnt, int index, 3414 struct snd_soc_tplg_manifest *manifest) 3415 { 3416 struct hdac_bus *bus = snd_soc_component_get_drvdata(cmpnt); 3417 struct skl_dev *skl = bus_to_skl(bus); 3418 3419 /* proceed only if we have private data defined */ 3420 if (manifest->priv.size == 0) 3421 return 0; 3422 3423 skl_tplg_get_manifest_data(manifest, bus->dev, skl); 3424 3425 if (skl->lib_count > SKL_MAX_LIB) { 3426 dev_err(bus->dev, "Exceeding max Library count. 
Got:%d\n", 3427 skl->lib_count); 3428 return -EINVAL; 3429 } 3430 3431 return 0; 3432 } 3433 3434 static int skl_tplg_complete(struct snd_soc_component *component) 3435 { 3436 struct snd_soc_dobj *dobj; 3437 struct snd_soc_acpi_mach *mach; 3438 struct snd_ctl_elem_value *val; 3439 int i; 3440 3441 val = kmalloc(sizeof(*val), GFP_KERNEL); 3442 if (!val) 3443 return -ENOMEM; 3444 3445 mach = dev_get_platdata(component->card->dev); 3446 list_for_each_entry(dobj, &component->dobj_list, list) { 3447 struct snd_kcontrol *kcontrol = dobj->control.kcontrol; 3448 struct soc_enum *se; 3449 char **texts; 3450 char chan_text[4]; 3451 3452 if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol || 3453 kcontrol->put != skl_tplg_multi_config_set_dmic) 3454 continue; 3455 3456 se = (struct soc_enum *)kcontrol->private_value; 3457 texts = dobj->control.dtexts; 3458 sprintf(chan_text, "c%d", mach->mach_params.dmic_num); 3459 3460 for (i = 0; i < se->items; i++) { 3461 if (strstr(texts[i], chan_text)) { 3462 memset(val, 0, sizeof(*val)); 3463 val->value.enumerated.item[0] = i; 3464 kcontrol->put(kcontrol, val); 3465 } 3466 } 3467 } 3468 3469 kfree(val); 3470 return 0; 3471 } 3472 3473 static const struct snd_soc_tplg_ops skl_tplg_ops = { 3474 .widget_load = skl_tplg_widget_load, 3475 .control_load = skl_tplg_control_load, 3476 .bytes_ext_ops = skl_tlv_ops, 3477 .bytes_ext_ops_count = ARRAY_SIZE(skl_tlv_ops), 3478 .io_ops = skl_tplg_kcontrol_ops, 3479 .io_ops_count = ARRAY_SIZE(skl_tplg_kcontrol_ops), 3480 .manifest = skl_manifest_load, 3481 .dai_load = skl_dai_load, 3482 .complete = skl_tplg_complete, 3483 }; 3484 3485 /* 3486 * A pipe can have multiple modules, each of them will be a DAPM widget as 3487 * well. While managing a pipeline we need to get the list of all the 3488 * widgets in a pipelines, so this helper - skl_tplg_create_pipe_widget_list() 3489 * helps to get the SKL type widgets in that pipeline 3490 */ 3491 static int skl_tplg_create_pipe_widget_list(struct snd_soc_component *component) 3492 { 3493 struct snd_soc_dapm_widget *w; 3494 struct skl_module_cfg *mcfg = NULL; 3495 struct skl_pipe_module *p_module = NULL; 3496 struct skl_pipe *pipe; 3497 3498 list_for_each_entry(w, &component->card->widgets, list) { 3499 if (is_skl_dsp_widget_type(w, component->dev) && w->priv) { 3500 mcfg = w->priv; 3501 pipe = mcfg->pipe; 3502 3503 p_module = devm_kzalloc(component->dev, 3504 sizeof(*p_module), GFP_KERNEL); 3505 if (!p_module) 3506 return -ENOMEM; 3507 3508 p_module->w = w; 3509 list_add_tail(&p_module->node, &pipe->w_list); 3510 } 3511 } 3512 3513 return 0; 3514 } 3515 3516 static void skl_tplg_set_pipe_type(struct skl_dev *skl, struct skl_pipe *pipe) 3517 { 3518 struct skl_pipe_module *w_module; 3519 struct snd_soc_dapm_widget *w; 3520 struct skl_module_cfg *mconfig; 3521 bool host_found = false, link_found = false; 3522 3523 list_for_each_entry(w_module, &pipe->w_list, node) { 3524 w = w_module->w; 3525 mconfig = w->priv; 3526 3527 if (mconfig->dev_type == SKL_DEVICE_HDAHOST) 3528 host_found = true; 3529 else if (mconfig->dev_type != SKL_DEVICE_NONE) 3530 link_found = true; 3531 } 3532 3533 if (host_found && link_found) 3534 pipe->passthru = true; 3535 else 3536 pipe->passthru = false; 3537 } 3538 3539 /* 3540 * SKL topology init routine 3541 */ 3542 int skl_tplg_init(struct snd_soc_component *component, struct hdac_bus *bus) 3543 { 3544 int ret; 3545 const struct firmware *fw; 3546 struct skl_dev *skl = bus_to_skl(bus); 3547 struct skl_pipeline *ppl; 3548 3549 ret = request_firmware(&fw, 
skl->tplg_name, bus->dev); 3550 if (ret < 0) { 3551 char alt_tplg_name[64]; 3552 3553 snprintf(alt_tplg_name, sizeof(alt_tplg_name), "%s-tplg.bin", 3554 skl->mach->drv_name); 3555 dev_info(bus->dev, "tplg fw %s load failed with %d, trying alternative tplg name %s", 3556 skl->tplg_name, ret, alt_tplg_name); 3557 3558 ret = request_firmware(&fw, alt_tplg_name, bus->dev); 3559 if (!ret) 3560 goto component_load; 3561 3562 dev_info(bus->dev, "tplg %s failed with %d, falling back to dfw_sst.bin", 3563 alt_tplg_name, ret); 3564 3565 ret = request_firmware(&fw, "dfw_sst.bin", bus->dev); 3566 if (ret < 0) { 3567 dev_err(bus->dev, "Fallback tplg fw %s load failed with %d\n", 3568 "dfw_sst.bin", ret); 3569 return ret; 3570 } 3571 } 3572 3573 component_load: 3574 ret = snd_soc_tplg_component_load(component, &skl_tplg_ops, fw); 3575 if (ret < 0) { 3576 dev_err(bus->dev, "tplg component load failed%d\n", ret); 3577 goto err; 3578 } 3579 3580 ret = skl_tplg_create_pipe_widget_list(component); 3581 if (ret < 0) { 3582 dev_err(bus->dev, "tplg create pipe widget list failed%d\n", 3583 ret); 3584 goto err; 3585 } 3586 3587 list_for_each_entry(ppl, &skl->ppl_list, node) 3588 skl_tplg_set_pipe_type(skl, ppl->pipe); 3589 3590 err: 3591 release_firmware(fw); 3592 return ret; 3593 } 3594 3595 void skl_tplg_exit(struct snd_soc_component *component, struct hdac_bus *bus) 3596 { 3597 struct skl_dev *skl = bus_to_skl(bus); 3598 struct skl_pipeline *ppl, *tmp; 3599 3600 list_for_each_entry_safe(ppl, tmp, &skl->ppl_list, node) 3601 list_del(&ppl->node); 3602 3603 /* clean up topology */ 3604 snd_soc_tplg_component_remove(component); 3605 } 3606