// SPDX-License-Identifier: GPL-2.0-only
//
// Apple SoCs MCA driver
//
// Copyright (C) The Asahi Linux Contributors
//
// The MCA peripheral is made up of a number of identical units called clusters.
// Each cluster has its own clock parent and SYNC signal generator, carries
// four SERDES units, and has a dedicated I2S port on the SoC's periphery.
//
// The clusters can operate independently, or can be combined together in a
// configurable manner. We mostly treat them as self-contained independent
// units and don't configure any cross-cluster connections except for the I2S
// ports. The I2S ports can be routed to any of the clusters (irrespective
// of their native cluster). We map this onto ASoC's (DPCM) notion of backend
// and frontend DAIs. The 'cluster guts' are frontends which are dynamically
// routed to backend I2S ports.
//
// DAI references in devicetree are resolved to backends. The routing between
// frontends and backends is determined by the machine driver in the DAPM paths
// it supplies.

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>

#define USE_RXB_FOR_CAPTURE

/* Relative to cluster base */
#define REG_STATUS		0x0
#define STATUS_MCLK_EN		BIT(0)
#define REG_MCLK_CONF		0x4
#define MCLK_CONF_DIV		GENMASK(11, 8)

#define REG_SYNCGEN_STATUS	0x100
#define SYNCGEN_STATUS_EN	BIT(0)
#define REG_SYNCGEN_MCLK_SEL	0x104
#define SYNCGEN_MCLK_SEL	GENMASK(3, 0)
#define REG_SYNCGEN_HI_PERIOD	0x108
#define REG_SYNCGEN_LO_PERIOD	0x10c

#define REG_PORT_ENABLES	0x600
#define PORT_ENABLES_CLOCKS	GENMASK(2, 1)
#define PORT_ENABLES_TX_DATA	BIT(3)
#define REG_PORT_CLOCK_SEL	0x604
#define PORT_CLOCK_SEL		GENMASK(11, 8)
#define REG_PORT_DATA_SEL	0x608
#define PORT_DATA_SEL_TXA(cl)	(1 << ((cl)*2))
#define PORT_DATA_SEL_TXB(cl)	(2 << ((cl)*2))

#define REG_INTSTATE		0x700
#define REG_INTMASK		0x704

/* Bases of serdes units (relative to cluster) */
#define CLUSTER_RXA_OFF	0x200
#define CLUSTER_TXA_OFF	0x300
#define CLUSTER_RXB_OFF	0x400
#define CLUSTER_TXB_OFF	0x500

#define CLUSTER_TX_OFF	CLUSTER_TXA_OFF

#ifndef USE_RXB_FOR_CAPTURE
#define CLUSTER_RX_OFF	CLUSTER_RXA_OFF
#else
#define CLUSTER_RX_OFF	CLUSTER_RXB_OFF
#endif

/* Relative to serdes unit base */
#define REG_SERDES_STATUS	0x00
#define SERDES_STATUS_EN	BIT(0)
#define SERDES_STATUS_RST	BIT(1)
#define REG_TX_SERDES_CONF	0x04
#define REG_RX_SERDES_CONF	0x08
#define SERDES_CONF_NCHANS	GENMASK(3, 0)
#define SERDES_CONF_WIDTH_MASK	GENMASK(8, 4)
#define SERDES_CONF_WIDTH_16BIT	0x40
#define SERDES_CONF_WIDTH_20BIT	0x80
#define SERDES_CONF_WIDTH_24BIT	0xc0
#define SERDES_CONF_WIDTH_32BIT	0x100
#define SERDES_CONF_BCLK_POL	0x400
#define SERDES_CONF_LSB_FIRST	0x800
#define SERDES_CONF_UNK1	BIT(12)
#define SERDES_CONF_UNK2	BIT(13)
#define SERDES_CONF_UNK3	BIT(14)
#define SERDES_CONF_NO_DATA_FEEDBACK	BIT(15)
#define SERDES_CONF_SYNC_SEL	GENMASK(18, 16)
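/*
 * The TX and RX serdes units differ in register layout past the shared
 * STATUS register, hence the separate TX/RX offsets below (e.g. BITSTART
 * sits at 0x08 in a TX unit but at 0x0c in an RX one).
 */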
#define REG_TX_SERDES_BITSTART	0x08
#define REG_RX_SERDES_BITSTART	0x0c
#define REG_TX_SERDES_SLOTMASK	0x0c
#define REG_RX_SERDES_SLOTMASK	0x10
#define REG_RX_SERDES_PORT	0x04

/* Relative to switch base */
#define REG_DMA_ADAPTER_A(cl)	(0x8000 * (cl))
#define REG_DMA_ADAPTER_B(cl)	(0x8000 * (cl) + 0x4000)
#define DMA_ADAPTER_TX_LSB_PAD	GENMASK(4, 0)
#define DMA_ADAPTER_TX_NCHANS	GENMASK(6, 5)
#define DMA_ADAPTER_RX_MSB_PAD	GENMASK(12, 8)
#define DMA_ADAPTER_RX_NCHANS	GENMASK(14, 13)
#define DMA_ADAPTER_NCHANS	GENMASK(22, 20)

#define SWITCH_STRIDE	0x8000
#define CLUSTER_STRIDE	0x4000

#define MAX_NCLUSTERS	6

#define APPLE_MCA_FMTBITS (SNDRV_PCM_FMTBIT_S16_LE | \
			   SNDRV_PCM_FMTBIT_S24_LE | \
			   SNDRV_PCM_FMTBIT_S32_LE)

struct mca_cluster {
	int no;
	void __iomem *base;
	struct mca_data *host;
	struct device *pd_dev;
	struct clk *clk_parent;
	struct dma_chan *dma_chans[SNDRV_PCM_STREAM_LAST + 1];

	bool port_started[SNDRV_PCM_STREAM_LAST + 1];
	int port_driver; /* The cluster driving this cluster's port */

	bool clocks_in_use[SNDRV_PCM_STREAM_LAST + 1];
	struct device_link *pd_link;

	unsigned int bclk_ratio;

	/* Masks etc. picked up via the set_tdm_slot method */
	int tdm_slots;
	int tdm_slot_width;
	unsigned int tdm_tx_mask;
	unsigned int tdm_rx_mask;
};

struct mca_data {
	struct device *dev;

	void __iomem *switch_base;

	struct device *pd_dev;
	struct reset_control *rstc;
	struct device_link *pd_link;

	/* Mutex for accessing port_driver of foreign clusters */
	struct mutex port_mutex;

	int nclusters;
	struct mca_cluster clusters[] __counted_by(nclusters);
};

static void mca_modify(struct mca_cluster *cl, int regoffset, u32 mask, u32 val)
{
	void __iomem *ptr = cl->base + regoffset;
	u32 newval;

	newval = (val & mask) | (readl_relaxed(ptr) & ~mask);
	writel_relaxed(newval, ptr);
}

/*
 * Get the cluster of FE or BE DAI
 */
static struct mca_cluster *mca_dai_to_cluster(struct snd_soc_dai *dai)
{
	struct mca_data *mca = snd_soc_dai_get_drvdata(dai);
	/*
	 * FE DAIs are 0 ... nclusters - 1
	 * BE DAIs are nclusters ... 2*nclusters - 1
	 */
	int cluster_no = dai->id % mca->nclusters;

	return &mca->clusters[cluster_no];
}

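/*
 * Called before the PCM trigger proper, from mca_trigger below. This gives
 * us a chance to put the frontend's SERDES unit through a reset cycle and
 * reattach it to the cluster's SYNC generator while the stream is not yet
 * running.
 */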
static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd,
				 struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;
	int serdes_conf =
		serdes_unit + (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 7));
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_RST);
		/*
		 * Experiments suggest that it takes at most ~1 us
		 * for the bit to clear, so wait 2 us for good measure.
		 */
		udelay(2);
		WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) &
			SERDES_STATUS_RST);
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, 0));
		mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL,
			   FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1));
		break;
	default:
		break;
	}
}

static int mca_fe_trigger(struct snd_pcm_substream *substream, int cmd,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	int serdes_unit = is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN | SERDES_STATUS_RST,
			   SERDES_STATUS_EN);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		mca_modify(cl, serdes_unit + REG_SERDES_STATUS,
			   SERDES_STATUS_EN, 0);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mca_fe_enable_clocks(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	int ret;

	ret = clk_prepare_enable(cl->clk_parent);
	if (ret) {
		dev_err(mca->dev,
			"cluster %d: unable to enable clock parent: %d\n",
			cl->no, ret);
		return ret;
	}

	/*
	 * We can't power up the device earlier than this because
	 * the power state driver would error out on seeing the device
	 * as clock-gated.
	 */
	cl->pd_link = device_link_add(mca->dev, cl->pd_dev,
				      DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				      DL_FLAG_RPM_ACTIVE);
	if (!cl->pd_link) {
		dev_err(mca->dev,
			"cluster %d: unable to prop-up power domain\n", cl->no);
		clk_disable_unprepare(cl->clk_parent);
		return -EINVAL;
	}

	writel_relaxed(cl->no + 1, cl->base + REG_SYNCGEN_MCLK_SEL);
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN,
		   SYNCGEN_STATUS_EN);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, STATUS_MCLK_EN);

	return 0;
}

static void mca_fe_disable_clocks(struct mca_cluster *cl)
{
	mca_modify(cl, REG_SYNCGEN_STATUS, SYNCGEN_STATUS_EN, 0);
	mca_modify(cl, REG_STATUS, STATUS_MCLK_EN, 0);

	device_link_del(cl->pd_link);
	clk_disable_unprepare(cl->clk_parent);
}

static bool mca_fe_clocks_in_use(struct mca_cluster *cl)
{
	struct mca_data *mca = cl->host;
	struct mca_cluster *be_cl;
	int stream, i;

	mutex_lock(&mca->port_mutex);
	for (i = 0; i < mca->nclusters; i++) {
		be_cl = &mca->clusters[i];

		if (be_cl->port_driver != cl->no)
			continue;

		for_each_pcm_streams(stream) {
			if (be_cl->clocks_in_use[stream]) {
				mutex_unlock(&mca->port_mutex);
				return true;
			}
		}
	}
	mutex_unlock(&mca->port_mutex);
	return false;
}

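/*
 * Clock bring-up for a frontend is effectively refcounted through the
 * clocks_in_use flags: the first backend stream to prepare enables the
 * clocks of the driving frontend, and the last one to be freed (see
 * mca_be_hw_free below) shuts them down again.
 */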
static int mca_be_prepare(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;
	int ret;

	if (cl->port_driver < 0)
		return -EINVAL;

	fe_cl = &mca->clusters[cl->port_driver];

	/*
	 * Typically the CODECs we are paired with will require clocks
	 * to be present at time of unmute with the 'mute_stream' op
	 * or at time of DAPM widget power-up. We need to enable clocks
	 * here at the latest (frontend prepare would be too late).
	 */
	if (!mca_fe_clocks_in_use(fe_cl)) {
		ret = mca_fe_enable_clocks(fe_cl);
		if (ret < 0)
			return ret;
	}

	cl->clocks_in_use[substream->stream] = true;

	return 0;
}

static int mca_be_hw_free(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct mca_cluster *fe_cl;

	if (cl->port_driver < 0)
		return -EINVAL;

	/*
	 * We are operating on a foreign cluster here, but since we
	 * belong to the same PCM, accesses should have been
	 * synchronized at ASoC level.
	 */
	fe_cl = &mca->clusters[cl->port_driver];
	if (!mca_fe_clocks_in_use(fe_cl))
		return 0; /* Nothing to do */

	cl->clocks_in_use[substream->stream] = false;

	if (!mca_fe_clocks_in_use(fe_cl))
		mca_fe_disable_clocks(fe_cl);

	return 0;
}

static unsigned int mca_crop_mask(unsigned int mask, int nchans)
{
	while (hweight32(mask) > nchans)
		mask &= ~(1 << __fls(mask));

	return mask;
}

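/*
 * Configure a serdes unit for a stream. Note that the slot mask handed to
 * the hardware is cropped from the most-significant end down to the channel
 * count, e.g. a mask of 0xff with nchans == 2 is cropped to 0x3.
 */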
static int mca_configure_serdes(struct mca_cluster *cl, int serdes_unit,
				unsigned int mask, int slots, int nchans,
				int slot_width, bool is_tx, int port)
{
	void __iomem *serdes_base = cl->base + serdes_unit;
	u32 serdes_conf, serdes_conf_mask;

	serdes_conf_mask = SERDES_CONF_WIDTH_MASK | SERDES_CONF_NCHANS;
	serdes_conf = FIELD_PREP(SERDES_CONF_NCHANS, max(slots, 1) - 1);
	switch (slot_width) {
	case 16:
		serdes_conf |= SERDES_CONF_WIDTH_16BIT;
		break;
	case 20:
		serdes_conf |= SERDES_CONF_WIDTH_20BIT;
		break;
	case 24:
		serdes_conf |= SERDES_CONF_WIDTH_24BIT;
		break;
	case 32:
		serdes_conf |= SERDES_CONF_WIDTH_32BIT;
		break;
	default:
		goto err;
	}

	serdes_conf_mask |= SERDES_CONF_SYNC_SEL;
	serdes_conf |= FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1);

	if (is_tx) {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_UNK3;
	} else {
		serdes_conf_mask |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
				    SERDES_CONF_UNK3 |
				    SERDES_CONF_NO_DATA_FEEDBACK;
		serdes_conf |= SERDES_CONF_UNK1 | SERDES_CONF_UNK2 |
			       SERDES_CONF_NO_DATA_FEEDBACK;
	}

	mca_modify(cl,
		   serdes_unit +
			   (is_tx ? REG_TX_SERDES_CONF : REG_RX_SERDES_CONF),
		   serdes_conf_mask, serdes_conf);

	if (is_tx) {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(0xffffffff,
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0x8);
		writel_relaxed(~((u32)mask),
			       serdes_base + REG_TX_SERDES_SLOTMASK + 0xc);
	} else {
		writel_relaxed(0xffffffff,
			       serdes_base + REG_RX_SERDES_SLOTMASK);
		writel_relaxed(~((u32)mca_crop_mask(mask, nchans)),
			       serdes_base + REG_RX_SERDES_SLOTMASK + 0x4);
		writel_relaxed(1 << port,
			       serdes_base + REG_RX_SERDES_PORT);
	}

	return 0;

err:
	dev_err(cl->host->dev,
		"unsupported SERDES configuration requested (mask=0x%x slots=%d slot_width=%d)\n",
		mask, slots, slot_width);
	return -EINVAL;
}

static int mca_fe_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
			       unsigned int rx_mask, int slots, int slot_width)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);

	cl->tdm_slots = slots;
	cl->tdm_slot_width = slot_width;
	cl->tdm_tx_mask = tx_mask;
	cl->tdm_rx_mask = rx_mask;

	return 0;
}

static int mca_fe_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	bool fpol_inv = false;
	u32 serdes_conf = 0;
	u32 bitstart;

	if ((fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) !=
	    SND_SOC_DAIFMT_BP_FP)
		goto err;

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		fpol_inv = 0;
		bitstart = 1;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		fpol_inv = 1;
		bitstart = 0;
		break;
	default:
		goto err;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_IF:
	case SND_SOC_DAIFMT_IB_IF:
		fpol_inv ^= 1;
		break;
	}

	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
	case SND_SOC_DAIFMT_NB_IF:
		serdes_conf |= SERDES_CONF_BCLK_POL;
		break;
	}

	if (!fpol_inv)
		goto err;

	mca_modify(cl, CLUSTER_TX_OFF + REG_TX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	mca_modify(cl, CLUSTER_RX_OFF + REG_RX_SERDES_CONF,
		   SERDES_CONF_BCLK_POL, serdes_conf);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_TX_OFF + REG_TX_SERDES_BITSTART);
	writel_relaxed(bitstart,
		       cl->base + CLUSTER_RX_OFF + REG_RX_SERDES_BITSTART);

	return 0;

err:
	dev_err(mca->dev, "unsupported DAI format (0x%x) requested\n", fmt);
	return -EINVAL;
}

static int mca_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);

	cl->bclk_ratio = ratio;

	return 0;
}

static int mca_fe_get_port(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = snd_soc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *be;
	struct snd_soc_dpcm *dpcm;

	be = NULL;
	for_each_dpcm_be(fe, substream->stream, dpcm) {
		be = dpcm->be;
		break;
	}

	if (!be)
		return -EINVAL;

	return mca_dai_to_cluster(snd_soc_rtd_to_cpu(be, 0))->no;
}

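/*
 * E.g. for a plain 2-channel S24_LE stream with no TDM settings supplied
 * from above, the initial guesses below come out as a 24-bit slot width
 * and two slots, hence a BCLK ratio of 48 and a slot mask of 0x3.
 */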
static int mca_fe_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;
	struct device *dev = mca->dev;
	unsigned int samp_rate = params_rate(params);
	bool is_tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	bool refine_tdm = false;
	unsigned long bclk_ratio;
	unsigned int tdm_slots, tdm_slot_width, tdm_mask;
	u32 regval, pad;
	int ret, port, nchans_ceiled;

	if (!cl->tdm_slot_width) {
		/*
		 * We were not given TDM settings from above, set initial
		 * guesses which will later be refined.
		 */
		tdm_slot_width = params_width(params);
		tdm_slots = params_channels(params);
		refine_tdm = true;
	} else {
		tdm_slot_width = cl->tdm_slot_width;
		tdm_slots = cl->tdm_slots;
		tdm_mask = is_tx ? cl->tdm_tx_mask : cl->tdm_rx_mask;
	}

	if (cl->bclk_ratio)
		bclk_ratio = cl->bclk_ratio;
	else
		bclk_ratio = tdm_slot_width * tdm_slots;

	if (refine_tdm) {
		int nchannels = params_channels(params);

		if (nchannels > 2) {
			dev_err(dev, "missing TDM settings for stream with more than two channels\n");
			return -EINVAL;
		}

		if ((bclk_ratio % nchannels) != 0) {
			dev_err(dev, "BCLK ratio (%ld) not divisible by no. of channels (%d)\n",
				bclk_ratio, nchannels);
			return -EINVAL;
		}

		tdm_slot_width = bclk_ratio / nchannels;

		if (tdm_slot_width > 32 && nchannels == 1)
			tdm_slot_width = 32;

		if (tdm_slot_width < params_width(params)) {
			dev_err(dev, "TDM slots too narrow (tdm=%d params=%d)\n",
				tdm_slot_width, params_width(params));
			return -EINVAL;
		}

		tdm_mask = (1 << tdm_slots) - 1;
	}

	port = mca_fe_get_port(substream);
	if (port < 0)
		return port;

	ret = mca_configure_serdes(cl, is_tx ? CLUSTER_TX_OFF : CLUSTER_RX_OFF,
				   tdm_mask, tdm_slots, params_channels(params),
				   tdm_slot_width, is_tx, port);
	if (ret)
		return ret;

	pad = 32 - params_width(params);

	/*
	 * TODO: Here the register semantics aren't clear.
	 */
	nchans_ceiled = min_t(int, params_channels(params), 4);
	regval = FIELD_PREP(DMA_ADAPTER_NCHANS, nchans_ceiled) |
		 FIELD_PREP(DMA_ADAPTER_TX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_RX_NCHANS, 0x2) |
		 FIELD_PREP(DMA_ADAPTER_TX_LSB_PAD, pad) |
		 FIELD_PREP(DMA_ADAPTER_RX_MSB_PAD, pad);

#ifndef USE_RXB_FOR_CAPTURE
	writel_relaxed(regval, mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
#else
	if (is_tx)
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_A(cl->no));
	else
		writel_relaxed(regval,
			       mca->switch_base + REG_DMA_ADAPTER_B(cl->no));
#endif

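	/*
	 * E.g. with a BCLK ratio of 64 and a 48 kHz stream, the parent
	 * clock is asked for 64 * 48000 = 3.072 MHz and the FSYNC
	 * generator is programmed for an even 32/32 duty cycle (the
	 * period registers appear to hold the cycle count minus one).
	 */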
	if (!mca_fe_clocks_in_use(cl)) {
		/*
		 * Set up FSYNC duty cycle as even as possible.
		 */
		writel_relaxed((bclk_ratio / 2) - 1,
			       cl->base + REG_SYNCGEN_HI_PERIOD);
		writel_relaxed(((bclk_ratio + 1) / 2) - 1,
			       cl->base + REG_SYNCGEN_LO_PERIOD);
		writel_relaxed(FIELD_PREP(MCLK_CONF_DIV, 0x1),
			       cl->base + REG_MCLK_CONF);

		ret = clk_set_rate(cl->clk_parent, bclk_ratio * samp_rate);
		if (ret) {
			dev_err(mca->dev, "cluster %d: unable to set clock parent rate: %d\n",
				cl->no, ret);
			return ret;
		}
	}

	return 0;
}

static const struct snd_soc_dai_ops mca_fe_ops = {
	.set_fmt = mca_fe_set_fmt,
	.set_bclk_ratio = mca_set_bclk_ratio,
	.set_tdm_slot = mca_fe_set_tdm_slot,
	.hw_params = mca_fe_hw_params,
	.trigger = mca_fe_trigger,
};

static bool mca_be_started(struct mca_cluster *cl)
{
	int stream;

	for_each_pcm_streams(stream)
		if (cl->port_started[stream])
			return true;
	return false;
}

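/*
 * Startup of a backend routes its I2S port to the frontend cluster found
 * at the other end of the DPCM link: the port takes its clocks from that
 * cluster and its TX data from the cluster's TXA serdes.
 */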
static int mca_be_startup(struct snd_pcm_substream *substream,
			  struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *be = snd_soc_substream_to_rtd(substream);
	struct snd_soc_pcm_runtime *fe;
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_cluster *fe_cl;
	struct mca_data *mca = cl->host;
	struct snd_soc_dpcm *dpcm;

	fe = NULL;

	for_each_dpcm_fe(be, substream->stream, dpcm) {
		if (fe && dpcm->fe != fe) {
			dev_err(mca->dev, "multiple FEs per one BE unsupported\n");
			return -EINVAL;
		}

		fe = dpcm->fe;
	}

	if (!fe)
		return -EINVAL;

	fe_cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(fe, 0));

	if (mca_be_started(cl)) {
		/*
		 * Port is already started in the other direction.
		 * Make sure there isn't a conflict with another cluster
		 * driving the port.
		 */
		if (cl->port_driver != fe_cl->no)
			return -EINVAL;

		cl->port_started[substream->stream] = true;
		return 0;
	}

	writel_relaxed(PORT_ENABLES_CLOCKS | PORT_ENABLES_TX_DATA,
		       cl->base + REG_PORT_ENABLES);
	writel_relaxed(FIELD_PREP(PORT_CLOCK_SEL, fe_cl->no + 1),
		       cl->base + REG_PORT_CLOCK_SEL);
	writel_relaxed(PORT_DATA_SEL_TXA(fe_cl->no),
		       cl->base + REG_PORT_DATA_SEL);
	mutex_lock(&mca->port_mutex);
	cl->port_driver = fe_cl->no;
	mutex_unlock(&mca->port_mutex);
	cl->port_started[substream->stream] = true;

	return 0;
}

static void mca_be_shutdown(struct snd_pcm_substream *substream,
			    struct snd_soc_dai *dai)
{
	struct mca_cluster *cl = mca_dai_to_cluster(dai);
	struct mca_data *mca = cl->host;

	cl->port_started[substream->stream] = false;

	if (!mca_be_started(cl)) {
		/*
		 * Were we the last direction to shutdown?
		 * Turn off the lights.
		 */
		writel_relaxed(0, cl->base + REG_PORT_ENABLES);
		writel_relaxed(0, cl->base + REG_PORT_DATA_SEL);
		mutex_lock(&mca->port_mutex);
		cl->port_driver = -1;
		mutex_unlock(&mca->port_mutex);
	}
}

static const struct snd_soc_dai_ops mca_be_ops = {
	.prepare = mca_be_prepare,
	.hw_free = mca_be_hw_free,
	.startup = mca_be_startup,
	.shutdown = mca_be_shutdown,
};

static int mca_set_runtime_hwparams(struct snd_soc_component *component,
				    struct snd_pcm_substream *substream,
				    struct dma_chan *chan)
{
	struct device *dma_dev = chan->device->dev;
	struct snd_dmaengine_dai_dma_data dma_data = {};
	struct snd_pcm_hardware hw;
	int ret;

	memset(&hw, 0, sizeof(hw));

	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		  SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = 16;

	ret = snd_dmaengine_pcm_refine_runtime_hwparams(substream, &dma_data,
							&hw, chan);
	if (ret)
		return ret;

	return snd_soc_set_runtime_hwparams(substream, &hw);
}

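/*
 * The component PCM ops below bail out early on BE links (no_pcm), as the
 * DMA machinery is attached to the FE side of the DPCM graph only.
 */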
static int mca_pcm_open(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
	struct dma_chan *chan = cl->dma_chans[substream->stream];
	int ret;

	if (rtd->dai_link->no_pcm)
		return 0;

	ret = mca_set_runtime_hwparams(component, substream, chan);
	if (ret)
		return ret;

	return snd_dmaengine_pcm_open(substream, chan);
}

static int mca_hw_params(struct snd_soc_component *component,
			 struct snd_pcm_substream *substream,
			 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
	struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
	struct dma_slave_config slave_config;
	int ret;

	if (rtd->dai_link->no_pcm)
		return 0;

	memset(&slave_config, 0, sizeof(slave_config));
	ret = snd_hwparams_to_dma_slave_config(substream, params,
					       &slave_config);
	if (ret < 0)
		return ret;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		slave_config.dst_port_window_size =
			min_t(u32, params_channels(params), 4);
	else
		slave_config.src_port_window_size =
			min_t(u32, params_channels(params), 4);

	return dmaengine_slave_config(chan, &slave_config);
}

static int mca_close(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	return snd_dmaengine_pcm_close(substream);
}

static int mca_trigger(struct snd_soc_component *component,
		       struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return 0;

	/*
	 * Before we do the PCM trigger proper, insert an opportunity
	 * to reset the frontend's SERDES.
	 */
	mca_fe_early_trigger(substream, cmd, snd_soc_rtd_to_cpu(rtd, 0));

	return snd_dmaengine_pcm_trigger(substream, cmd);
}

static snd_pcm_uframes_t mca_pointer(struct snd_soc_component *component,
				     struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);

	if (rtd->dai_link->no_pcm)
		return -ENOTSUPP;

	return snd_dmaengine_pcm_pointer(substream);
}

static struct dma_chan *mca_request_dma_channel(struct mca_cluster *cl,
						unsigned int stream)
{
	bool is_tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
#ifndef USE_RXB_FOR_CAPTURE
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%da", cl->no);
#else
	char *name = devm_kasprintf(cl->host->dev, GFP_KERNEL,
				    is_tx ? "tx%da" : "rx%db", cl->no);
#endif
	return of_dma_request_slave_channel(cl->host->dev->of_node, name);
}

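/*
 * DMA channels are looked up by name in the MCA node of the devicetree
 * ("tx0a"/"rx0b" and so on, see mca_request_dma_channel above); they are
 * requested when the PCM is constructed and released when it is destroyed.
 */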
static void mca_pcm_free(struct snd_soc_component *component,
			 struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *rtd = snd_pcm_chip(pcm);
	struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;

		if (!substream || !cl->dma_chans[i])
			continue;

		dma_release_channel(cl->dma_chans[i]);
		cl->dma_chans[i] = NULL;
	}
}

static int mca_pcm_new(struct snd_soc_component *component,
		       struct snd_soc_pcm_runtime *rtd)
{
	struct mca_cluster *cl = mca_dai_to_cluster(snd_soc_rtd_to_cpu(rtd, 0));
	unsigned int i;

	if (rtd->dai_link->no_pcm)
		return 0;

	for_each_pcm_streams(i) {
		struct snd_pcm_substream *substream =
			rtd->pcm->streams[i].substream;
		struct dma_chan *chan;

		if (!substream)
			continue;

		chan = mca_request_dma_channel(cl, i);

		if (IS_ERR_OR_NULL(chan)) {
			mca_pcm_free(component, rtd->pcm);

			if (chan && PTR_ERR(chan) == -EPROBE_DEFER)
				return PTR_ERR(chan);

			dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n",
				i, cl->no, chan);

			if (!chan)
				return -EINVAL;
			return PTR_ERR(chan);
		}

		cl->dma_chans[i] = chan;
		snd_pcm_set_managed_buffer(substream, SNDRV_DMA_TYPE_DEV_IRAM,
					   chan->device->dev, 512 * 1024 * 6,
					   SIZE_MAX);
	}

	return 0;
}

static const struct snd_soc_component_driver mca_component = {
	.name = "apple-mca",
	.open = mca_pcm_open,
	.close = mca_close,
	.hw_params = mca_hw_params,
	.trigger = mca_trigger,
	.pointer = mca_pointer,
	.pcm_construct = mca_pcm_new,
	.pcm_destruct = mca_pcm_free,
};

static void apple_mca_release(struct mca_data *mca)
{
	int i;

	for (i = 0; i < mca->nclusters; i++) {
		struct mca_cluster *cl = &mca->clusters[i];

		if (!IS_ERR_OR_NULL(cl->clk_parent))
			clk_put(cl->clk_parent);

		if (!IS_ERR_OR_NULL(cl->pd_dev))
			dev_pm_domain_detach(cl->pd_dev, true);
	}

	if (mca->pd_link)
		device_link_del(mca->pd_link);

	if (!IS_ERR_OR_NULL(mca->pd_dev))
		dev_pm_domain_detach(mca->pd_dev, true);

	reset_control_rearm(mca->rstc);
}

static int apple_mca_probe(struct platform_device *pdev)
{
	struct mca_data *mca;
	struct mca_cluster *clusters;
	struct snd_soc_dai_driver *dai_drivers;
	struct resource *res;
	void __iomem *base;
	int nclusters;
	int ret, i;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (resource_size(res) < CLUSTER_STRIDE)
		return -EINVAL;
	nclusters = resource_size(res) / CLUSTER_STRIDE;

	mca = devm_kzalloc(&pdev->dev, struct_size(mca, clusters, nclusters),
			   GFP_KERNEL);
	if (!mca)
		return -ENOMEM;
	mca->dev = &pdev->dev;
	mca->nclusters = nclusters;
	mutex_init(&mca->port_mutex);
	platform_set_drvdata(pdev, mca);
	clusters = mca->clusters;

	mca->switch_base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mca->switch_base))
		return PTR_ERR(mca->switch_base);

	mca->rstc = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(mca->rstc))
		return PTR_ERR(mca->rstc);

	dai_drivers = devm_kzalloc(
		&pdev->dev, sizeof(*dai_drivers) * 2 * nclusters, GFP_KERNEL);
	if (!dai_drivers)
		return -ENOMEM;

	mca->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, 0);
	if (IS_ERR(mca->pd_dev))
		return -EINVAL;

	mca->pd_link = device_link_add(&pdev->dev, mca->pd_dev,
				       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
				       DL_FLAG_RPM_ACTIVE);
	if (!mca->pd_link) {
		ret = -EINVAL;
		/* Prevent an unbalanced reset rearm */
		mca->rstc = NULL;
		goto err_release;
	}

	reset_control_reset(mca->rstc);

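	/*
	 * For each cluster, set up one frontend DAI (mca-pcm-N) and one
	 * backend DAI (mca-i2s-N). Backend IDs are offset by nclusters,
	 * matching the ID convention assumed in mca_dai_to_cluster().
	 */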
	for (i = 0; i < nclusters; i++) {
		struct mca_cluster *cl = &clusters[i];
		struct snd_soc_dai_driver *fe =
			&dai_drivers[mca->nclusters + i];
		struct snd_soc_dai_driver *be = &dai_drivers[i];

		cl->host = mca;
		cl->no = i;
		cl->base = base + CLUSTER_STRIDE * i;
		cl->port_driver = -1;
		cl->clk_parent = of_clk_get(pdev->dev.of_node, i);
		if (IS_ERR(cl->clk_parent)) {
			dev_err(&pdev->dev, "unable to obtain clock %d: %ld\n",
				i, PTR_ERR(cl->clk_parent));
			ret = PTR_ERR(cl->clk_parent);
			goto err_release;
		}
		cl->pd_dev = dev_pm_domain_attach_by_id(&pdev->dev, i + 1);
		if (IS_ERR(cl->pd_dev)) {
			dev_err(&pdev->dev,
				"unable to obtain cluster %d PD: %ld\n", i,
				PTR_ERR(cl->pd_dev));
			ret = PTR_ERR(cl->pd_dev);
			goto err_release;
		}

		fe->id = i;
		fe->name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-pcm-%d", i);
		if (!fe->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		fe->ops = &mca_fe_ops;
		fe->playback.channels_min = 1;
		fe->playback.channels_max = 32;
		fe->playback.rates = SNDRV_PCM_RATE_8000_192000;
		fe->playback.formats = APPLE_MCA_FMTBITS;
		fe->capture.channels_min = 1;
		fe->capture.channels_max = 32;
		fe->capture.rates = SNDRV_PCM_RATE_8000_192000;
		fe->capture.formats = APPLE_MCA_FMTBITS;
		fe->symmetric_rate = 1;

		fe->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d TX", i);
		fe->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "PCM%d RX", i);

		if (!fe->playback.stream_name || !fe->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}

		be->id = i + nclusters;
		be->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "mca-i2s-%d", i);
		if (!be->name) {
			ret = -ENOMEM;
			goto err_release;
		}
		be->ops = &mca_be_ops;
		be->playback.channels_min = 1;
		be->playback.channels_max = 32;
		be->playback.rates = SNDRV_PCM_RATE_8000_192000;
		be->playback.formats = APPLE_MCA_FMTBITS;
		be->capture.channels_min = 1;
		be->capture.channels_max = 32;
		be->capture.rates = SNDRV_PCM_RATE_8000_192000;
		be->capture.formats = APPLE_MCA_FMTBITS;

		be->playback.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d TX", i);
		be->capture.stream_name =
			devm_kasprintf(&pdev->dev, GFP_KERNEL, "I2S%d RX", i);
		if (!be->playback.stream_name || !be->capture.stream_name) {
			ret = -ENOMEM;
			goto err_release;
		}
	}

	ret = snd_soc_register_component(&pdev->dev, &mca_component,
					 dai_drivers, nclusters * 2);
	if (ret) {
		dev_err(&pdev->dev, "unable to register ASoC component: %d\n",
			ret);
		goto err_release;
	}

	return 0;

err_release:
	apple_mca_release(mca);
	return ret;
}

static void apple_mca_remove(struct platform_device *pdev)
{
	struct mca_data *mca = platform_get_drvdata(pdev);

	snd_soc_unregister_component(&pdev->dev);
	apple_mca_release(mca);
}

static const struct of_device_id apple_mca_of_match[] = {
	{ .compatible = "apple,mca", },
	{}
};
MODULE_DEVICE_TABLE(of, apple_mca_of_match);

static struct platform_driver apple_mca_driver = {
	.driver = {
		.name = "apple-mca",
		.of_match_table = apple_mca_of_match,
	},
	.probe = apple_mca_probe,
	.remove_new = apple_mca_remove,
};
module_platform_driver(apple_mca_driver);

MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("ASoC Apple MCA driver");
MODULE_LICENSE("GPL");