// SPDX-License-Identifier: GPL-2.0+
//
// idma.c - I2S0 internal DMA driver
//
// Copyright (c) 2011 Samsung Electronics Co., Ltd.
//	http://www.samsung.com

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "i2s.h"
#include "idma.h"
#include "i2s-regs.h"

#define ST_RUNNING		(1<<0)
#define ST_OPENED		(1<<1)

static const struct snd_pcm_hardware idma_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		    SNDRV_PCM_INFO_BLOCK_TRANSFER |
		    SNDRV_PCM_INFO_MMAP |
		    SNDRV_PCM_INFO_MMAP_VALID |
		    SNDRV_PCM_INFO_PAUSE |
		    SNDRV_PCM_INFO_RESUME,
	.buffer_bytes_max = MAX_IDMA_BUFFER,
	.period_bytes_min = 128,
	.period_bytes_max = MAX_IDMA_PERIOD,
	.periods_min = 1,
	.periods_max = 2,
};

struct idma_ctrl {
	spinlock_t	lock;
	int		state;
	dma_addr_t	start;
	dma_addr_t	pos;
	dma_addr_t	end;
	dma_addr_t	period;
	dma_addr_t	periodsz;
	void		*token;
	void		(*cb)(void *dt, int bytes_xfer);
};

static struct idma_info {
	spinlock_t	lock;
	void __iomem	*regs;
	dma_addr_t	lp_tx_addr;
} idma;

static int idma_irq;

static void idma_getpos(dma_addr_t *src)
{
	*src = idma.lp_tx_addr +
		(readl(idma.regs + I2STRNCNT) & 0xffffff) * 4;
}

static int idma_enqueue(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 val;

	spin_lock(&prtd->lock);
	prtd->token = (void *) substream;
	spin_unlock(&prtd->lock);

	/* Internal DMA Level0 Interrupt Address */
	val = idma.lp_tx_addr + prtd->periodsz;
	writel(val, idma.regs + I2SLVL0ADDR);

	/* Start address0 of I2S internal DMA operation. */
	val = idma.lp_tx_addr;
	writel(val, idma.regs + I2SSTR0);

	/*
	 * Transfer block size for I2S internal DMA.
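	 * The block size is programmed in 32-bit words, hence the
	 * "runtime->dma_bytes >> 2" below; idma_getpos() performs the
	 * inverse conversion when it multiplies I2STRNCNT by 4.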
	 * The transfer size must be set before the DMA operation is started.
	 */
	val = readl(idma.regs + I2SSIZE);
	val &= ~(I2SSIZE_TRNMSK << I2SSIZE_SHIFT);
	val |= (((runtime->dma_bytes >> 2) &
			I2SSIZE_TRNMSK) << I2SSIZE_SHIFT);
	writel(val, idma.regs + I2SSIZE);

	val = readl(idma.regs + I2SAHB);
	val |= AHB_INTENLVL0;
	writel(val, idma.regs + I2SAHB);

	return 0;
}

static void idma_setcallbk(struct snd_pcm_substream *substream,
				void (*cb)(void *, int))
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	spin_lock(&prtd->lock);
	prtd->cb = cb;
	spin_unlock(&prtd->lock);
}

static void idma_control(int op)
{
	u32 val;

	spin_lock(&idma.lock);

	/* Read-modify-write of I2SAHB must happen under the lock. */
	val = readl(idma.regs + I2SAHB);

	switch (op) {
	case LPAM_DMA_START:
		val |= (AHB_INTENLVL0 | AHB_DMAEN);
		break;
	case LPAM_DMA_STOP:
		val &= ~(AHB_INTENLVL0 | AHB_DMAEN);
		break;
	default:
		spin_unlock(&idma.lock);
		return;
	}

	writel(val, idma.regs + I2SAHB);
	spin_unlock(&idma.lock);
}

static void idma_done(void *id, int bytes_xfer)
{
	struct snd_pcm_substream *substream = id;
	struct idma_ctrl *prtd = substream->runtime->private_data;

	if (prtd && (prtd->state & ST_RUNNING))
		snd_pcm_period_elapsed(substream);
}

static int idma_hw_params(struct snd_soc_component *component,
			  struct snd_pcm_substream *substream,
			  struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = substream->runtime->private_data;
	u32 mod = readl(idma.regs + I2SMOD);
	u32 ahb = readl(idma.regs + I2SAHB);

	ahb |= (AHB_DMARLD | AHB_INTMASK);
	mod |= MOD_TXS_IDMA;
	writel(ahb, idma.regs + I2SAHB);
	writel(mod, idma.regs + I2SMOD);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
	runtime->dma_bytes = params_buffer_bytes(params);

	prtd->start = prtd->pos = runtime->dma_addr;
	prtd->period = params_periods(params);
	prtd->periodsz = params_period_bytes(params);
	prtd->end = runtime->dma_addr + runtime->dma_bytes;

	idma_setcallbk(substream, idma_done);

	return 0;
}

static int idma_hw_free(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);

	return 0;
}

static int idma_prepare(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;

	prtd->pos = prtd->start;

	/* flush the DMA channel */
	idma_control(LPAM_DMA_STOP);
	idma_enqueue(substream);

	return 0;
}

static int idma_trigger(struct snd_soc_component *component,
			struct snd_pcm_substream *substream, int cmd)
{
	struct idma_ctrl *prtd = substream->runtime->private_data;
	int ret = 0;

	spin_lock(&prtd->lock);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		prtd->state |= ST_RUNNING;
		idma_control(LPAM_DMA_START);
		break;

	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		prtd->state &= ~ST_RUNNING;
		idma_control(LPAM_DMA_STOP);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	spin_unlock(&prtd->lock);

	return ret;
}

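/*
 * The playback pointer is read back from the hardware transfer counter
 * (I2STRNCNT) via idma_getpos() rather than being tracked in software.
 */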
static snd_pcm_uframes_t
	idma_pointer(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;
	dma_addr_t src;
	unsigned long res;

	spin_lock(&prtd->lock);

	idma_getpos(&src);
	res = src - prtd->start;

	spin_unlock(&prtd->lock);

	return bytes_to_frames(substream->runtime, res);
}

static int idma_mmap(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream,
		     struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long size, offset;

	/* From snd_pcm_lib_mmap_iomem */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	size = vma->vm_end - vma->vm_start;
	offset = vma->vm_pgoff << PAGE_SHIFT;
	return io_remap_pfn_range(vma, vma->vm_start,
			(runtime->dma_addr + offset) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}

static irqreturn_t iis_irq(int irqno, void *dev_id)
{
	struct idma_ctrl *prtd = (struct idma_ctrl *)dev_id;
	u32 iisahb, val, addr;

	iisahb = readl(idma.regs + I2SAHB);

	val = (iisahb & AHB_LVL0INT) ? AHB_CLRLVL0INT : 0;

	if (val) {
		iisahb |= val;
		writel(iisahb, idma.regs + I2SAHB);

		/*
		 * Advance the level-0 interrupt address by one period,
		 * wrapping at the end of the buffer.
		 */
		addr = readl(idma.regs + I2SLVL0ADDR) - idma.lp_tx_addr;
		addr += prtd->periodsz;
		addr %= (u32)(prtd->end - prtd->start);
		addr += idma.lp_tx_addr;

		writel(addr, idma.regs + I2SLVL0ADDR);

		if (prtd->cb)
			prtd->cb(prtd->token, prtd->period);
	}

	return IRQ_HANDLED;
}

static int idma_open(struct snd_soc_component *component,
		     struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd;
	int ret;

	snd_soc_set_runtime_hwparams(substream, &idma_hardware);

	prtd = kzalloc(sizeof(struct idma_ctrl), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	ret = request_irq(idma_irq, iis_irq, 0, "i2s", prtd);
	if (ret < 0) {
		pr_err("failed to claim i2s irq, ret = %d\n", ret);
		kfree(prtd);
		return ret;
	}

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}

static int idma_close(struct snd_soc_component *component,
		      struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct idma_ctrl *prtd = runtime->private_data;

	if (!prtd) {
		pr_err("idma_close called with prtd == NULL\n");
		return 0;
	}

	free_irq(idma_irq, prtd);
	kfree(prtd);

	return 0;
}

static void idma_free(struct snd_soc_component *component,
		      struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	struct snd_dma_buffer *buf;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (!substream)
		return;

	buf = &substream->dma_buffer;
	if (!buf->area)
		return;

	iounmap((void __iomem *)buf->area);

	buf->area = NULL;
	buf->addr = 0;
}

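/*
 * The playback buffer is the fixed low-power audio region at lp_tx_addr
 * (passed in through idma_reg_addr_init()), so "preallocation" amounts to
 * an ioremap() of that region; idma_free() unmaps it again.
 */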
static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
{
	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
	struct snd_dma_buffer *buf = &substream->dma_buffer;

	buf->dev.dev = pcm->card->dev;
	buf->private_data = NULL;

	/* Assign PCM buffer pointers */
	buf->dev.type = SNDRV_DMA_TYPE_CONTINUOUS;
	buf->addr = idma.lp_tx_addr;
	buf->bytes = idma_hardware.buffer_bytes_max;
	buf->area = (unsigned char * __force)ioremap(buf->addr, buf->bytes);
	if (!buf->area)
		return -ENOMEM;

	return 0;
}

static int idma_new(struct snd_soc_component *component,
		    struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
		ret = preallocate_idma_buffer(pcm,
				SNDRV_PCM_STREAM_PLAYBACK);
	}

	return ret;
}

void idma_reg_addr_init(void __iomem *regs, dma_addr_t addr)
{
	spin_lock_init(&idma.lock);
	idma.regs = regs;
	idma.lp_tx_addr = addr;
}
EXPORT_SYMBOL_GPL(idma_reg_addr_init);

static const struct snd_soc_component_driver asoc_idma_platform = {
	.open		= idma_open,
	.close		= idma_close,
	.trigger	= idma_trigger,
	.pointer	= idma_pointer,
	.mmap		= idma_mmap,
	.hw_params	= idma_hw_params,
	.hw_free	= idma_hw_free,
	.prepare	= idma_prepare,
	.pcm_construct	= idma_new,
	.pcm_destruct	= idma_free,
};

static int asoc_idma_platform_probe(struct platform_device *pdev)
{
	idma_irq = platform_get_irq(pdev, 0);
	if (idma_irq < 0)
		return idma_irq;

	return devm_snd_soc_register_component(&pdev->dev, &asoc_idma_platform,
					       NULL, 0);
}

static struct platform_driver asoc_idma_driver = {
	.driver = {
		.name = "samsung-idma",
	},

	.probe = asoc_idma_platform_probe,
};

module_platform_driver(asoc_idma_driver);

MODULE_AUTHOR("Jaswinder Singh, <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("Samsung ASoC IDMA Driver");
MODULE_LICENSE("GPL");