// SPDX-License-Identifier: GPL-2.0
//
// Renesas RZ/G2L ASoC Serial Sound Interface (SSIF-2) Driver
//
// Copyright (C) 2021 Renesas Electronics Corp.
// Copyright (C) 2019 Chris Brandt.
//

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <sound/soc.h>

/* REGISTER OFFSET */
#define SSICR			0x000
#define SSISR			0x004
#define SSIFCR			0x010
#define SSIFSR			0x014
#define SSIFTDR			0x018
#define SSIFRDR			0x01c
#define SSIOFR			0x020
#define SSISCR			0x024

/* SSI REGISTER BITS */
#define SSICR_DWL(x)		(((x) & 0x7) << 19)
#define SSICR_SWL(x)		(((x) & 0x7) << 16)

#define SSICR_CKS		BIT(30)
#define SSICR_TUIEN		BIT(29)
#define SSICR_TOIEN		BIT(28)
#define SSICR_RUIEN		BIT(27)
#define SSICR_ROIEN		BIT(26)
#define SSICR_MST		BIT(14)
#define SSICR_BCKP		BIT(13)
#define SSICR_LRCKP		BIT(12)
#define SSICR_CKDV(x)		(((x) & 0xf) << 4)
#define SSICR_TEN		BIT(1)
#define SSICR_REN		BIT(0)

#define SSISR_TUIRQ		BIT(29)
#define SSISR_TOIRQ		BIT(28)
#define SSISR_RUIRQ		BIT(27)
#define SSISR_ROIRQ		BIT(26)
#define SSISR_IIRQ		BIT(25)

#define SSIFCR_AUCKE		BIT(31)
#define SSIFCR_SSIRST		BIT(16)
#define SSIFCR_TIE		BIT(3)
#define SSIFCR_RIE		BIT(2)
#define SSIFCR_TFRST		BIT(1)
#define SSIFCR_RFRST		BIT(0)
#define SSIFCR_FIFO_RST		(SSIFCR_TFRST | SSIFCR_RFRST)

#define SSIFSR_TDC_MASK		0x3f
#define SSIFSR_TDC_SHIFT	24
#define SSIFSR_RDC_MASK		0x3f
#define SSIFSR_RDC_SHIFT	8

#define SSIFSR_TDE		BIT(16)
#define SSIFSR_RDF		BIT(0)

#define SSIOFR_LRCONT		BIT(8)

#define SSISCR_TDES(x)		(((x) & 0x1f) << 8)
#define SSISCR_RDFS(x)		(((x) & 0x1f) << 0)

/* Pre allocated buffers sizes */
#define PREALLOC_BUFFER		(SZ_32K)
#define PREALLOC_BUFFER_MAX	(SZ_32K)

#define SSI_RATES		SNDRV_PCM_RATE_8000_48000 /* 8k-44.1kHz */
#define SSI_FMTS		SNDRV_PCM_FMTBIT_S16_LE
#define SSI_CHAN_MIN		2
#define SSI_CHAN_MAX		2
#define SSI_FIFO_DEPTH		32

struct rz_ssi_priv;

struct rz_ssi_stream {
	struct rz_ssi_priv *priv;
	struct snd_pcm_substream *substream;
	int fifo_sample_size;	/* sample capacity of SSI FIFO */
	int dma_buffer_pos;	/* The address for the next DMA descriptor */
	int period_counter;	/* for keeping track of periods transferred */
	int sample_width;
	int buffer_pos;		/* current frame position in the buffer */
	int running;		/* 0=stopped, 1=running */

	int uerr_num;
	int oerr_num;

	struct dma_chan *dma_ch;

	int (*transfer)(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm);
};

struct rz_ssi_priv {
	void __iomem *base;
	struct platform_device *pdev;
	struct reset_control *rstc;
	struct device *dev;
	struct clk *sfr_clk;
	struct clk *clk;

	phys_addr_t phys;
	int irq_int;
	int irq_tx;
	int irq_rx;
	int irq_rt;

	spinlock_t lock;

	/*
	 * The SSI supports full-duplex transmission and reception.
	 * However, if an error occurs, channel reset (both transmission
	 * and reception reset) is required.
	 * So it is better to use as half-duplex (playing and recording
	 * should be done on separate channels).
	 */
	struct rz_ssi_stream playback;
	struct rz_ssi_stream capture;

	/* clock */
	unsigned long audio_mck;
	unsigned long audio_clk_1;
	unsigned long audio_clk_2;

	bool lrckp_fsync_fall;	/* LR clock polarity (SSICR.LRCKP) */
	bool bckp_rise;		/* Bit clock polarity (SSICR.BCKP) */
	bool dma_rt;

	/* Full duplex communication support */
	struct {
		unsigned int rate;
		unsigned int channels;
		unsigned int sample_width;
		unsigned int sample_bits;
	} hw_params_cache;
};

static void rz_ssi_dma_complete(void *data);

static void rz_ssi_reg_writel(struct rz_ssi_priv *priv, uint reg, u32 data)
{
	writel(data, (priv->base + reg));
}

static u32 rz_ssi_reg_readl(struct rz_ssi_priv *priv, uint reg)
{
	return readl(priv->base + reg);
}

static void rz_ssi_reg_mask_setl(struct rz_ssi_priv *priv, uint reg,
				 u32 bclr, u32 bset)
{
	u32 val;

	val = readl(priv->base + reg);
	val = (val & ~bclr) | bset;
	writel(val, (priv->base + reg));
}

static inline struct snd_soc_dai *
rz_ssi_get_dai(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);

	return snd_soc_rtd_to_cpu(rtd, 0);
}

static inline bool rz_ssi_stream_is_play(struct rz_ssi_priv *ssi,
					 struct snd_pcm_substream *substream)
{
	return substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
}

static inline struct rz_ssi_stream *
rz_ssi_stream_get(struct rz_ssi_priv *ssi, struct snd_pcm_substream *substream)
{
	struct rz_ssi_stream *stream = &ssi->playback;

	if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
		stream = &ssi->capture;

	return stream;
}

static inline bool rz_ssi_is_dma_enabled(struct rz_ssi_priv *ssi)
{
	return (ssi->playback.dma_ch && (ssi->dma_rt || ssi->capture.dma_ch));
}

static void rz_ssi_set_substream(struct rz_ssi_stream *strm,
				 struct snd_pcm_substream *substream)
{
	struct rz_ssi_priv *ssi = strm->priv;
	unsigned long flags;

	spin_lock_irqsave(&ssi->lock, flags);
	strm->substream = substream;
	spin_unlock_irqrestore(&ssi->lock, flags);
}

static bool rz_ssi_stream_is_valid(struct rz_ssi_priv *ssi,
				   struct rz_ssi_stream *strm)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&ssi->lock, flags);
	ret = strm->substream && strm->substream->runtime;
	spin_unlock_irqrestore(&ssi->lock, flags);

	return ret;
}

static inline bool rz_ssi_is_stream_running(struct rz_ssi_stream *strm)
{
	return strm->substream && strm->running;
}

static void rz_ssi_stream_init(struct rz_ssi_stream *strm,
			       struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	rz_ssi_set_substream(strm, substream);
	strm->sample_width = samples_to_bytes(runtime, 1);
	strm->dma_buffer_pos = 0;
	strm->period_counter = 0;
	strm->buffer_pos = 0;

	strm->oerr_num = 0;
	strm->uerr_num = 0;
	strm->running = 0;

	/* fifo init */
	strm->fifo_sample_size = SSI_FIFO_DEPTH;
}
static void rz_ssi_stream_quit(struct rz_ssi_priv *ssi,
			       struct rz_ssi_stream *strm)
{
	struct snd_soc_dai *dai = rz_ssi_get_dai(strm->substream);

	rz_ssi_set_substream(strm, NULL);

	if (strm->oerr_num > 0)
		dev_info(dai->dev, "overrun = %d\n", strm->oerr_num);

	if (strm->uerr_num > 0)
		dev_info(dai->dev, "underrun = %d\n", strm->uerr_num);
}

static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate,
			    unsigned int channels)
{
	static s8 ckdv[16] = { 1,  2,  4,  8, 16, 32, 64, 128,
			       6, 12, 24, 48, 96, -1, -1, -1 };
	unsigned int channel_bits = 32;	/* System Word Length */
	unsigned long bclk_rate = rate * channels * channel_bits;
	unsigned int div;
	unsigned int i;
	u32 ssicr = 0;
	u32 clk_ckdv;

	/* Clear AUCKE so we can set MST */
	rz_ssi_reg_writel(ssi, SSIFCR, 0);

	/* Continue to output LRCK pin even when idle */
	rz_ssi_reg_writel(ssi, SSIOFR, SSIOFR_LRCONT);
	if (ssi->audio_clk_1 && ssi->audio_clk_2) {
		if (ssi->audio_clk_1 % bclk_rate)
			ssi->audio_mck = ssi->audio_clk_2;
		else
			ssi->audio_mck = ssi->audio_clk_1;
	}

	/* Clock setting */
	ssicr |= SSICR_MST;
	if (ssi->audio_mck == ssi->audio_clk_1)
		ssicr |= SSICR_CKS;
	if (ssi->bckp_rise)
		ssicr |= SSICR_BCKP;
	if (ssi->lrckp_fsync_fall)
		ssicr |= SSICR_LRCKP;

	/* Determine the clock divider */
	clk_ckdv = 0;
	div = ssi->audio_mck / bclk_rate;
	/* try to find an match */
	for (i = 0; i < ARRAY_SIZE(ckdv); i++) {
		if (ckdv[i] == div) {
			clk_ckdv = i;
			break;
		}
	}

	if (i == ARRAY_SIZE(ckdv)) {
		dev_err(ssi->dev, "Rate not divisible by audio clock source\n");
		return -EINVAL;
	}

	/*
	 * DWL: Data Word Length = 16 bits
	 * SWL: System Word Length = 32 bits
	 */
	ssicr |= SSICR_CKDV(clk_ckdv);
	ssicr |= SSICR_DWL(1) | SSICR_SWL(3);
	rz_ssi_reg_writel(ssi, SSICR, ssicr);
	rz_ssi_reg_writel(ssi, SSIFCR,
			  (SSIFCR_AUCKE | SSIFCR_TFRST | SSIFCR_RFRST));

	return 0;
}

static void rz_ssi_set_idle(struct rz_ssi_priv *ssi)
{
	int timeout;

	/* Disable irqs */
	rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TUIEN | SSICR_TOIEN |
			     SSICR_RUIEN | SSICR_ROIEN, 0);
	rz_ssi_reg_mask_setl(ssi, SSIFCR, SSIFCR_TIE | SSIFCR_RIE, 0);

	/* Clear all error flags */
	rz_ssi_reg_mask_setl(ssi, SSISR,
			     (SSISR_TOIRQ | SSISR_TUIRQ | SSISR_ROIRQ |
			      SSISR_RUIRQ), 0);

	/* Wait for idle */
	timeout = 100;
	while (--timeout) {
		if (rz_ssi_reg_readl(ssi, SSISR) & SSISR_IIRQ)
			break;
		udelay(1);
	}

	if (!timeout)
		dev_info(ssi->dev, "timeout waiting for SSI idle\n");

	/* Hold FIFOs in reset */
	rz_ssi_reg_mask_setl(ssi, SSIFCR, 0,
			     SSIFCR_TFRST | SSIFCR_RFRST);
}

static int rz_ssi_start(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
	bool is_play = rz_ssi_stream_is_play(ssi, strm->substream);
	bool is_full_duplex;
	u32 ssicr, ssifcr;

	is_full_duplex = rz_ssi_is_stream_running(&ssi->playback) ||
			 rz_ssi_is_stream_running(&ssi->capture);
	ssicr = rz_ssi_reg_readl(ssi, SSICR);
	ssifcr = rz_ssi_reg_readl(ssi, SSIFCR);
	if (!is_full_duplex) {
		ssifcr &= ~0xF;
	} else {
		rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0);
		rz_ssi_set_idle(ssi);
		ssifcr &= ~SSIFCR_FIFO_RST;
	}

	/* FIFO interrupt thresholds */
	if (rz_ssi_is_dma_enabled(ssi))
		rz_ssi_reg_writel(ssi, SSISCR, 0);
	else
		rz_ssi_reg_writel(ssi, SSISCR,
				  SSISCR_TDES(strm->fifo_sample_size / 2 - 1) |
				  SSISCR_RDFS(0));

	/* enable IRQ */
	if (is_play) {
		ssicr |= SSICR_TUIEN | SSICR_TOIEN;
		ssifcr |= SSIFCR_TIE;
		if (!is_full_duplex)
			ssifcr |= SSIFCR_RFRST;
	} else {
		ssicr |= SSICR_RUIEN | SSICR_ROIEN;
		ssifcr |= SSIFCR_RIE;
		if (!is_full_duplex)
			ssifcr |= SSIFCR_TFRST;
	}

	rz_ssi_reg_writel(ssi, SSICR, ssicr);
	rz_ssi_reg_writel(ssi, SSIFCR, ssifcr);

	/* Clear all error flags */
	rz_ssi_reg_mask_setl(ssi, SSISR,
			     (SSISR_TOIRQ | SSISR_TUIRQ | SSISR_ROIRQ |
			      SSISR_RUIRQ), 0);

	strm->running = 1;
	if (is_full_duplex)
		ssicr |= SSICR_TEN | SSICR_REN;
	else
		ssicr |= is_play ? SSICR_TEN : SSICR_REN;

	rz_ssi_reg_writel(ssi, SSICR, ssicr);

	return 0;
}

static int rz_ssi_stop(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
	strm->running = 0;

	if (rz_ssi_is_stream_running(&ssi->playback) ||
	    rz_ssi_is_stream_running(&ssi->capture))
		return 0;

	/* Disable TX/RX */
	rz_ssi_reg_mask_setl(ssi, SSICR, SSICR_TEN | SSICR_REN, 0);

	/* Cancel all remaining DMA transactions */
	if (rz_ssi_is_dma_enabled(ssi))
		dmaengine_terminate_async(strm->dma_ch);

	rz_ssi_set_idle(ssi);

	return 0;
}

static void rz_ssi_pointer_update(struct rz_ssi_stream *strm, int frames)
{
	struct snd_pcm_substream *substream = strm->substream;
	struct snd_pcm_runtime *runtime;
	int current_period;

	if (!strm->running || !substream || !substream->runtime)
		return;

	runtime = substream->runtime;
	strm->buffer_pos += frames;
	WARN_ON(strm->buffer_pos > runtime->buffer_size);

	/* ring buffer */
	if (strm->buffer_pos == runtime->buffer_size)
		strm->buffer_pos = 0;

	current_period = strm->buffer_pos / runtime->period_size;
	if (strm->period_counter != current_period) {
		snd_pcm_period_elapsed(strm->substream);
		strm->period_counter = current_period;
	}
}

static int rz_ssi_pio_recv(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
	struct snd_pcm_substream *substream = strm->substream;
	struct snd_pcm_runtime *runtime;
	u16 *buf;
	int fifo_samples;
	int frames_left;
	int samples;
	int i;

	if (!rz_ssi_stream_is_valid(ssi, strm))
		return -EINVAL;

	runtime = substream->runtime;

	do {
		/* frames left in this period */
		frames_left = runtime->period_size -
			      (strm->buffer_pos % runtime->period_size);
		if (!frames_left)
			frames_left = runtime->period_size;

		/* Samples in RX FIFO */
		fifo_samples = (rz_ssi_reg_readl(ssi, SSIFSR) >>
				SSIFSR_RDC_SHIFT) & SSIFSR_RDC_MASK;

		/* Only read full frames at a time */
		samples = 0;
		while (frames_left && (fifo_samples >= runtime->channels)) {
			samples += runtime->channels;
			fifo_samples -= runtime->channels;
			frames_left--;
		}

		/* not enough samples yet */
		if (!samples)
			break;

		/* calculate new buffer index */
		buf = (u16 *)runtime->dma_area;
		buf += strm->buffer_pos * runtime->channels;

		/* Note, only supports 16-bit samples */
		for (i = 0; i < samples; i++)
			*buf++ = (u16)(rz_ssi_reg_readl(ssi, SSIFRDR) >> 16);

		rz_ssi_reg_mask_setl(ssi, SSIFSR, SSIFSR_RDF, 0);
		rz_ssi_pointer_update(strm, samples / runtime->channels);
	} while (!frames_left && fifo_samples >= runtime->channels);

	return 0;
}

static int rz_ssi_pio_send(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm)
{
	struct snd_pcm_substream *substream = strm->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int sample_space;
	int samples = 0;
	int frames_left;
	int i;
	u32 ssifsr;
	u16 *buf;

	if (!rz_ssi_stream_is_valid(ssi, strm))
		return -EINVAL;

	/* frames left in this period */
	frames_left = runtime->period_size - (strm->buffer_pos %
					      runtime->period_size);
	if (frames_left == 0)
		frames_left = runtime->period_size;

	sample_space = strm->fifo_sample_size;
	ssifsr = rz_ssi_reg_readl(ssi, SSIFSR);
	sample_space -= (ssifsr >> SSIFSR_TDC_SHIFT) & SSIFSR_TDC_MASK;

	/* Only add full frames at a time */
	while (frames_left && (sample_space >= runtime->channels)) {
		samples += runtime->channels;
		sample_space -= runtime->channels;
		frames_left--;
	}

	/* no space to send anything right now */
	if (samples == 0)
		return 0;

	/* calculate new buffer index */
	buf = (u16 *)(runtime->dma_area);
	buf += strm->buffer_pos * runtime->channels;

	/* Note, only supports 16-bit samples */
	for (i = 0; i < samples; i++)
		rz_ssi_reg_writel(ssi, SSIFTDR, ((u32)(*buf++) << 16));

	rz_ssi_reg_mask_setl(ssi, SSIFSR, SSIFSR_TDE, 0);
	rz_ssi_pointer_update(strm, samples / runtime->channels);

	return 0;
}
static irqreturn_t rz_ssi_interrupt(int irq, void *data)
{
	struct rz_ssi_stream *strm_playback = NULL;
	struct rz_ssi_stream *strm_capture = NULL;
	struct rz_ssi_priv *ssi = data;
	u32 ssisr = rz_ssi_reg_readl(ssi, SSISR);

	if (ssi->playback.substream)
		strm_playback = &ssi->playback;
	if (ssi->capture.substream)
		strm_capture = &ssi->capture;

	if (!strm_playback && !strm_capture)
		return IRQ_HANDLED; /* Left over TX/RX interrupt */

	if (irq == ssi->irq_int) { /* error or idle */
		bool is_stopped = false;
		int i, count;

		if (rz_ssi_is_dma_enabled(ssi))
			count = 4;
		else
			count = 1;

		if (ssisr & (SSISR_RUIRQ | SSISR_ROIRQ | SSISR_TUIRQ | SSISR_TOIRQ))
			is_stopped = true;

		if (ssi->capture.substream && is_stopped) {
			if (ssisr & SSISR_RUIRQ)
				strm_capture->uerr_num++;
			if (ssisr & SSISR_ROIRQ)
				strm_capture->oerr_num++;

			rz_ssi_stop(ssi, strm_capture);
		}

		if (ssi->playback.substream && is_stopped) {
			if (ssisr & SSISR_TUIRQ)
				strm_playback->uerr_num++;
			if (ssisr & SSISR_TOIRQ)
				strm_playback->oerr_num++;

			rz_ssi_stop(ssi, strm_playback);
		}

		/* Clear all flags */
		rz_ssi_reg_mask_setl(ssi, SSISR, SSISR_TOIRQ | SSISR_TUIRQ |
				     SSISR_ROIRQ | SSISR_RUIRQ, 0);

		/* Add/remove more data */
		if (ssi->capture.substream && is_stopped) {
			for (i = 0; i < count; i++)
				strm_capture->transfer(ssi, strm_capture);
		}

		if (ssi->playback.substream && is_stopped) {
			for (i = 0; i < count; i++)
				strm_playback->transfer(ssi, strm_playback);
		}

		/* Resume */
		if (ssi->playback.substream && is_stopped)
			rz_ssi_start(ssi, &ssi->playback);
		if (ssi->capture.substream && is_stopped)
			rz_ssi_start(ssi, &ssi->capture);
	}

	if (!rz_ssi_is_stream_running(&ssi->playback) &&
	    !rz_ssi_is_stream_running(&ssi->capture))
		return IRQ_HANDLED;

	/* tx data empty */
	if (irq == ssi->irq_tx && rz_ssi_is_stream_running(&ssi->playback))
		strm_playback->transfer(ssi, &ssi->playback);

	/* rx data full */
	if (irq == ssi->irq_rx && rz_ssi_is_stream_running(&ssi->capture)) {
		strm_capture->transfer(ssi, &ssi->capture);
		rz_ssi_reg_mask_setl(ssi, SSIFSR, SSIFSR_RDF, 0);
	}

	if (irq == ssi->irq_rt) {
		if (ssi->playback.substream) {
			strm_playback->transfer(ssi, &ssi->playback);
		} else {
			strm_capture->transfer(ssi, &ssi->capture);
			rz_ssi_reg_mask_setl(ssi, SSIFSR, SSIFSR_RDF, 0);
		}
	}

	return IRQ_HANDLED;
}

static int rz_ssi_dma_slave_config(struct rz_ssi_priv *ssi,
				   struct dma_chan *dma_ch, bool is_play)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));

	cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	cfg.dst_addr = ssi->phys + SSIFTDR;
	cfg.src_addr = ssi->phys + SSIFRDR;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	return dmaengine_slave_config(dma_ch, &cfg);
}

static int rz_ssi_dma_transfer(struct rz_ssi_priv *ssi,
			       struct rz_ssi_stream *strm)
{
	struct snd_pcm_substream *substream = strm->substream;
	struct dma_async_tx_descriptor *desc;
	struct snd_pcm_runtime *runtime;
	enum dma_transfer_direction dir;
	u32 dma_paddr, dma_size;
	int amount;

	if (!rz_ssi_stream_is_valid(ssi, strm))
		return -EINVAL;

	runtime = substream->runtime;
	if (runtime->state == SNDRV_PCM_STATE_DRAINING)
		/*
		 * Stream is ending, so do not queue up any more DMA
		 * transfers otherwise we play partial sound clips
		 * because we can't shut off the DMA quick enough.
		 */
		return 0;

	dir = rz_ssi_stream_is_play(ssi, substream) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;

	/* Always transfer 1 period */
	amount = runtime->period_size;

	/* DMA physical address and size */
	dma_paddr = runtime->dma_addr + frames_to_bytes(runtime,
							strm->dma_buffer_pos);
	dma_size = frames_to_bytes(runtime, amount);
	desc = dmaengine_prep_slave_single(strm->dma_ch, dma_paddr, dma_size,
					   dir,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(ssi->dev, "dmaengine_prep_slave_single() fail\n");
		return -ENOMEM;
	}

	desc->callback = rz_ssi_dma_complete;
	desc->callback_param = strm;

	if (dmaengine_submit(desc) < 0) {
		dev_err(ssi->dev, "dmaengine_submit() fail\n");
		return -EIO;
	}

	/* Update DMA pointer */
	strm->dma_buffer_pos += amount;
	if (strm->dma_buffer_pos >= runtime->buffer_size)
		strm->dma_buffer_pos = 0;

	/* Start DMA */
	dma_async_issue_pending(strm->dma_ch);

	return 0;
}

static void rz_ssi_dma_complete(void *data)
{
	struct rz_ssi_stream *strm = (struct rz_ssi_stream *)data;

	if (!strm->running || !strm->substream || !strm->substream->runtime)
		return;

	/* Note that next DMA transaction has probably already started */
	rz_ssi_pointer_update(strm, strm->substream->runtime->period_size);

	/* Queue up another DMA transaction */
	rz_ssi_dma_transfer(strm->priv, strm);
}

static void rz_ssi_release_dma_channels(struct rz_ssi_priv *ssi)
{
	if (ssi->playback.dma_ch) {
		dma_release_channel(ssi->playback.dma_ch);
		ssi->playback.dma_ch = NULL;
		if (ssi->dma_rt)
			ssi->dma_rt = false;
	}

	if (ssi->capture.dma_ch) {
		dma_release_channel(ssi->capture.dma_ch);
		ssi->capture.dma_ch = NULL;
	}
}

static int rz_ssi_dma_request(struct rz_ssi_priv *ssi, struct device *dev)
{
	ssi->playback.dma_ch = dma_request_chan(dev, "tx");
	if (IS_ERR(ssi->playback.dma_ch))
		ssi->playback.dma_ch = NULL;

	ssi->capture.dma_ch = dma_request_chan(dev, "rx");
	if (IS_ERR(ssi->capture.dma_ch))
		ssi->capture.dma_ch = NULL;

	if (!ssi->playback.dma_ch && !ssi->capture.dma_ch) {
		ssi->playback.dma_ch = dma_request_chan(dev, "rt");
		if (IS_ERR(ssi->playback.dma_ch)) {
			ssi->playback.dma_ch = NULL;
			goto no_dma;
		}

		ssi->dma_rt = true;
	}

	if (!rz_ssi_is_dma_enabled(ssi))
		goto no_dma;

	if (ssi->playback.dma_ch &&
	    (rz_ssi_dma_slave_config(ssi, ssi->playback.dma_ch, true) < 0))
		goto no_dma;

	if (ssi->capture.dma_ch &&
	    (rz_ssi_dma_slave_config(ssi, ssi->capture.dma_ch, false) < 0))
		goto no_dma;

	return 0;

no_dma:
	rz_ssi_release_dma_channels(ssi);

	return -ENODEV;
}

static int rz_ssi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
			      struct snd_soc_dai *dai)
{
	struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
	struct rz_ssi_stream *strm = rz_ssi_stream_get(ssi, substream);
	int ret = 0, i, num_transfer = 1;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* Soft Reset */
		if (!rz_ssi_is_stream_running(&ssi->playback) &&
		    !rz_ssi_is_stream_running(&ssi->capture)) {
			rz_ssi_reg_mask_setl(ssi, SSIFCR, 0, SSIFCR_SSIRST);
			rz_ssi_reg_mask_setl(ssi, SSIFCR, SSIFCR_SSIRST, 0);
			udelay(5);
		}

		rz_ssi_stream_init(strm, substream);

		if (ssi->dma_rt) {
			bool is_playback;

			is_playback = rz_ssi_stream_is_play(ssi, substream);
			ret = rz_ssi_dma_slave_config(ssi, ssi->playback.dma_ch,
						      is_playback);
			/* Fallback to pio */
			if (ret < 0) {
				ssi->playback.transfer = rz_ssi_pio_send;
				ssi->capture.transfer = rz_ssi_pio_recv;
				rz_ssi_release_dma_channels(ssi);
			}
		}

		/* For DMA, queue up multiple DMA descriptors */
		if (rz_ssi_is_dma_enabled(ssi))
			num_transfer = 4;

		for (i = 0; i < num_transfer; i++) {
			ret = strm->transfer(ssi, strm);
			if (ret)
				goto done;
		}

		ret = rz_ssi_start(ssi, strm);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		rz_ssi_stop(ssi, strm);
		rz_ssi_stream_quit(ssi, strm);
		break;
	}

done:
	return ret;
}

static int rz_ssi_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);

	switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
	case SND_SOC_DAIFMT_BP_FP:
		break;
	default:
		dev_err(ssi->dev, "Codec should be clk and frame consumer\n");
		return -EINVAL;
	}

	/*
	 * set clock polarity
	 *
	 * "normal" BCLK = Signal is available at rising edge of BCLK
	 * "normal" FSYNC = (I2S) Left ch starts with falling FSYNC edge
	 */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		ssi->bckp_rise = false;
		ssi->lrckp_fsync_fall = false;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		ssi->bckp_rise = false;
		ssi->lrckp_fsync_fall = true;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		ssi->bckp_rise = true;
		ssi->lrckp_fsync_fall = false;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		ssi->bckp_rise = true;
		ssi->lrckp_fsync_fall = true;
		break;
	default:
		return -EINVAL;
	}

	/* only i2s support */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		break;
	default:
		dev_err(ssi->dev, "Only I2S mode is supported.\n");
		return -EINVAL;
	}

	return 0;
}
static bool rz_ssi_is_valid_hw_params(struct rz_ssi_priv *ssi, unsigned int rate,
				      unsigned int channels,
				      unsigned int sample_width,
				      unsigned int sample_bits)
{
	if (ssi->hw_params_cache.rate != rate ||
	    ssi->hw_params_cache.channels != channels ||
	    ssi->hw_params_cache.sample_width != sample_width ||
	    ssi->hw_params_cache.sample_bits != sample_bits)
		return false;

	return true;
}

static void rz_ssi_cache_hw_params(struct rz_ssi_priv *ssi, unsigned int rate,
				   unsigned int channels,
				   unsigned int sample_width,
				   unsigned int sample_bits)
{
	ssi->hw_params_cache.rate = rate;
	ssi->hw_params_cache.channels = channels;
	ssi->hw_params_cache.sample_width = sample_width;
	ssi->hw_params_cache.sample_bits = sample_bits;
}

static int rz_ssi_dai_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
	struct rz_ssi_stream *strm = rz_ssi_stream_get(ssi, substream);
	unsigned int sample_bits = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_SAMPLE_BITS)->min;
	unsigned int channels = params_channels(params);
	unsigned int rate = params_rate(params);

	if (sample_bits != 16) {
		dev_err(ssi->dev, "Unsupported sample width: %d\n",
			sample_bits);
		return -EINVAL;
	}

	if (channels != 2) {
		dev_err(ssi->dev, "Number of channels not matched: %d\n",
			channels);
		return -EINVAL;
	}

	if (rz_ssi_is_stream_running(&ssi->playback) ||
	    rz_ssi_is_stream_running(&ssi->capture)) {
		if (rz_ssi_is_valid_hw_params(ssi, rate, channels,
					      strm->sample_width, sample_bits))
			return 0;

		dev_err(ssi->dev, "Full duplex needs same HW params\n");
		return -EINVAL;
	}

	rz_ssi_cache_hw_params(ssi, rate, channels, strm->sample_width,
			       sample_bits);

	return rz_ssi_clk_setup(ssi, rate, channels);
}

static const struct snd_soc_dai_ops rz_ssi_dai_ops = {
	.trigger	= rz_ssi_dai_trigger,
	.set_fmt	= rz_ssi_dai_set_fmt,
	.hw_params	= rz_ssi_dai_hw_params,
};

static const struct snd_pcm_hardware rz_ssi_pcm_hardware = {
	.info			= SNDRV_PCM_INFO_INTERLEAVED	|
				  SNDRV_PCM_INFO_MMAP		|
				  SNDRV_PCM_INFO_MMAP_VALID,
	.buffer_bytes_max	= PREALLOC_BUFFER,
	.period_bytes_min	= 32,
	.period_bytes_max	= 8192,
	.channels_min		= SSI_CHAN_MIN,
	.channels_max		= SSI_CHAN_MAX,
	.periods_min		= 1,
	.periods_max		= 32,
	.fifo_size		= 32 * 2,
};

static int rz_ssi_pcm_open(struct snd_soc_component *component,
			   struct snd_pcm_substream *substream)
{
	snd_soc_set_runtime_hwparams(substream, &rz_ssi_pcm_hardware);

	return snd_pcm_hw_constraint_integer(substream->runtime,
					     SNDRV_PCM_HW_PARAM_PERIODS);
}

static snd_pcm_uframes_t rz_ssi_pcm_pointer(struct snd_soc_component *component,
					    struct snd_pcm_substream *substream)
{
	struct snd_soc_dai *dai = rz_ssi_get_dai(substream);
	struct rz_ssi_priv *ssi = snd_soc_dai_get_drvdata(dai);
	struct rz_ssi_stream *strm = rz_ssi_stream_get(ssi, substream);

	return strm->buffer_pos;
}

static int rz_ssi_pcm_new(struct snd_soc_component *component,
			  struct snd_soc_pcm_runtime *rtd)
{
	snd_pcm_set_managed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
				       rtd->card->snd_card->dev,
				       PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
	return 0;
}

static struct snd_soc_dai_driver rz_ssi_soc_dai[] = {
	{
		.name			= "rz-ssi-dai",
		.playback = {
			.rates		= SSI_RATES,
			.formats	= SSI_FMTS,
			.channels_min	= SSI_CHAN_MIN,
			.channels_max	= SSI_CHAN_MAX,
		},
		.capture = {
			.rates		= SSI_RATES,
			.formats	= SSI_FMTS,
			.channels_min	= SSI_CHAN_MIN,
			.channels_max	= SSI_CHAN_MAX,
		},
		.ops = &rz_ssi_dai_ops,
	},
};

static const struct snd_soc_component_driver rz_ssi_soc_component = {
	.name			= "rz-ssi",
	.open			= rz_ssi_pcm_open,
	.pointer		= rz_ssi_pcm_pointer,
	.pcm_construct		= rz_ssi_pcm_new,
	.legacy_dai_naming	= 1,
};

static int rz_ssi_probe(struct platform_device *pdev)
{
	struct rz_ssi_priv *ssi;
	struct clk *audio_clk;
	struct resource *res;
	int ret;

	ssi = devm_kzalloc(&pdev->dev, sizeof(*ssi), GFP_KERNEL);
	if (!ssi)
		return -ENOMEM;

	ssi->pdev = pdev;
	ssi->dev = &pdev->dev;
	ssi->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(ssi->base))
		return PTR_ERR(ssi->base);

	ssi->phys = res->start;
	ssi->clk = devm_clk_get(&pdev->dev, "ssi");
	if (IS_ERR(ssi->clk))
		return PTR_ERR(ssi->clk);

	ssi->sfr_clk = devm_clk_get(&pdev->dev, "ssi_sfr");
	if (IS_ERR(ssi->sfr_clk))
		return PTR_ERR(ssi->sfr_clk);

	audio_clk = devm_clk_get(&pdev->dev, "audio_clk1");
	if (IS_ERR(audio_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(audio_clk),
				     "no audio clk1");

	ssi->audio_clk_1 = clk_get_rate(audio_clk);
	audio_clk = devm_clk_get(&pdev->dev, "audio_clk2");
	if (IS_ERR(audio_clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(audio_clk),
				     "no audio clk2");

	ssi->audio_clk_2 = clk_get_rate(audio_clk);
	if (!(ssi->audio_clk_1 || ssi->audio_clk_2))
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "no audio clk1 or audio clk2");

	ssi->audio_mck = ssi->audio_clk_1 ? ssi->audio_clk_1 : ssi->audio_clk_2;

	/* Detect DMA support */
	ret = rz_ssi_dma_request(ssi, &pdev->dev);
	if (ret < 0) {
		dev_warn(&pdev->dev, "DMA not available, using PIO\n");
		ssi->playback.transfer = rz_ssi_pio_send;
		ssi->capture.transfer = rz_ssi_pio_recv;
	} else {
		dev_info(&pdev->dev, "DMA enabled");
		ssi->playback.transfer = rz_ssi_dma_transfer;
		ssi->capture.transfer = rz_ssi_dma_transfer;
	}

	ssi->playback.priv = ssi;
	ssi->capture.priv = ssi;

	spin_lock_init(&ssi->lock);
	dev_set_drvdata(&pdev->dev, ssi);

	/* Error Interrupt */
	ssi->irq_int = platform_get_irq_byname(pdev, "int_req");
	if (ssi->irq_int < 0) {
		rz_ssi_release_dma_channels(ssi);
		return ssi->irq_int;
	}

	ret = devm_request_irq(&pdev->dev, ssi->irq_int, &rz_ssi_interrupt,
			       0, dev_name(&pdev->dev), ssi);
	if (ret < 0) {
		rz_ssi_release_dma_channels(ssi);
		return dev_err_probe(&pdev->dev, ret,
				     "irq request error (int_req)\n");
	}

	if (!rz_ssi_is_dma_enabled(ssi)) {
		/* Tx and Rx interrupts (pio only) */
		ssi->irq_tx = platform_get_irq_byname_optional(pdev, "dma_tx");
		ssi->irq_rx = platform_get_irq_byname_optional(pdev, "dma_rx");
		if (ssi->irq_tx == -ENXIO && ssi->irq_rx == -ENXIO) {
			ssi->irq_rt = platform_get_irq_byname(pdev, "dma_rt");
			if (ssi->irq_rt < 0)
				return ssi->irq_rt;

			ret = devm_request_irq(&pdev->dev, ssi->irq_rt,
					       &rz_ssi_interrupt, 0,
					       dev_name(&pdev->dev), ssi);
			if (ret < 0)
				return dev_err_probe(&pdev->dev, ret,
						     "irq request error (dma_rt)\n");
		} else {
			if (ssi->irq_tx < 0)
				return ssi->irq_tx;

			if (ssi->irq_rx < 0)
				return ssi->irq_rx;

			ret = devm_request_irq(&pdev->dev, ssi->irq_tx,
					       &rz_ssi_interrupt, 0,
					       dev_name(&pdev->dev), ssi);
			if (ret < 0)
				return dev_err_probe(&pdev->dev, ret,
						     "irq request error (dma_tx)\n");

			ret = devm_request_irq(&pdev->dev, ssi->irq_rx,
					       &rz_ssi_interrupt, 0,
					       dev_name(&pdev->dev), ssi);
			if (ret < 0)
				return dev_err_probe(&pdev->dev, ret,
						     "irq request error (dma_rx)\n");
		}
	}

	ssi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(ssi->rstc)) {
		ret = PTR_ERR(ssi->rstc);
		goto err_reset;
	}

	reset_control_deassert(ssi->rstc);
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed\n");
		goto err_pm;
	}

	ret = devm_snd_soc_register_component(&pdev->dev, &rz_ssi_soc_component,
					      rz_ssi_soc_dai,
					      ARRAY_SIZE(rz_ssi_soc_dai));
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to register snd component\n");
		goto err_snd_soc;
	}

	return 0;

err_snd_soc:
	pm_runtime_put(ssi->dev);
err_pm:
	pm_runtime_disable(ssi->dev);
	reset_control_assert(ssi->rstc);
err_reset:
	rz_ssi_release_dma_channels(ssi);

	return ret;
}

static void rz_ssi_remove(struct platform_device *pdev)
{
	struct rz_ssi_priv *ssi = dev_get_drvdata(&pdev->dev);

	rz_ssi_release_dma_channels(ssi);

	pm_runtime_put(ssi->dev);
	pm_runtime_disable(ssi->dev);
	reset_control_assert(ssi->rstc);
}

static const struct of_device_id rz_ssi_of_match[] = {
	{ .compatible = "renesas,rz-ssi", },
	{/* Sentinel */},
};
MODULE_DEVICE_TABLE(of, rz_ssi_of_match);

static struct platform_driver rz_ssi_driver = {
	.driver	= {
		.name	= "rz-ssi-pcm-audio",
		.of_match_table = rz_ssi_of_match,
	},
	.probe		= rz_ssi_probe,
	.remove		= rz_ssi_remove,
};

module_platform_driver(rz_ssi_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas RZ/G2L ASoC Serial Sound Interface Driver");
MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");