@@ -70,6 +70,7 @@ struct davinci_mcasp {
 	void __iomem *base;
 	u32 fifo_base;
 	struct device *dev;
+	struct snd_pcm_substream *substreams[2];
 
 	/* McASP specific data */
 	int tdm_slots;
@@ -80,6 +81,7 @@ struct davinci_mcasp {
 	u8 bclk_div;
 	u16 bclk_lrclk_ratio;
 	int streams;
+	u32 irq_request[2];
 
 	int sysclk_freq;
 	bool bclk_master;
@@ -90,6 +92,9 @@ struct davinci_mcasp {
 
 	bool dat_port;
 
+	/* Used for constraint setting on the second stream */
+	u32 channels;
+
 #ifdef CONFIG_PM_SLEEP
 	struct davinci_mcasp_context context;
 #endif
@@ -154,6 +159,13 @@ static bool mcasp_is_synchronous(struct davinci_mcasp *mcasp)
 
 static void mcasp_start_rx(struct davinci_mcasp *mcasp)
 {
+	if (mcasp->rxnumevt) {	/* enable FIFO */
+		u32 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
+
+		mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
+		mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
+	}
+
 	/* Start clocks */
 	mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
 	mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
@@ -175,12 +187,23 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp)
 	mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
 	if (mcasp_is_synchronous(mcasp))
 		mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
+
+	/* enable receive IRQs */
+	mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
+		       mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
 }
 
 static void mcasp_start_tx(struct davinci_mcasp *mcasp)
 {
 	u32 cnt;
 
+	if (mcasp->txnumevt) {	/* enable FIFO */
+		u32 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
+
+		mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
+		mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
+	}
+
 	/* Start clocks */
 	mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
 	mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
@@ -197,33 +220,28 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
 	mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
 	/* Release Frame Sync generator */
 	mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
+
+	/* enable transmit IRQs */
+	mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
+		       mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
 }
 
 static void davinci_mcasp_start(struct davinci_mcasp *mcasp, int stream)
 {
-	u32 reg;
-
 	mcasp->streams++;
 
-	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
-		if (mcasp->txnumevt) {	/* enable FIFO */
-			reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
-			mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
-			mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
-		}
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
 		mcasp_start_tx(mcasp);
-	} else {
-		if (mcasp->rxnumevt) {	/* enable FIFO */
-			reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
-			mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
-			mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
-		}
+	else
 		mcasp_start_rx(mcasp);
-	}
 }
 
 static void mcasp_stop_rx(struct davinci_mcasp *mcasp)
 {
+	/* disable IRQ sources */
+	mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
+		       mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
+
 	/*
 	 * In synchronous mode stop the TX clocks if no other stream is
 	 * running
@@ -245,6 +263,10 @@ static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
 {
 	u32 val = 0;
 
+	/* disable IRQ sources */
+	mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
+		       mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
+
 	/*
 	 * In synchronous mode keep TX clocks running if the capture stream is
 	 * still running.
@@ -272,6 +294,76 @@ static void davinci_mcasp_stop(struct davinci_mcasp *mcasp, int stream)
 		mcasp_stop_rx(mcasp);
 }
 
+static irqreturn_t davinci_mcasp_tx_irq_handler(int irq, void *data)
+{
+	struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
+	struct snd_pcm_substream *substream;
+	u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK];
+	u32 handled_mask = 0;
+	u32 stat;
+
+	stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG);
+	if (stat & XUNDRN & irq_mask) {
+		dev_warn(mcasp->dev, "Transmit buffer underflow\n");
+		handled_mask |= XUNDRN;
+
+		substream = mcasp->substreams[SNDRV_PCM_STREAM_PLAYBACK];
+		if (substream) {
+			snd_pcm_stream_lock_irq(substream);
+			if (snd_pcm_running(substream))
+				snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+			snd_pcm_stream_unlock_irq(substream);
+		}
+	}
+
+	if (!handled_mask)
+		dev_warn(mcasp->dev, "unhandled tx event. txstat: 0x%08x\n",
+			 stat);
+
+	if (stat & XRERR)
+		handled_mask |= XRERR;
+
+	/* Ack the handled event only */
+	mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, handled_mask);
+
+	return IRQ_RETVAL(handled_mask);
+}
+
+static irqreturn_t davinci_mcasp_rx_irq_handler(int irq, void *data)
+{
+	struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
+	struct snd_pcm_substream *substream;
+	u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE];
+	u32 handled_mask = 0;
+	u32 stat;
+
+	stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG);
+	if (stat & ROVRN & irq_mask) {
+		dev_warn(mcasp->dev, "Receive buffer overflow\n");
+		handled_mask |= ROVRN;
+
+		substream = mcasp->substreams[SNDRV_PCM_STREAM_CAPTURE];
+		if (substream) {
+			snd_pcm_stream_lock_irq(substream);
+			if (snd_pcm_running(substream))
+				snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+			snd_pcm_stream_unlock_irq(substream);
+		}
+	}
+
+	if (!handled_mask)
+		dev_warn(mcasp->dev, "unhandled rx event. rxstat: 0x%08x\n",
+			 stat);
+
+	if (stat & XRERR)
+		handled_mask |= XRERR;
+
+	/* Ack the handled event only */
+	mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, handled_mask);
+
+	return IRQ_RETVAL(handled_mask);
+}
+
 static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
 				     unsigned int fmt)
 {
@@ -634,19 +726,29 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
 	return 0;
 }
 
-static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
+static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
+			      int channels)
 {
 	int i, active_slots;
+	int total_slots;
+	int active_serializers;
 	u32 mask = 0;
 	u32 busel = 0;
 
-	if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) {
-		dev_err(mcasp->dev, "tdm slot %d not supported\n",
-			mcasp->tdm_slots);
-		return -EINVAL;
-	}
+	total_slots = mcasp->tdm_slots;
+
+	/*
+	 * If more than one serializer is needed, then use them with
+	 * their specified tdm_slots count. Otherwise, one serializer
+	 * can cope with the transaction using as many slots as there are
+	 * channels in the stream; this requires channel symmetry.
+	 */
+	active_serializers = (channels + total_slots - 1) / total_slots;
+	if (active_serializers == 1)
+		active_slots = channels;
+	else
+		active_slots = total_slots;
 
-	active_slots = (mcasp->tdm_slots > 31) ? 32 : mcasp->tdm_slots;
 	for (i = 0; i < active_slots; i++)
 		mask |= (1 << i);
 
@@ -658,12 +760,12 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
 	mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
 	mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
 	mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
-		       FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
+		       FSXMOD(total_slots), FSXMOD(0x1FF));
 
 	mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
 	mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
 	mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
-		       FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
+		       FSRMOD(total_slots), FSRMOD(0x1FF));
 
 	return 0;
 }
@@ -777,7 +879,8 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
 	if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
 		ret = mcasp_dit_hw_param(mcasp, params_rate(params));
 	else
-		ret = mcasp_i2s_hw_param(mcasp, substream->stream);
+		ret = mcasp_i2s_hw_param(mcasp, substream->stream,
+					 channels);
 
 	if (ret)
 		return ret;
@@ -825,6 +928,9 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
 
 	davinci_config_channel_size(mcasp, word_length);
 
+	if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
+		mcasp->channels = channels;
+
 	return 0;
 }
 
@@ -853,7 +959,65 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
 	return ret;
 }
 
+static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *cpu_dai)
+{
+	struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+	u32 max_channels = 0;
+	int i, dir;
+
+	mcasp->substreams[substream->stream] = substream;
+
+	if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
+		return 0;
+
+	/*
+	 * Limit the maximum allowed channels for the first stream:
+	 * number of serializers for the direction * tdm slots per serializer
+	 */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = TX_MODE;
+	else
+		dir = RX_MODE;
+
+	for (i = 0; i < mcasp->num_serializer; i++) {
+		if (mcasp->serial_dir[i] == dir)
+			max_channels++;
+	}
+	max_channels *= mcasp->tdm_slots;
+	/*
+	 * If the already active stream has fewer channels than the calculated
+	 * limit based on the serializers * tdm_slots, we need to use that as
+	 * a constraint for the second stream.
+	 * Otherwise (first stream or less allowed channels) we use the
+	 * calculated constraint.
+	 */
+	if (mcasp->channels && mcasp->channels < max_channels)
+		max_channels = mcasp->channels;
+
+	snd_pcm_hw_constraint_minmax(substream->runtime,
+				     SNDRV_PCM_HW_PARAM_CHANNELS,
+				     2, max_channels);
+	return 0;
+}
+
+static void davinci_mcasp_shutdown(struct snd_pcm_substream *substream,
+				   struct snd_soc_dai *cpu_dai)
+{
+	struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+
+	mcasp->substreams[substream->stream] = NULL;
+
+	if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
+		return;
+
+	if (!cpu_dai->active)
+		mcasp->channels = 0;
+}
+
 static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
+	.startup	= davinci_mcasp_startup,
+	.shutdown	= davinci_mcasp_shutdown,
 	.trigger	= davinci_mcasp_trigger,
 	.hw_params	= davinci_mcasp_hw_params,
 	.set_fmt	= davinci_mcasp_set_dai_fmt,
@@ -1194,6 +1358,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 	struct resource *mem, *ioarea, *res, *dat;
 	struct davinci_mcasp_pdata *pdata;
 	struct davinci_mcasp *mcasp;
+	char *irq_name;
+	int irq;
 	int ret;
 
 	if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -1247,7 +1413,21 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 	}
 
 	mcasp->op_mode = pdata->op_mode;
-	mcasp->tdm_slots = pdata->tdm_slots;
+	/* sanity check for tdm slots parameter */
+	if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
+		if (pdata->tdm_slots < 2) {
+			dev_err(&pdev->dev, "invalid tdm slots: %d\n",
+				pdata->tdm_slots);
+			mcasp->tdm_slots = 2;
+		} else if (pdata->tdm_slots > 32) {
+			dev_err(&pdev->dev, "invalid tdm slots: %d\n",
+				pdata->tdm_slots);
+			mcasp->tdm_slots = 32;
+		} else {
+			mcasp->tdm_slots = pdata->tdm_slots;
+		}
+	}
+
 	mcasp->num_serializer = pdata->num_serializer;
 #ifdef CONFIG_PM_SLEEP
 	mcasp->context.xrsr_regs = devm_kzalloc(&pdev->dev,
@@ -1261,6 +1441,36 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 
 	mcasp->dev = &pdev->dev;
 
+	irq = platform_get_irq_byname(pdev, "rx");
+	if (irq >= 0) {
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx",
+					  dev_name(&pdev->dev));
+		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+						davinci_mcasp_rx_irq_handler,
+						IRQF_ONESHOT, irq_name, mcasp);
+		if (ret) {
+			dev_err(&pdev->dev, "RX IRQ request failed\n");
+			goto err;
+		}
+
+		mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE] = ROVRN;
+	}
+
+	irq = platform_get_irq_byname(pdev, "tx");
+	if (irq >= 0) {
+		irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx",
+					  dev_name(&pdev->dev));
+		ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+						davinci_mcasp_tx_irq_handler,
+						IRQF_ONESHOT, irq_name, mcasp);
+		if (ret) {
+			dev_err(&pdev->dev, "TX IRQ request failed\n");
+			goto err;
+		}
+
+		mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK] = XUNDRN;
+	}
+
 	dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
 	if (dat)
 		mcasp->dat_port = true;