@@ -226,7 +226,6 @@ struct imx_port {
 	dma_cookie_t		rx_cookie;
 	unsigned int		tx_bytes;
 	unsigned int		dma_tx_nents;
-	wait_queue_head_t	dma_wait;
 	unsigned int		saved_reg[10];
 	bool			context_saved;
 };
@@ -498,20 +497,12 @@ static void dma_tx_callback(void *data)
 
 	sport->dma_is_txing = 0;
 
-	spin_unlock_irqrestore(&sport->port.lock, flags);
-
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(&sport->port);
 
-	if (waitqueue_active(&sport->dma_wait)) {
-		wake_up(&sport->dma_wait);
-		dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
-		return;
-	}
-
-	spin_lock_irqsave(&sport->port.lock, flags);
 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
 		imx_dma_tx(sport);
 
+
 	spin_unlock_irqrestore(&sport->port.lock, flags);
 }
 
@@ -1208,8 +1199,6 @@ static void imx_enable_dma(struct imx_port *sport)
 {
 	unsigned long temp;
 
-	init_waitqueue_head(&sport->dma_wait);
-
 	/* set UCR1 */
 	temp = readl(sport->port.membase + UCR1);
 	temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;