|
|
@@ -302,8 +302,8 @@ static int rockchip_spi_prepare_message(struct spi_master *master,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static int rockchip_spi_unprepare_message(struct spi_master *master,
|
|
|
- struct spi_message *msg)
|
|
|
+static void rockchip_spi_handle_err(struct spi_master *master,
|
|
|
+ struct spi_message *msg)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
struct rockchip_spi *rs = spi_master_get_devdata(master);
|
|
|
@@ -313,8 +313,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
|
|
|
/*
|
|
|
* For DMA mode, we need terminate DMA channel and flush
|
|
|
* fifo for the next transfer if DMA thansfer timeout.
|
|
|
- * unprepare_message() was called by core if transfer complete
|
|
|
- * or timeout. Maybe it is reasonable for error handling here.
|
|
|
+ * handle_err() is called by the core if the transfer failed.
|
|
|
+ * It is reasonable to do the error handling here.
|
|
|
*/
|
|
|
if (rs->use_dma) {
|
|
|
if (rs->state & RXBUSY) {
|
|
|
@@ -327,6 +327,12 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
|
|
|
}
|
|
|
|
|
|
spin_unlock_irqrestore(&rs->lock, flags);
|
|
|
+}
|
|
|
+
|
|
|
+static int rockchip_spi_unprepare_message(struct spi_master *master,
|
|
|
+ struct spi_message *msg)
|
|
|
+{
|
|
|
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
|
|
|
|
|
|
spi_enable_chip(rs, 0);
|
|
|
|
|
|
@@ -688,6 +694,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
|
|
|
master->prepare_message = rockchip_spi_prepare_message;
|
|
|
master->unprepare_message = rockchip_spi_unprepare_message;
|
|
|
master->transfer_one = rockchip_spi_transfer_one;
|
|
|
+ master->handle_err = rockchip_spi_handle_err;
|
|
|
|
|
|
rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
|
|
|
if (!rs->dma_tx.ch)
|