@@ -300,8 +300,8 @@ static void maple_send(void)
 	mutex_unlock(&maple_wlist_lock);
 	if (maple_packets > 0) {
 		for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-			sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
-					PAGE_SIZE, DMA_BIDIRECTIONAL);
+			__flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+					PAGE_SIZE);
 	}
 
 finish:
@@ -642,7 +642,8 @@ static void maple_dma_handler(struct work_struct *work)
 	list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
 		mdev = mq->dev;
 		recvbuf = mq->recvbuf->buf;
-		sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+		__flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+					0x400);
 		code = recvbuf[0];
 		kfree(mq->sendbuf);
 		list_del_init(&mq->list);
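
For context, a minimal sketch of the cache-maintenance pattern the hunks above switch to, assuming the SuperH primitives declared in arch/sh/include/asm/cacheflush.h; the helper and buffer names here are illustrative, not taken from maple.c:

/* Illustrative only: this helper and its buffers are not part of the driver. */
#include <asm/cacheflush.h>

static void dma_round_trip_sketch(void *tx_buf, void *rx_buf, int len)
{
	/*
	 * CPU -> device: "purge" = write back any dirty lines, then
	 * invalidate them, so the DMA engine reads exactly what the
	 * CPU just wrote into the send buffer.
	 */
	__flush_purge_region(tx_buf, len);

	/* ... kick off the transfer and wait for the DMA to complete ... */

	/*
	 * Device -> CPU: invalidate without write-back, so later CPU
	 * reads fetch the device's data from memory instead of stale
	 * cache lines. sh_cacheop_vaddr() folds an uncached-segment
	 * alias back to the cacheable address that the cache ops
	 * operate on.
	 */
	__flush_invalidate_region(sh_cacheop_vaddr(rx_buf), len);
}

The asymmetry is deliberate: the send path purges because the CPU has just written maple_sendbuf and the device must see those stores, while the receive path skips the write-back so a dirty line cannot clobber data the device has already placed in memory.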