@@ -289,7 +289,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 	do {
 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s: timeout!\n", __func__);
+			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 			return DMA_ERROR;
 		}
 		if (status != DMA_IN_PROGRESS)
@@ -518,7 +518,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 	struct dma_chan *chan;

 	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
-		pr_debug("%s: wrong capabilities\n", __func__);
+		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 		return NULL;
 	}
 	/* devices with multiple channels need special handling as we need to
@@ -533,12 +533,12 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,

 	list_for_each_entry(chan, &dev->channels, device_node) {
 		if (chan->client_count) {
-			pr_debug("%s: %s busy\n",
+			dev_dbg(dev->dev, "%s: %s busy\n",
				 __func__, dma_chan_name(chan));
 			continue;
 		}
 		if (fn && !fn(chan, fn_param)) {
-			pr_debug("%s: %s filter said false\n",
+			dev_dbg(dev->dev, "%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
 			continue;
 		}
@@ -567,11 +567,12 @@ static struct dma_chan *find_candidate(struct dma_device *device,

 	if (err) {
 		if (err == -ENODEV) {
-			pr_debug("%s: %s module removed\n", __func__,
-				 dma_chan_name(chan));
+			dev_dbg(device->dev, "%s: %s module removed\n",
+				__func__, dma_chan_name(chan));
 			list_del_rcu(&device->global_node);
 		} else
-			pr_debug("%s: failed to get %s: (%d)\n",
+			dev_dbg(device->dev,
+				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);

 		if (--device->privatecnt == 0)
@@ -602,7 +603,8 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 		device->privatecnt++;
 		err = dma_chan_get(chan);
 		if (err) {
-			pr_debug("%s: failed to get %s: (%d)\n",
+			dev_dbg(chan->device->dev,
+				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
 			chan = NULL;
 			if (--device->privatecnt == 0)
@@ -662,7 +664,7 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 	}
 	mutex_unlock(&dma_list_mutex);

-	pr_debug("%s: %s (%s)\n",
+	dev_dbg(chan->device->dev, "%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);
@@ -814,8 +816,9 @@ void dmaengine_get(void)
 				list_del_rcu(&device->global_node);
 				break;
 			} else if (err)
-				pr_debug("%s: failed to get %s: (%d)\n",
-					__func__, dma_chan_name(chan), err);
+				dev_dbg(chan->device->dev,
+					"%s: failed to get %s: (%d)\n",
+					__func__, dma_chan_name(chan), err);
 		}
 	}

@@ -1222,8 +1225,9 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)

 	while (tx->cookie == -EBUSY) {
 		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-			pr_err("%s timeout waiting for descriptor submission\n",
-			       __func__);
+			dev_err(tx->chan->device->dev,
+				"%s timeout waiting for descriptor submission\n",
+				__func__);
 			return DMA_ERROR;
 		}
 		cpu_relax();