@@ -375,7 +375,7 @@ static inline void complete_tx(struct sdma_engine *sde,
 			   sde->head_sn, tx->sn);
 	sde->head_sn++;
 #endif
-	sdma_txclean(sde->dd, tx);
+	__sdma_txclean(sde->dd, tx);
 	if (complete)
 		(*complete)(tx, res);
 	if (wait && iowait_sdma_dec(wait))
@@ -1643,7 +1643,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
 }
 
 /**
- * sdma_txclean() - clean tx of mappings, descp *kmalloc's
+ * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
  * @dd: hfi1_devdata for unmapping
  * @tx: tx request to clean
  *
@@ -1653,7 +1653,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
  * The code can be called multiple times without issue.
  *
  */
-void sdma_txclean(
+void __sdma_txclean(
 	struct hfi1_devdata *dd,
 	struct sdma_txreq *tx)
 {
@@ -3080,7 +3080,7 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 		tx->descp[i] = tx->descs[i];
 	return 0;
 enomem:
-	sdma_txclean(dd, tx);
+	__sdma_txclean(dd, tx);
 	return -ENOMEM;
 }
 
@@ -3109,14 +3109,14 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 
 	rval = _extend_sdma_tx_descs(dd, tx);
 	if (rval) {
-		sdma_txclean(dd, tx);
+		__sdma_txclean(dd, tx);
 		return rval;
 	}
 
 	/* If coalesce buffer is allocated, copy data into it */
 	if (tx->coalesce_buf) {
 		if (type == SDMA_MAP_NONE) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return -EINVAL;
 		}
 
@@ -3124,7 +3124,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 			kvaddr = kmap(page);
 			kvaddr += offset;
 		} else if (WARN_ON(!kvaddr)) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return -EINVAL;
 		}
 
@@ -3154,7 +3154,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
 				      DMA_TO_DEVICE);
 
 		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return -ENOSPC;
 		}
 
@@ -3196,7 +3196,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 	if ((unlikely(tx->num_desc == tx->desc_limit))) {
 		rval = _extend_sdma_tx_descs(dd, tx);
 		if (rval) {
-			sdma_txclean(dd, tx);
+			__sdma_txclean(dd, tx);
 			return rval;
 		}
 	}
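
Not shown in these hunks: renaming the exported cleanup routine to __sdma_txclean() implies a companion change in the header, since other callers still refer to sdma_txclean(). A minimal sketch of what that could look like in sdma.h; the tx->num_desc guard and the wrapper's exact form are assumptions made for illustration, not something visible in this diff:

/* Sketch only: the "raw" cleanup routine stays exported, while
 * sdma_txclean() becomes a cheap inline front end that skips the
 * unmap/kfree work when the request never gained any descriptors.
 */
void __sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx);

static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	if (tx->num_desc)
		__sdma_txclean(dd, tx);
}

With a split like this, hot paths such as complete_tx() (first hunk) can call __sdma_txclean() directly because they know descriptors exist, while generic callers keep the cheaper guarded name.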