@@ -605,7 +605,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
 		num_qps_mw = qp_count / mw_count;
 
 	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
-	qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
+	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
 	rx_size -= sizeof(struct ntb_rx_info);
 
 	qp->remote_rx_info = qp->rx_buff + rx_size;
@@ -825,10 +825,10 @@ static void ntb_transport_link_work(struct work_struct *work)
 			size = max_mw_size;
 
 		spad = MW0_SZ_HIGH + (i * 2);
-		ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));
+		ntb_peer_spad_write(ndev, spad, upper_32_bits(size));
 
 		spad = MW0_SZ_LOW + (i * 2);
-		ntb_peer_spad_write(ndev, spad, (u32)size);
+		ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
 	}
 
 	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);
@@ -928,7 +928,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 				    unsigned int qp_num)
 {
 	struct ntb_transport_qp *qp;
-	struct ntb_transport_mw *mw;
 	phys_addr_t mw_base;
 	resource_size_t mw_size;
 	unsigned int num_qps_mw, tx_size;
@@ -939,7 +938,6 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	qp_count = nt->qp_count;
 
 	mw_num = QP_TO_MW(nt, qp_num);
-	mw = &nt->mw_vec[mw_num];
 
 	qp = &nt->qp_vec[qp_num];
 	qp->qp_num = qp_num;
@@ -958,7 +956,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	mw_size = nt->mw_vec[mw_num].phys_size;
 
 	tx_size = (unsigned int)mw_size / num_qps_mw;
-	qp_offset = tx_size * qp_num / mw_count;
+	qp_offset = tx_size * (qp_num / mw_count);
 
 	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
 	if (!qp->tx_mw)
@@ -1080,7 +1078,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 				  GFP_KERNEL, node);
 	if (!nt->qp_vec) {
 		rc = -ENOMEM;
-		goto err2;
+		goto err1;
 	}
 
 	if (nt_debugfs_dir) {
@@ -1092,7 +1090,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 	for (i = 0; i < qp_count; i++) {
 		rc = ntb_transport_init_queue(nt, i);
 		if (rc)
-			goto err3;
+			goto err2;
 	}
 
 	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
@@ -1100,12 +1098,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 
 	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
 	if (rc)
-		goto err3;
+		goto err2;
 
 	INIT_LIST_HEAD(&nt->client_devs);
 	rc = ntb_bus_init(nt);
 	if (rc)
-		goto err4;
+		goto err3;
 
 	nt->link_is_up = false;
 	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
@@ -1113,17 +1111,16 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
 
 	return 0;
 
-err4:
-	ntb_clear_ctx(ndev);
 err3:
-	kfree(nt->qp_vec);
+	ntb_clear_ctx(ndev);
 err2:
-	kfree(nt->mw_vec);
+	kfree(nt->qp_vec);
 err1:
 	while (i--) {
 		mw = &nt->mw_vec[i];
 		iounmap(mw->vbase);
 	}
+	kfree(nt->mw_vec);
 err:
 	kfree(nt);
 	return rc;
@@ -1931,13 +1928,11 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
  */
 void ntb_transport_link_down(struct ntb_transport_qp *qp)
 {
-	struct pci_dev *pdev;
 	int val;
 
 	if (!qp)
 		return;
 
-	pdev = qp->ndev->pdev;
 	qp->client_ready = false;
 
 	val = ntb_spad_read(qp->ndev, QP_LINKS);
@@ -1996,23 +1991,24 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
  */
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
-	unsigned int max;
+	unsigned int max_size;
 	unsigned int copy_align;
+	struct dma_chan *rx_chan, *tx_chan;
 
 	if (!qp)
 		return 0;
 
-	if (!qp->tx_dma_chan && !qp->rx_dma_chan)
-		return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	rx_chan = qp->rx_dma_chan;
+	tx_chan = qp->tx_dma_chan;
 
-	copy_align = max(qp->tx_dma_chan->device->copy_align,
-			 qp->rx_dma_chan->device->copy_align);
+	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
+			 tx_chan ? tx_chan->device->copy_align : 0);
 
 	/* If DMA engine usage is possible, try to find the max size for that */
-	max = qp->tx_max_frame - sizeof(struct ntb_payload_header);
-	max -= max % (1 << copy_align);
+	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	max_size = round_down(max_size, 1 << copy_align);
 
-	return max;
+	return max_size;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
 