@@ -372,9 +372,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 			pvscsi_create_sg(ctx, sg, segs);
 
 			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
-			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
-						    SGL_SIZE, PCI_DMA_TODEVICE);
-			if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
+			ctx->sglPA = dma_map_single(&adapter->dev->dev,
+					ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
+			if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
 				scmd_printk(KERN_ERR, cmd,
 					    "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
 				scsi_dma_unmap(cmd);
@@ -389,9 +389,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 		 * In case there is no S/G list, scsi_sglist points
 		 * directly to the buffer.
 		 */
-		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+		ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
 					     cmd->sc_data_direction);
-		if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
+		if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
 			scmd_printk(KERN_ERR, cmd,
 				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
 			return -ENOMEM;
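The two hunks above are the streaming-DMA half of the conversion: pci_map_single()/pci_dma_mapping_error() on the struct pci_dev become dma_map_single()/dma_mapping_error() on the underlying struct device (&adapter->dev->dev), and the PCI_DMA_* direction constants become the generic DMA_* values. A minimal sketch of that idiom, using illustrative names (example_map, pdev, buf, len) rather than the driver's own:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Map a CPU buffer for a host-to-device (write) transfer. */
	static int example_map(struct pci_dev *pdev, void *buf, size_t len,
			       dma_addr_t *handle)
	{
		*handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *handle))
			return -ENOMEM;	/* nothing was mapped, no unmap needed */
		return 0;
	}

The later unmap must use the same struct device, size, and direction, which is what the pvscsi_unmap_buffers() hunk below keeps in step.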
@@ -417,23 +417,23 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
 		if (count != 0) {
 			scsi_dma_unmap(cmd);
 			if (ctx->sglPA) {
-				pci_unmap_single(adapter->dev, ctx->sglPA,
-						 SGL_SIZE, PCI_DMA_TODEVICE);
+				dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
+						 SGL_SIZE, DMA_TO_DEVICE);
 				ctx->sglPA = 0;
 			}
 		} else
-			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
-					 cmd->sc_data_direction);
+			dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
+					 bufflen, cmd->sc_data_direction);
 	}
 	if (cmd->sense_buffer)
-		pci_unmap_single(adapter->dev, ctx->sensePA,
-				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
+				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
 {
-	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-						    &adapter->ringStatePA);
+	adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+			&adapter->ringStatePA, GFP_KERNEL);
 	if (!adapter->rings_state)
 		return -ENOMEM;
 
@@ -441,17 +441,17 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
 			     pvscsi_ring_pages);
 	adapter->req_depth = adapter->req_pages
 					* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
-	adapter->req_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->req_pages * PAGE_SIZE,
-						 &adapter->reqRingPA);
+	adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
+			GFP_KERNEL);
 	if (!adapter->req_ring)
 		return -ENOMEM;
 
 	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
 			     pvscsi_ring_pages);
-	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->cmp_pages * PAGE_SIZE,
-						 &adapter->cmpRingPA);
+	adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
+			GFP_KERNEL);
 	if (!adapter->cmp_ring)
 		return -ENOMEM;
 
@@ -464,9 +464,9 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
 
 	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
 				 pvscsi_msg_ring_pages);
-	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
-						 adapter->msg_pages * PAGE_SIZE,
-						 &adapter->msgRingPA);
+	adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
+			adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
+			GFP_KERNEL);
 	if (!adapter->msg_ring)
 		return -ENOMEM;
 	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
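The ring allocations above are the coherent-DMA half of the conversion: pci_alloc_consistent() becomes dma_alloc_coherent(), which takes the GFP flags explicitly, so GFP_KERNEL is passed to keep the old sleeping-allocation behaviour. A small sketch of the pattern, again with illustrative names:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Allocate one page of coherent DMA memory, as the ring setup does. */
	static void *example_alloc_ring(struct pci_dev *pdev, dma_addr_t *ring_pa)
	{
		return dma_alloc_coherent(&pdev->dev, PAGE_SIZE, ring_pa,
					  GFP_KERNEL);
	}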
@@ -708,10 +708,10 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
 	e->lun[1] = sdev->lun;
 
 	if (cmd->sense_buffer) {
-		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
-					      SCSI_SENSE_BUFFERSIZE,
-					      PCI_DMA_FROMDEVICE);
-		if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
+		ctx->sensePA = dma_map_single(&adapter->dev->dev,
+				cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+				DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
 			scmd_printk(KERN_ERR, cmd,
 				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
 			ctx->sensePA = 0;
@@ -740,9 +740,9 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
 
 	if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
 		if (cmd->sense_buffer) {
-			pci_unmap_single(adapter->dev, ctx->sensePA,
+			dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
 					 SCSI_SENSE_BUFFERSIZE,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			ctx->sensePA = 0;
 		}
 		return -ENOMEM;
@@ -1218,21 +1218,21 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
 	}
 
 	if (adapter->rings_state)
-		pci_free_consistent(adapter->dev, PAGE_SIZE,
+		dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
 				    adapter->rings_state, adapter->ringStatePA);
 
 	if (adapter->req_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->req_pages * PAGE_SIZE,
 				    adapter->req_ring, adapter->reqRingPA);
 
 	if (adapter->cmp_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->cmp_pages * PAGE_SIZE,
 				    adapter->cmp_ring, adapter->cmpRingPA);
 
 	if (adapter->msg_ring)
-		pci_free_consistent(adapter->dev,
+		dma_free_coherent(&adapter->dev->dev,
 				    adapter->msg_pages * PAGE_SIZE,
 				    adapter->msg_ring, adapter->msgRingPA);
 }
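Teardown is symmetric: pci_free_consistent() becomes dma_free_coherent(), called with the same device, size, CPU pointer and DMA handle that the allocation returned. Continuing the illustrative names from the allocation sketch above:

	static void example_free_ring(struct pci_dev *pdev, void *ring,
				      dma_addr_t ring_pa)
	{
		dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_pa);
	}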
@@ -1291,8 +1291,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
 	u32 numPhys = 16;
 
 	dev = pvscsi_dev(adapter);
-	config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
-					   &configPagePA);
+	config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+			&configPagePA, GFP_KERNEL);
 	if (!config_page) {
 		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
 		goto exit;
@@ -1326,7 +1326,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
 	} else
 		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
 			 header->hostStatus, header->scsiStatus);
-	pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+	dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
+			  configPagePA);
 exit:
 	return numPhys;
 }
@@ -1346,11 +1347,9 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (pci_enable_device(pdev))
 		return error;
 
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
-	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
-	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
-		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+	} else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
 		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
 	} else {
 		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
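dma_set_mask_and_coherent() folds the old pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into a single call that returns 0 on success, hence the switch to simple '!' tests. A short sketch of the probe-time fallback idiom, with an illustrative function name:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Prefer 64-bit DMA addressing, fall back to 32-bit. */
	static int example_set_dma_mask(struct pci_dev *pdev)
	{
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}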