@@ -2149,27 +2149,6 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
 	return blk_mq_map_queues(set);
 }
 
-static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
-{
-	struct device *host_dev;
-	u64 bounce_limit = 0xffffffff;
-
-	if (shost->unchecked_isa_dma)
-		return BLK_BOUNCE_ISA;
-	/*
-	 * Platforms with virtual-DMA translation
-	 * hardware have no practical limit.
-	 */
-	if (!PCI_DMA_BUS_IS_PHYS)
-		return BLK_BOUNCE_ANY;
-
-	host_dev = scsi_get_device(shost);
-	if (host_dev && host_dev->dma_mask)
-		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
-
-	return bounce_limit;
-}
-
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
 	struct device *dev = shost->dma_dev;
@@ -2189,7 +2168,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 	}
 
 	blk_queue_max_hw_sectors(q, shost->max_sectors);
-	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
+	if (shost->unchecked_isa_dma)
+		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
 