@@ -1193,8 +1193,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
-	if (ctrl->stripe_size)
-		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
@@ -1250,19 +1250,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	ctrl->max_hw_sectors =
 		min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
 
-	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
-		unsigned int max_hw_sectors;
-
-		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
-		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
-		if (ctrl->max_hw_sectors) {
-			ctrl->max_hw_sectors = min(max_hw_sectors,
-						   ctrl->max_hw_sectors);
-		} else {
-			ctrl->max_hw_sectors = max_hw_sectors;
-		}
-	}
-
 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
 	ctrl->sgls = le32_to_cpu(id->sgls);
 	ctrl->kas = le16_to_cpu(id->kas);
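
For context, the replacement leans on the block layer's chunk_sectors limit: once blk_queue_chunk_sectors() is set, the block layer will not issue an I/O that crosses a chunk boundary, so setting the chunk size to max_hw_sectors keeps each I/O within one stripe on controllers carrying the NVME_QUIRK_STRIPE_SIZE quirk, without tracking a separate per-controller stripe_size derived from the vendor-specific Identify field id->vs[3]. Below is a minimal userspace sketch (not kernel code) of that splitting rule; max_io_at() and the 128-sector geometry are hypothetical, chosen only to mirror the intent of the kernel's blk_max_size_offset() helper, which performs the same clamp with a power-of-two mask rather than the plain modulo used here for clarity.

#include <stdio.h>

/*
 * Largest I/O (in 512-byte sectors) allowed at a given start sector:
 * an I/O may extend to the next chunk boundary at most, and never
 * beyond the controller's max transfer size.
 */
static unsigned int max_io_at(unsigned int offset, unsigned int chunk_sectors,
			      unsigned int max_sectors)
{
	unsigned int to_boundary = chunk_sectors - (offset % chunk_sectors);

	return to_boundary < max_sectors ? to_boundary : max_sectors;
}

int main(void)
{
	/* Hypothetical controller: 128-sector stripes, 128-sector max I/O. */
	unsigned int chunk = 128, max_hw = 128;

	/* An aligned I/O may use the full stripe... */
	printf("at sector 0:  up to %u sectors\n", max_io_at(0, chunk, max_hw));
	/* ...but one starting mid-stripe is clipped at the boundary (32). */
	printf("at sector 96: up to %u sectors\n", max_io_at(96, chunk, max_hw));
	return 0;
}

With chunk_sectors equal to max_hw_sectors, an aligned maximum-size request fills exactly one stripe, and any request that would straddle a boundary is split there by the block layer, which is why the explicit stripe_size bookkeeping removed in the second hunk is no longer needed.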