@@ -179,18 +179,204 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
 	struct fusion_context *fusion = instance->ctrl_context;
 	struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
 	struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
-	int i;
+	int i, j;
 	u16 ld_count;
+	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
+	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
+	struct MR_RAID_MAP_DESC_TABLE *desc_table;
 
 	struct MR_DRV_RAID_MAP_ALL *drv_map =
 		fusion->ld_drv_map[(instance->map_id & 1)];
 	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+	void *raid_map_data = NULL;
+
+	memset(drv_map, 0, fusion->drv_map_sz);
+	memset(pDrvRaidMap->ldTgtIdToLd,
+	       0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
+
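+	/*
+	 * Firmware that reports a non-zero max RAID map size hands over the
+	 * new descriptor-based dynamic map; otherwise fall back to the
+	 * extended (256 VD) map, and finally to the legacy map below.
+	 */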
+	if (instance->max_raid_mapsize) {
+		fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+#if VD_EXT_DEBUG
+		dev_dbg(&instance->pdev->dev, "raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x\n",
+			le32_to_cpu(fw_map_dyn->raid_map_size),
+			le32_to_cpu(fw_map_dyn->desc_table_offset));
+		dev_dbg(&instance->pdev->dev, "descTableSize 0x%x descTableNumElements 0x%x\n",
+			le32_to_cpu(fw_map_dyn->desc_table_size),
+			le32_to_cpu(fw_map_dyn->desc_table_num_elements));
+		dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n",
+			drv_map, fw_map_dyn->ld_count);
+#endif
+		desc_table =
+			(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
+		if (desc_table != fw_map_dyn->raid_map_desc_table)
+			dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
+				desc_table, fw_map_dyn->raid_map_desc_table);
+
+		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
+		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+		pDrvRaidMap->fpPdIoTimeoutSec =
+			fw_map_dyn->fp_pd_io_timeout_sec;
+		pDrvRaidMap->totalSize = sizeof(struct MR_DRV_RAID_MAP_ALL);
+		/* point to actual data starting point */
+		raid_map_data = (void *)fw_map_dyn +
+			le32_to_cpu(fw_map_dyn->desc_table_offset) +
+			le32_to_cpu(fw_map_dyn->desc_table_size);
+
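+		/*
+		 * Walk the descriptor table: each entry carries the type,
+		 * offset and element count of one section of the map.
+		 */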
+		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
+#if VD_EXT_DEBUG
+			dev_dbg(&instance->pdev->dev, "desc table %p\n",
+				desc_table);
+			dev_dbg(&instance->pdev->dev, "raidmap type %d, raidmapOffset 0x%x\n",
+				desc_table->raid_map_desc_type,
+				desc_table->raid_map_desc_offset);
+ dev_dbg(&instance->pdev->dev, "raid map number of elements 0%x, raidmapsize 0x%x\n",
|
|
|
+ desc_table->raid_map_desc_elements,
|
|
|
+ desc_table->raid_map_desc_buffer_size);
|
|
|
+#endif
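+			/* copy this descriptor's payload into the fixed driver map */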
+			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
+			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
+				fw_map_dyn->dev_hndl_info =
+					(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+#if VD_EXT_DEBUG
+				dev_dbg(&instance->pdev->dev, "devHndlInfo address %p\n",
+					fw_map_dyn->dev_hndl_info);
+#endif
+				memcpy(pDrvRaidMap->devHndlInfo,
+				       fw_map_dyn->dev_hndl_info,
+				       sizeof(struct MR_DEV_HANDLE_INFO) *
+				       le32_to_cpu(desc_table->raid_map_desc_elements));
+				break;
+			case RAID_MAP_DESC_TYPE_TGTID_INFO:
+				fw_map_dyn->ld_tgt_id_to_ld =
+					(u16 *)(raid_map_data +
+					le32_to_cpu(desc_table->raid_map_desc_offset));
+#if VD_EXT_DEBUG
+				dev_dbg(&instance->pdev->dev, "ldTgtIdToLd address %p\n",
+					fw_map_dyn->ld_tgt_id_to_ld);
+#endif
+				for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
+					pDrvRaidMap->ldTgtIdToLd[j] =
+						fw_map_dyn->ld_tgt_id_to_ld[j];
+#if VD_EXT_DEBUG
+					dev_dbg(&instance->pdev->dev, " %d drv ldTgtIdToLd %d\n",
+						j, pDrvRaidMap->ldTgtIdToLd[j]);
+#endif
+				}
+				break;
+			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
+				fw_map_dyn->ar_map_info =
+					(struct MR_ARRAY_INFO *)
+					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+#if VD_EXT_DEBUG
+				dev_dbg(&instance->pdev->dev, "arMapInfo address %p\n",
+					fw_map_dyn->ar_map_info);
+#endif
+				memcpy(pDrvRaidMap->arMapInfo,
+				       fw_map_dyn->ar_map_info,
+				       sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements));
+				break;
+			case RAID_MAP_DESC_TYPE_SPAN_INFO:
+				fw_map_dyn->ld_span_map =
+					(struct MR_LD_SPAN_MAP *)
+					(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+				memcpy(pDrvRaidMap->ldSpanMap,
+				       fw_map_dyn->ld_span_map,
+				       sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements));
+#if VD_EXT_DEBUG
+				dev_dbg(&instance->pdev->dev, "ldSpanMap address %p\n",
+					fw_map_dyn->ld_span_map);
+				dev_dbg(&instance->pdev->dev, "MR_LD_SPAN_MAP size 0x%lx\n",
+					sizeof(struct MR_LD_SPAN_MAP));
+				for (j = 0; j < ld_count; j++) {
+					dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_dyn->ld_span_map[%d].ldRaid.targetId 0x%x\n",
+						j, j, fw_map_dyn->ld_span_map[j].ldRaid.targetId);
+					dev_dbg(&instance->pdev->dev, "fw_map_dyn->ld_span_map[%d].ldRaid.seqNum 0x%x\n",
+						j, fw_map_dyn->ld_span_map[j].ldRaid.seqNum);
+					dev_dbg(&instance->pdev->dev, "fw_map_dyn->ld_span_map[%d].ldRaid.rowSize 0x%x\n",
+						j, (u32)fw_map_dyn->ld_span_map[j].ldRaid.rowSize);
+
+					dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
+						j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId);
+					dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
+						j, pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum);
+					dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
+						j, (u32)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
+
+					dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n",
+						instance->unique_id, drv_map);
+					dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p/%p\n",
+						pDrvRaidMap,
+						&fw_map_dyn->ld_span_map[j].ldRaid,
+						&pDrvRaidMap->ldSpanMap[j].ldRaid);
+				}
+#endif
+				break;
+			default:
+				dev_dbg(&instance->pdev->dev, "unsupported descriptor type 0x%x\n",
+					le32_to_cpu(desc_table->raid_map_desc_type));
+			}
+			++desc_table;
+		}
+
+	} else if (instance->supportmax256vd) {
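+		/*
+		 * Extended (256 VD) map: MR_FW_RAID_MAP_EXT is a single flat
+		 * structure, so each section is copied into the driver map
+		 * directly.
+		 */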
+		fw_map_ext =
+			(struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
+		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
+			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map is not valid\n");
+			return;
+		}
+#if VD_EXT_DEBUG
+		for (i = 0; i < ld_count; i++) {
+			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : Index 0x%x\n",
+				instance->unique_id, i);
+			dev_dbg(&instance->pdev->dev, "Target Id 0x%x\n",
+				fw_map_ext->ldSpanMap[i].ldRaid.targetId);
+			dev_dbg(&instance->pdev->dev, "Seq Num 0x%x Size 0x%llx\n",
+				fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
+				fw_map_ext->ldSpanMap[i].ldRaid.size);
+		}
+#endif
+
+		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
+		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
+			pDrvRaidMap->ldTgtIdToLd[i] =
+				(u16)fw_map_ext->ldTgtIdToLd[i];
+		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
+		       sizeof(struct MR_LD_SPAN_MAP) * ld_count);
+#if VD_EXT_DEBUG
+		for (i = 0; i < ld_count; i++) {
+			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
+				i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId);
+			dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
+				i, fw_map_ext->ldSpanMap[i].ldRaid.seqNum);
+			dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
+				i, (u32)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
+
+			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
+				i, i, pDrvRaidMap->ldSpanMap[i].ldRaid.targetId);
+			dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
+				i, pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum);
+			dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
+				i, (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+
+			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n",
+				instance->unique_id, drv_map);
+			dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p %p\n",
+				pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
+				&pDrvRaidMap->ldSpanMap[i].ldRaid);
+		}
+#endif
+		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
+		       sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
+		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
+		       sizeof(struct MR_DEV_HANDLE_INFO) *
+		       MAX_RAIDMAP_PHYSICAL_DEVICES);
 
-	if (instance->supportmax256vd) {
-		memcpy(fusion->ld_drv_map[instance->map_id & 1],
-		       fusion->ld_map[instance->map_id & 1],
-		       fusion->current_map_sz);
 		/* New Raid map will not set totalSize, so keep expected value
 		 * for legacy code in ValidateMapInfo
 		 */
@@ -213,16 +399,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
 		}
 #endif
 
-		memset(drv_map, 0, fusion->drv_map_sz);
 		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
 		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
 		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
 		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
 			pDrvRaidMap->ldTgtIdToLd[i] =
 				(u8)pFwRaidMap->ldTgtIdToLd[i];
-		for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
-			i < MAX_LOGICAL_DRIVES_EXT; i++)
-			pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
 		for (i = 0; i < ld_count; i++) {
 			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
 #if VD_EXT_DEBUG
@@ -279,7 +461,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
 	lbInfo = fusion->load_balance_info;
 	ldSpanInfo = fusion->log_to_span;
 
-	if (instance->supportmax256vd)
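+	/* the dynamic map path stores the full driver map size in totalSize */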
+	if (instance->max_raid_mapsize)
+		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
+	else if (instance->supportmax256vd)
 		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
 	else
 		expected_size =
@@ -287,8 +471,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
 		(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
 
 	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
-		dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
-			(unsigned int) expected_size);
+		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
+			le32_to_cpu(pDrvRaidMap->totalSize));
+		dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
+			(unsigned int) expected_size);
 		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
 			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
 			le32_to_cpu(pDrvRaidMap->totalSize));
@@ -787,7 +973,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 	    ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
 	     ((fusion->adapter_type == INVADER_SERIES) &&
 	      (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
-		pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+		pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
 	else if (raid->level == 1) {
 		physArm = physArm + 1;
 		pd = MR_ArPdGet(arRef, physArm, map);
@@ -797,9 +983,16 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 	}
 
 	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
-		physArm;
-	io_info->span_arm = pRAID_Context->spanArm;
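+	/*
+	 * Ventura controllers use the RAID_CONTEXT_G35 layout, so the
+	 * span/arm value is stored through that structure instead.
+	 */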
+	if (instance->is_ventura) {
+		((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+	} else {
+		pRAID_Context->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm = pRAID_Context->span_arm;
+	}
 	return retval;
 }
 
@@ -891,7 +1084,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 	    ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
 	     ((fusion->adapter_type == INVADER_SERIES) &&
 	      (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
-		pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+		pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
 	else if (raid->level == 1) {
 		/* Get alternate Pd. */
 		physArm = physArm + 1;
@@ -903,9 +1096,16 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 	}
 
 	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
-		physArm;
-	io_info->span_arm = pRAID_Context->spanArm;
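+	/*
+	 * As above: Ventura controllers store the span/arm value through
+	 * the RAID_CONTEXT_G35 layout.
+	 */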
+	if (instance->is_ventura) {
+		((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+	} else {
+		pRAID_Context->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+		io_info->span_arm = pRAID_Context->span_arm;
+	}
 	return retval;
 }
 
@@ -1109,20 +1309,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 			regSize += stripSize;
 	}
 
-	pRAID_Context->timeoutValue =
+	pRAID_Context->timeout_value =
 		cpu_to_le16(raid->fpIoTimeoutForLd ?
 			    raid->fpIoTimeoutForLd :
 			    map->raidMap.fpPdIoTimeoutSec);
 	if (fusion->adapter_type == INVADER_SERIES)
-		pRAID_Context->regLockFlags = (isRead) ?
+		pRAID_Context->reg_lock_flags = (isRead) ?
 			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
-	else
-		pRAID_Context->regLockFlags = (isRead) ?
+	else if (!instance->is_ventura)
+		pRAID_Context->reg_lock_flags = (isRead) ?
 			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
-	pRAID_Context->VirtualDiskTgtId = raid->targetId;
-	pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
-	pRAID_Context->regLockLength = cpu_to_le32(regSize);
-	pRAID_Context->configSeqNum = raid->seqNum;
+	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
+	pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart);
+	pRAID_Context->reg_lock_length = cpu_to_le32(regSize);
+	pRAID_Context->config_seq_num = raid->seqNum;
 	/* save pointer to raid->LUN array */
 	*raidLUN = raid->LUN;
 
@@ -1140,6 +1340,13 @@ MR_BuildRaidContext(struct megasas_instance *instance,
 	/* If IO on an invalid Pd, then FP is not possible.*/
 	if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
 		io_info->fpOkForIo = FALSE;
+	/* if FP possible, set the SLUD bit in
+	 * regLockFlags for ventura
+	 */
+	else if ((instance->is_ventura) && (!isRead) &&
+		 (raid->writeMode == MR_RL_WRITE_BACK_MODE) &&
+		 (raid->capability.fp_cache_bypass_capable))
+		((struct RAID_CONTEXT_G35 *) pRAID_Context)->routing_flags.bits.sld = 1;
 	/* set raid 1/10 fast path write capable bit in io_info */
 	if (io_info->fpOkForIo &&
 	    (io_info->r1_alt_dev_handle != MR_PD_INVALID) &&
@@ -1319,6 +1526,7 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
 	struct fusion_context *fusion;
 	struct MR_LD_RAID *raid;
 	struct MR_DRV_RAID_MAP_ALL *drv_map;
+	u16 pd1_dev_handle;
 	u16 pend0, pend1, ld;
 	u64 diff0, diff1;
 	u8 bestArm, pd0, pd1, span, arm;
@@ -1344,23 +1552,36 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
 	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
 		(arm + 1 - span_row_size) : arm + 1, drv_map);
 
-	/* get the pending cmds for the data and mirror arms */
-	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
-	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+	/* Get PD1 Dev Handle */
+	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);
+
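+	/*
+	 * If the mirror arm has no valid device handle, keep the primary
+	 * arm instead of load balancing onto a missing drive.
+	 */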
+	if (pd1_dev_handle == MR_PD_INVALID) {
+		bestArm = arm;
+	} else {
+		/* get the pending cmds for the data and mirror arms */
+		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
 
-	/* Determine the disk whose head is nearer to the req. block */
-	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
-	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
-	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+		/* Determine the disk whose head is nearer to the req. block */
+		diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
 
-	if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
+		/* Make balance count from 16 to 4 to
+		 * keep driver in sync with Firmware
+		 */
+		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
 		    (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
-		bestArm ^= 1;
+			bestArm ^= 1;
+
+		/* Update the last accessed block on the correct pd */
+		io_info->span_arm =
+			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+	}
 
-	/* Update the last accessed block on the correct pd */
-	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
 	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
-	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
 #if SPAN_DEBUG
 	if (arm != bestArm)
 		dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
|