scsi: megaraid_sas: Dynamic Raid Map Changes for SAS3.5 Generic Megaraid Controllers
SAS3.5 Generic Megaraid Controllers FW will support a new dynamic RaidMap, allowing the map to have different sizes for different numbers of supported VDs.

Signed-off-by: Sasikumar Chandrasekaran <sasikumar.pc@broadcom.com>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Parent: 69c337c0f8
Commit: d889344e4e
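In outline: on SAS3.5 (Ventura) controllers the firmware advertises the maximum RAID map size in the outbound scratch pad 3 register, expressed in 64 KB units, and the map itself becomes self-describing through a descriptor table instead of a fixed struct layout. A minimal userspace sketch of the size derivation, using the shift/mask/unit values introduced in the diff below (the register value here is a made-up example, not a real reading):

    #include <stdint.h>
    #include <stdio.h>

    #define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT 16
    #define MR_MAX_RAID_MAP_SIZE_MASK         0x1FF
    #define MR_MIN_MAP_SIZE                   0x10000 /* 64k */

    int main(void)
    {
        uint32_t scratch_pad_3 = 0x00040000; /* hypothetical register value */

        /* decode the 9-bit "map size in 64K units" field */
        uint32_t max_raid_mapsize =
            (scratch_pad_3 >> MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
            MR_MAX_RAID_MAP_SIZE_MASK;
        uint32_t ventura_map_sz = max_raid_mapsize * MR_MIN_MAP_SIZE;

        printf("map size: 0x%x bytes\n", (unsigned)ventura_map_sz);
        return 0;
    }

With scratch_pad_3 = 0x00040000 the field decodes to 4, giving a 256 KB map; a zero field means the firmware does not support the dynamic map, and the driver falls back to the legacy or extended fixed-size maps.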
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -1434,6 +1434,12 @@ enum FW_BOOT_CONTEXT {
 #define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
 #define MR_MAX_MSIX_REG_ARRAY                   16
 #define MR_RDPQ_MODE_OFFSET                     0X00800000
+
+#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT       16
+#define MR_MAX_RAID_MAP_SIZE_MASK               0x1FF
+#define MR_MIN_MAP_SIZE                         0x10000
+/* 64k */
+
 #define MR_CAN_HANDLE_SYNC_CACHE_OFFSET         0X01000000
 
 /*
@@ -2151,6 +2157,7 @@ struct megasas_instance {
     bool fw_sync_cache_support;
     bool is_ventura;
     bool msix_combined;
+    u16 max_raid_mapsize;
 };
 struct MR_LD_VF_MAP {
     u32 size;
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4424,8 +4424,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
 {
     struct fusion_context *fusion;
-    u32 old_map_sz;
-    u32 new_map_sz;
+    u32 ventura_map_sz = 0;
 
     fusion = instance->ctrl_context;
     /* For MFI based controllers return dummy success */
@@ -4455,21 +4454,38 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
         instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
         "Legacy(64 VD) firmware");
 
-    old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
-            (sizeof(struct MR_LD_SPAN_MAP) *
-            (instance->fw_supported_vd_count - 1));
-    new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
-    fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
-            (sizeof(struct MR_LD_SPAN_MAP) *
-            (instance->drv_supported_vd_count - 1));
+    if (instance->max_raid_mapsize) {
+        ventura_map_sz = instance->max_raid_mapsize *
+                    MR_MIN_MAP_SIZE; /* 64k */
+        fusion->current_map_sz = ventura_map_sz;
+        fusion->max_map_sz = ventura_map_sz;
+    } else {
+        fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
+                    (sizeof(struct MR_LD_SPAN_MAP) *
+                    (instance->fw_supported_vd_count - 1));
+        fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
 
-    fusion->max_map_sz = max(old_map_sz, new_map_sz);
+        fusion->max_map_sz =
+            max(fusion->old_map_sz, fusion->new_map_sz);
 
-    if (instance->supportmax256vd)
-        fusion->current_map_sz = new_map_sz;
-    else
-        fusion->current_map_sz = old_map_sz;
+        if (instance->supportmax256vd)
+            fusion->current_map_sz = fusion->new_map_sz;
+        else
+            fusion->current_map_sz = fusion->old_map_sz;
+    }
+    /* irrespective of FW raid maps, driver raid map is constant */
+    fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
+
+#if VD_EXT_DEBUG
+    dev_info(&instance->pdev->dev, "instance->max_raid_mapsize 0x%x\n ",
+        instance->max_raid_mapsize);
+    dev_info(&instance->pdev->dev, "new_map_sz = 0x%x, old_map_sz = 0x%x\n",
+        fusion->new_map_sz, fusion->old_map_sz);
+    dev_info(&instance->pdev->dev, "ventura_map_sz = 0x%x, current_map_sz = 0x%x\n",
+        ventura_map_sz, fusion->current_map_sz);
+    dev_info(&instance->pdev->dev, "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx\n",
+        fusion->drv_map_sz, sizeof(struct MR_DRV_RAID_MAP_ALL));
+#endif
 }
 
 /**
@@ -4996,7 +5012,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 {
     u32 max_sectors_1;
     u32 max_sectors_2;
-    u32 tmp_sectors, msix_enable, scratch_pad_2;
+    u32 tmp_sectors, msix_enable, scratch_pad_2, scratch_pad_3;
     resource_size_t base_addr;
     struct megasas_register_set __iomem *reg_set;
     struct megasas_ctrl_info *ctrl_info = NULL;
@@ -5072,7 +5088,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
         goto fail_ready_state;
     }
 
+    if (instance->is_ventura) {
+        scratch_pad_3 =
+            readl(&instance->reg_set->outbound_scratch_pad_3);
+#if VD_EXT_DEBUG
+        dev_info(&instance->pdev->dev, "scratch_pad3 0x%x\n",
+            scratch_pad_3);
+#endif
+        instance->max_raid_mapsize = ((scratch_pad_3 >>
+            MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
+            MR_MAX_RAID_MAP_SIZE_MASK);
+    }
+
     /* Check if MSI-X is supported while in ready state */
     msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
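In effect, megasas_update_ext_vd_details now picks between three firmware map sizes while the driver-side map stays one fixed size. A compact restatement of the selection as a pure function (a sketch only; the fixed-map sizes are stand-ins passed in by the caller):

    #include <stdint.h>

    #define MR_MIN_MAP_SIZE 0x10000 /* 64k */

    /* three-way choice: dynamic (Ventura), extended (240 VD), legacy (64 VD) */
    static uint32_t current_fw_map_sz(uint16_t max_raid_mapsize,
                                      int supportmax256vd,
                                      uint32_t old_map_sz, uint32_t new_map_sz)
    {
        if (max_raid_mapsize)        /* dynamic RAID map advertised by FW */
            return (uint32_t)max_raid_mapsize * MR_MIN_MAP_SIZE;
        return supportmax256vd ? new_map_sz : old_map_sz;
    }

Whatever the firmware side reports, fusion->drv_map_sz is now always sizeof(struct MR_DRV_RAID_MAP_ALL), so the rest of the driver can index a single layout.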
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -179,18 +179,204 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
     struct fusion_context *fusion = instance->ctrl_context;
     struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
     struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
-    int i;
+    int i, j;
     u16 ld_count;
+    struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
+    struct MR_FW_RAID_MAP_EXT *fw_map_ext;
+    struct MR_RAID_MAP_DESC_TABLE *desc_table;
 
 
     struct MR_DRV_RAID_MAP_ALL *drv_map =
         fusion->ld_drv_map[(instance->map_id & 1)];
     struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+    void *raid_map_data = NULL;
+
+    memset(drv_map, 0, fusion->drv_map_sz);
+    memset(pDrvRaidMap->ldTgtIdToLd,
+        0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));
+
+    if (instance->max_raid_mapsize) {
+        fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
+#if VD_EXT_DEBUG
+        dev_dbg(&instance->pdev->dev, "raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x\n",
+            le32_to_cpu(fw_map_dyn->raid_map_size),
+            le32_to_cpu(fw_map_dyn->desc_table_offset));
+        dev_dbg(&instance->pdev->dev, "descTableSize 0x%x descTableNumElements 0x%x\n",
+            le32_to_cpu(fw_map_dyn->desc_table_size),
+            le32_to_cpu(fw_map_dyn->desc_table_num_elements));
+        dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n",
+            drv_map, fw_map_dyn->ld_count);
+#endif
+        desc_table =
+        (struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
+        if (desc_table != fw_map_dyn->raid_map_desc_table)
+            dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
+                desc_table, fw_map_dyn->raid_map_desc_table);
+
+        ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
+        pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+        pDrvRaidMap->fpPdIoTimeoutSec =
+            fw_map_dyn->fp_pd_io_timeout_sec;
+        pDrvRaidMap->totalSize = sizeof(struct MR_DRV_RAID_MAP_ALL);
+        /* point to actual data starting point*/
+        raid_map_data = (void *)fw_map_dyn +
+            le32_to_cpu(fw_map_dyn->desc_table_offset) +
+            le32_to_cpu(fw_map_dyn->desc_table_size);
+
+        for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {
+
+#if VD_EXT_DEBUG
+            dev_dbg(&instance->pdev->dev, "desc table %p\n",
+                desc_table);
+            dev_dbg(&instance->pdev->dev, "raidmap type %d, raidmapOffset 0x%x\n",
+                desc_table->raid_map_desc_type,
+                desc_table->raid_map_desc_offset);
+            dev_dbg(&instance->pdev->dev, "raid map number of elements 0%x, raidmapsize 0x%x\n",
+                desc_table->raid_map_desc_elements,
+                desc_table->raid_map_desc_buffer_size);
+#endif
+            switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
+            case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
+                fw_map_dyn->dev_hndl_info =
+                (struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+#if VD_EXT_DEBUG
+                dev_dbg(&instance->pdev->dev, "devHndlInfo address %p\n",
+                    fw_map_dyn->dev_hndl_info);
+#endif
+                memcpy(pDrvRaidMap->devHndlInfo,
+                    fw_map_dyn->dev_hndl_info,
+                    sizeof(struct MR_DEV_HANDLE_INFO) *
+                    le32_to_cpu(desc_table->raid_map_desc_elements));
+                break;
+            case RAID_MAP_DESC_TYPE_TGTID_INFO:
+                fw_map_dyn->ld_tgt_id_to_ld =
+                    (u16 *)(raid_map_data +
+                    le32_to_cpu(desc_table->raid_map_desc_offset));
+#if VD_EXT_DEBUG
+                dev_dbg(&instance->pdev->dev, "ldTgtIdToLd address %p\n",
+                    fw_map_dyn->ld_tgt_id_to_ld);
+#endif
+                for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
+                    pDrvRaidMap->ldTgtIdToLd[j] =
+                        fw_map_dyn->ld_tgt_id_to_ld[j];
+#if VD_EXT_DEBUG
+                    dev_dbg(&instance->pdev->dev, " %d drv ldTgtIdToLd %d\n",
+                        j, pDrvRaidMap->ldTgtIdToLd[j]);
+#endif
+                }
+                break;
+            case RAID_MAP_DESC_TYPE_ARRAY_INFO:
+                fw_map_dyn->ar_map_info =
+                    (struct MR_ARRAY_INFO *)
+                (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+#if VD_EXT_DEBUG
+                dev_dbg(&instance->pdev->dev, "arMapInfo address %p\n",
+                    fw_map_dyn->ar_map_info);
+#endif
+
+                memcpy(pDrvRaidMap->arMapInfo,
+                    fw_map_dyn->ar_map_info,
+                    sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements));
+                break;
+            case RAID_MAP_DESC_TYPE_SPAN_INFO:
+                fw_map_dyn->ld_span_map =
+                    (struct MR_LD_SPAN_MAP *)
+                (raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
+                memcpy(pDrvRaidMap->ldSpanMap,
+                    fw_map_dyn->ld_span_map,
+                    sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements));
+#if VD_EXT_DEBUG
+                dev_dbg(&instance->pdev->dev, "ldSpanMap address %p\n",
+                    fw_map_dyn->ld_span_map);
+                dev_dbg(&instance->pdev->dev, "MR_LD_SPAN_MAP size 0x%lx\n",
+                    sizeof(struct MR_LD_SPAN_MAP));
+                for (j = 0; j < ld_count; j++) {
+                    dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
+                        j, j, fw_map_dyn->ld_span_map[j].ldRaid.targetId);
+                    dev_dbg(&instance->pdev->dev, "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
+                        j, fw_map_dyn->ld_span_map[j].ldRaid.seqNum);
+                    dev_dbg(&instance->pdev->dev, "fw_map_dyn->ld_span_map[%d].ldRaid.rowSize 0x%x\n",
+                        j, (u32)fw_map_dyn->ld_span_map[j].ldRaid.rowSize);
+
+                    dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
+                        j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId);
+                    dev_dbg(&instance->pdev->dev, "DrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
+                        j, pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum);
+                    dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
+                        j, (u32)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);
+
+                    dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n",
+                        instance->unique_id, drv_map);
+                    dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p/%p\n",
+                        pDrvRaidMap,
+                        &fw_map_dyn->ld_span_map[j].ldRaid,
+                        &pDrvRaidMap->ldSpanMap[j].ldRaid);
+                }
+#endif
+                break;
+            default:
+                dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
+                    fw_map_dyn->desc_table_num_elements);
+            }
+            ++desc_table;
+        }
+
+    } else if (instance->supportmax256vd) {
+        fw_map_ext =
+        (struct MR_FW_RAID_MAP_EXT *)fusion->ld_map[(instance->map_id & 1)];
+        ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
+        if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
+            dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
+            return;
+        }
+#if VD_EXT_DEBUG
+        for (i = 0; i < ld_count; i++) {
+            dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :Index 0x%x\n",
+                instance->unique_id, i);
+            dev_dbg(&instance->pdev->dev, "Target Id 0x%x\n",
+                fw_map_ext->ldSpanMap[i].ldRaid.targetId);
+            dev_dbg(&instance->pdev->dev, "Seq Num 0x%x Size 0/%llx\n",
+                fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
+                fw_map_ext->ldSpanMap[i].ldRaid.size);
+        }
+#endif
+
+        pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
+        pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
+        for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
+            pDrvRaidMap->ldTgtIdToLd[i] =
+                (u16)fw_map_ext->ldTgtIdToLd[i];
+        memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
+            sizeof(struct MR_LD_SPAN_MAP) * ld_count);
+#if VD_EXT_DEBUG
+        for (i = 0; i < ld_count; i++) {
+            dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
+                i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId);
+            dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
+                i, fw_map_ext->ldSpanMap[i].ldRaid.seqNum);
+            dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
+                i, (u32)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);
+
+            dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
+                i, i, pDrvRaidMap->ldSpanMap[i].ldRaid.targetId);
+            dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
+                i, pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum);
+            dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
+                i, (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+
+            dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n",
+                instance->unique_id, drv_map);
+            dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p %p\n",
+                pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
+                &pDrvRaidMap->ldSpanMap[i].ldRaid);
+        }
+#endif
+        memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
+            sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
+        memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
+            sizeof(struct MR_DEV_HANDLE_INFO) *
+            MAX_RAIDMAP_PHYSICAL_DEVICES);
 
-    if (instance->supportmax256vd) {
-        memcpy(fusion->ld_drv_map[instance->map_id & 1],
-            fusion->ld_map[instance->map_id & 1],
-            fusion->current_map_sz);
         /* New Raid map will not set totalSize, so keep expected value
          * for legacy code in ValidateMapInfo
          */
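The dynamic map is self-describing: a header gives the offset and size of a descriptor table, and each descriptor gives the type, offset, and element count of one variable-sized region (device handles, target-id map, array info, span maps) relative to the end of the descriptor table. A stripped-down userspace sketch of that walk, with made-up struct layouts that only model the fields used here (the real layouts live in megaraid_sas_fusion.h):

    #include <stdint.h>
    #include <string.h>

    /* hypothetical, simplified descriptor (models only the fields used below) */
    struct raid_map_desc {
        uint32_t type;      /* which region this descriptor describes */
        uint32_t offset;    /* offset of the region, relative to raid_map_data */
        uint32_t elements;  /* number of entries in the region */
        uint32_t buf_size;  /* size of the region in bytes */
    };

    enum { DESC_DEVHDL = 0, DESC_TGTID = 1, DESC_ARRAY = 2, DESC_SPAN = 3 };

    /* walk every descriptor and copy the region it points at into the
     * fixed-layout driver map; unknown types are skipped, which is what
     * lets a driver tolerate maps from newer firmware */
    static void walk_desc_table(const uint8_t *fw_map,
                                uint32_t desc_table_offset,
                                uint32_t desc_table_size,
                                uint32_t num_elements,
                                uint16_t *ld_tgt_id_to_ld /* , ...other dsts */)
    {
        const struct raid_map_desc *desc =
            (const struct raid_map_desc *)(fw_map + desc_table_offset);
        /* regions start right after the descriptor table */
        const uint8_t *raid_map_data =
            fw_map + desc_table_offset + desc_table_size;

        for (uint32_t i = 0; i < num_elements; ++i, ++desc) {
            const void *src = raid_map_data + desc->offset;

            switch (desc->type) {
            case DESC_TGTID:
                memcpy(ld_tgt_id_to_ld, src,
                       desc->elements * sizeof(uint16_t));
                break;
            /* DESC_DEVHDL, DESC_ARRAY, DESC_SPAN: same pattern, other dst */
            default:
                break; /* unknown descriptor: ignore */
            }
        }
    }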
@@ -213,16 +399,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
         }
 #endif
 
-        memset(drv_map, 0, fusion->drv_map_sz);
         pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
         pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
         pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
         for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
             pDrvRaidMap->ldTgtIdToLd[i] =
                 (u8)pFwRaidMap->ldTgtIdToLd[i];
-        for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
-            i < MAX_LOGICAL_DRIVES_EXT; i++)
-            pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
         for (i = 0; i < ld_count; i++) {
             pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
 #if VD_EXT_DEBUG
@@ -279,7 +461,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
     lbInfo = fusion->load_balance_info;
     ldSpanInfo = fusion->log_to_span;
 
-    if (instance->supportmax256vd)
+    if (instance->max_raid_mapsize)
+        expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
+    else if (instance->supportmax256vd)
         expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
     else
         expected_size =
@@ -287,8 +471,10 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
         (sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));
 
     if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
-        dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
-            (unsigned int) expected_size);
+        dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
+            le32_to_cpu(pDrvRaidMap->totalSize));
+        dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
+            (unsigned int) expected_size);
         dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
             (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
             le32_to_cpu(pDrvRaidMap->totalSize));
@@ -787,7 +973,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
         ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
         ((fusion->adapter_type == INVADER_SERIES) &&
         (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
-        pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+        pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
     else if (raid->level == 1) {
         physArm = physArm + 1;
         pd = MR_ArPdGet(arRef, physArm, map);
@@ -797,9 +983,16 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
     }
 
     *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-    pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
-                physArm;
-    io_info->span_arm = pRAID_Context->spanArm;
+    if (instance->is_ventura) {
+        ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
+            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+        io_info->span_arm =
+            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+    } else {
+        pRAID_Context->span_arm =
+            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+        io_info->span_arm = pRAID_Context->span_arm;
+    }
     return retval;
 }
 
@@ -891,7 +1084,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
         ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
         ((fusion->adapter_type == INVADER_SERIES) &&
         (raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
-        pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
+        pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
     else if (raid->level == 1) {
         /* Get alternate Pd. */
         physArm = physArm + 1;
@@ -903,9 +1096,16 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
     }
 
     *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
-    pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
-            physArm;
-    io_info->span_arm = pRAID_Context->spanArm;
+    if (instance->is_ventura) {
+        ((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
+            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+        io_info->span_arm =
+            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+    } else {
+        pRAID_Context->span_arm =
+            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
+        io_info->span_arm = pRAID_Context->span_arm;
+    }
     return retval;
 }
 
@@ -1109,20 +1309,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
             regSize += stripSize;
     }
 
-    pRAID_Context->timeoutValue =
+    pRAID_Context->timeout_value =
         cpu_to_le16(raid->fpIoTimeoutForLd ?
                 raid->fpIoTimeoutForLd :
                 map->raidMap.fpPdIoTimeoutSec);
     if (fusion->adapter_type == INVADER_SERIES)
-        pRAID_Context->regLockFlags = (isRead) ?
+        pRAID_Context->reg_lock_flags = (isRead) ?
             raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
-    else
-        pRAID_Context->regLockFlags = (isRead) ?
+    else if (!instance->is_ventura)
+        pRAID_Context->reg_lock_flags = (isRead) ?
             REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
-    pRAID_Context->VirtualDiskTgtId = raid->targetId;
-    pRAID_Context->regLockRowLBA = cpu_to_le64(regStart);
-    pRAID_Context->regLockLength = cpu_to_le32(regSize);
-    pRAID_Context->configSeqNum = raid->seqNum;
+    pRAID_Context->virtual_disk_tgt_id = raid->targetId;
+    pRAID_Context->reg_lock_row_lba = cpu_to_le64(regStart);
+    pRAID_Context->reg_lock_length = cpu_to_le32(regSize);
+    pRAID_Context->config_seq_num = raid->seqNum;
     /* save pointer to raid->LUN array */
     *raidLUN = raid->LUN;
 
@@ -1140,6 +1340,13 @@ MR_BuildRaidContext(struct megasas_instance *instance,
     /* If IO on an invalid Pd, then FP is not possible.*/
     if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
         io_info->fpOkForIo = FALSE;
+    /* if FP possible, set the SLUD bit in
+     * regLockFlags for ventura
+     */
+    else if ((instance->is_ventura) && (!isRead) &&
+        (raid->writeMode == MR_RL_WRITE_BACK_MODE) &&
+        (raid->capability.fp_cache_bypass_capable))
+        ((struct RAID_CONTEXT_G35 *) pRAID_Context)->routing_flags.bits.sld = 1;
     /* set raid 1/10 fast path write capable bit in io_info */
     if (io_info->fpOkForIo &&
         (io_info->r1_alt_dev_handle != MR_PD_INVALID) &&
@@ -1319,6 +1526,7 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
     struct fusion_context *fusion;
     struct MR_LD_RAID *raid;
     struct MR_DRV_RAID_MAP_ALL *drv_map;
+    u16 pd1_dev_handle;
     u16 pend0, pend1, ld;
     u64 diff0, diff1;
     u8 bestArm, pd0, pd1, span, arm;
@@ -1344,23 +1552,36 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
     pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
         (arm + 1 - span_row_size) : arm + 1, drv_map);
 
-    /* get the pending cmds for the data and mirror arms */
-    pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
-    pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
-
-    /* Determine the disk whose head is nearer to the req. block */
-    diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
-    diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
-    bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+    /* Get PD1 Dev Handle */
+    pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);
 
-    if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
-        (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
-        bestArm ^= 1;
+    if (pd1_dev_handle == MR_PD_INVALID) {
+        bestArm = arm;
+    } else {
+        /* get the pending cmds for the data and mirror arms */
+        pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+        pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
+
+        /* Determine the disk whose head is nearer to the req. block */
+        diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+        diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+        bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
+
+        /* Make balance count from 16 to 4 to
+         * keep driver in sync with Firmware
+         */
+        if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
+            (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
+            bestArm ^= 1;
+
+        /* Update the last accessed block on the correct pd */
+        io_info->span_arm =
+            (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+        io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+    }
 
-    /* Update the last accessed block on the correct pd */
-    io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
     lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
-    io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
 #if SPAN_DEBUG
     if (arm != bestArm)
         dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
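One behavioral change rides along with the renames in this hunk: before balancing an R1 read onto the mirror arm, the driver now checks that the mirror arm actually has a valid device handle, and pins the I/O to the primary arm when it does not (for example, when the mirror disk has been pulled). A rough standalone restatement of that decision, with assumed inputs in place of the driver's per-arm bookkeeping:

    #include <stdint.h>

    /* sketch: choose the arm for an R1 read; pd1_handle is the mirror arm's
     * device handle and 0xFFFF stands in for MR_PD_INVALID here */
    static unsigned int pick_r1_arm(uint16_t pd1_handle, unsigned int arm,
                                    unsigned int pend0, unsigned int pend1,
                                    uint64_t diff0, uint64_t diff1,
                                    unsigned int lb_pending_cmds)
    {
        unsigned int best;

        if (pd1_handle == 0xFFFF)
            return arm;                        /* mirror gone: no balancing */

        best = (diff0 <= diff1) ? arm : arm ^ 1;   /* nearer head wins */

        /* flip if the chosen arm is too far ahead in queued commands */
        if ((best == arm && pend0 > pend1 + lb_pending_cmds) ||
            (best != arm && pend1 > pend0 + lb_pending_cmds))
            best ^= 1;

        return best;
    }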
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1829,7 +1829,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
               struct megasas_cmd_fusion *cmd)
 {
     u8 fp_possible;
-    u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
+    u32 start_lba_lo, start_lba_hi, device_id, datalength = 0, ld;
     struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
     union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
     struct IO_REQUEST_INFO io_info;
@@ -1837,16 +1837,18 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
     struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
     u8 *raidLUN;
     unsigned long spinlock_flags;
+    union RAID_CONTEXT_UNION *praid_context;
+    struct MR_LD_RAID *raid;
 
     device_id = MEGASAS_DEV_INDEX(scp);
 
     fusion = instance->ctrl_context;
 
     io_request = cmd->io_request;
-    io_request->RaidContext.raid_context.VirtualDiskTgtId =
+    io_request->RaidContext.raid_context.virtual_disk_tgt_id =
         cpu_to_le16(device_id);
     io_request->RaidContext.raid_context.status = 0;
-    io_request->RaidContext.raid_context.exStatus = 0;
+    io_request->RaidContext.raid_context.ex_status = 0;
 
     req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
 
@@ -1915,10 +1917,12 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
         io_info.isRead = 1;
 
     local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+    ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+    raid = MR_LdRaidGet(ld, local_map_ptr);
 
     if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
         instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
-        io_request->RaidContext.raid_context.regLockFlags = 0;
+        io_request->RaidContext.raid_context.reg_lock_flags = 0;
         fp_possible = 0;
     } else {
         if (MR_BuildRaidContext(instance, &io_info,
@@ -1945,6 +1949,8 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
             fp_possible = false;
     }
 
+    praid_context = &io_request->RaidContext;
+
     if (fp_possible) {
         megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
                    local_map_ptr, start_lba_lo);
@@ -1953,18 +1959,25 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
             (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
              << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
         if (fusion->adapter_type == INVADER_SERIES) {
-            if (io_request->RaidContext.raid_context.regLockFlags ==
+            if (io_request->RaidContext.raid_context.reg_lock_flags ==
                 REGION_TYPE_UNUSED)
                 cmd->request_desc->SCSIIO.RequestFlags =
                     (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
                     MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-            io_request->RaidContext.raid_context.Type
+            io_request->RaidContext.raid_context.type
                 = MPI2_TYPE_CUDA;
             io_request->RaidContext.raid_context.nseg = 0x1;
             io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
-            io_request->RaidContext.raid_context.regLockFlags |=
+            io_request->RaidContext.raid_context.reg_lock_flags |=
                 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
                  MR_RL_FLAGS_SEQ_NUM_ENABLE);
+        } else if (instance->is_ventura) {
+            io_request->RaidContext.raid_context_g35.type
+                = MPI2_TYPE_CUDA;
+            io_request->RaidContext.raid_context_g35.nseg = 0x1;
+            io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1;
+            io_request->IoFlags |=
+                cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
         }
         if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
             (io_info.isRead)) {
@@ -1974,6 +1987,13 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
                     &io_info);
             scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
             cmd->pd_r1_lb = io_info.pd_after_lb;
+            if (instance->is_ventura)
+                io_request->RaidContext.raid_context_g35.span_arm
+                    = io_info.span_arm;
+            else
+                io_request->RaidContext.raid_context.span_arm
+                    = io_info.span_arm;
+
         } else
             scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
 
@@ -1992,28 +2012,98 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
         io_request->DevHandle = io_info.devHandle;
         /* populate the LUN field */
         memcpy(io_request->LUN, raidLUN, 8);
+        if (instance->is_ventura) {
+            if (io_info.isRead) {
+                if ((raid->cpuAffinity.pdRead.cpu0) &&
+                    (raid->cpuAffinity.pdRead.cpu1))
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_FCFS;
+                else if (raid->cpuAffinity.pdRead.cpu1)
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_1;
+                else
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_0;
+            } else {
+                if ((raid->cpuAffinity.pdWrite.cpu0)
+                    && (raid->cpuAffinity.pdWrite.cpu1))
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_FCFS;
+                else if (raid->cpuAffinity.pdWrite.cpu1)
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_1;
+                else
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_0;
+                if (praid_context->raid_context_g35.routing_flags.bits.sld) {
+                    praid_context->raid_context_g35.raid_flags
+                        = (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
+                        << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
+                }
+            }
+        }
     } else {
-        io_request->RaidContext.raid_context.timeoutValue =
+        io_request->RaidContext.raid_context.timeout_value =
             cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
         cmd->request_desc->SCSIIO.RequestFlags =
             (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
              << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
         if (fusion->adapter_type == INVADER_SERIES) {
             if (io_info.do_fp_rlbypass ||
-                (io_request->RaidContext.raid_context.regLockFlags
+                (io_request->RaidContext.raid_context.reg_lock_flags
                 == REGION_TYPE_UNUSED))
                 cmd->request_desc->SCSIIO.RequestFlags =
                     (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
                     MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-            io_request->RaidContext.raid_context.Type
+            io_request->RaidContext.raid_context.type
                 = MPI2_TYPE_CUDA;
-            io_request->RaidContext.raid_context.regLockFlags |=
+            io_request->RaidContext.raid_context.reg_lock_flags |=
                 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
                  MR_RL_FLAGS_SEQ_NUM_ENABLE);
             io_request->RaidContext.raid_context.nseg = 0x1;
+        } else if (instance->is_ventura) {
+            io_request->RaidContext.raid_context_g35.type
+                = MPI2_TYPE_CUDA;
+            io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1;
+            io_request->RaidContext.raid_context_g35.nseg = 0x1;
         }
         io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
         io_request->DevHandle = cpu_to_le16(device_id);
+
+        if (instance->is_ventura) {
+            if (io_info.isRead) {
+                if ((raid->cpuAffinity.ldRead.cpu0)
+                    && (raid->cpuAffinity.ldRead.cpu1))
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_FCFS;
+                else if (raid->cpuAffinity.ldRead.cpu1)
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_1;
+                else
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_0;
+            } else {
+                if ((raid->cpuAffinity.ldWrite.cpu0) &&
+                    (raid->cpuAffinity.ldWrite.cpu1))
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_FCFS;
+                else if (raid->cpuAffinity.ldWrite.cpu1)
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_1;
+                else
+                    praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        = MR_RAID_CTX_CPUSEL_0;
+
+                if (io_request->RaidContext.raid_context_g35.stream_detected
+                    && (raid->level == 5) &&
+                    (raid->writeMode == MR_RL_WRITE_THROUGH_MODE)) {
+                    if (praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                        == MR_RAID_CTX_CPUSEL_FCFS)
+                        praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                            = MR_RAID_CTX_CPUSEL_0;
+                }
+            }
+        }
     } /* Not FP */
 }
 
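The cpu_sel routing above repeats one three-way pattern for each of the pdRead/pdWrite/ldRead/ldWrite affinity masks: both CPU bits set means let the hardware pick first-come-first-served, only cpu1 set means queue 1, otherwise queue 0. Factored out as a minimal sketch (constants mirror the values added to megaraid_sas_fusion.h below):

    #define MR_RAID_CTX_CPUSEL_0    0
    #define MR_RAID_CTX_CPUSEL_1    1
    #define MR_RAID_CTX_CPUSEL_FCFS 0xF

    /* map a (cpu0, cpu1) affinity pair from the RAID map to the
     * routing_flags cpu_sel field the diff sets inline in several places */
    static unsigned int cpu_sel_from_affinity(int cpu0, int cpu1)
    {
        if (cpu0 && cpu1)
            return MR_RAID_CTX_CPUSEL_FCFS;
        if (cpu1)
            return MR_RAID_CTX_CPUSEL_1;
        return MR_RAID_CTX_CPUSEL_0;
    }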
@@ -2048,9 +2138,9 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
     /* get RAID_Context pointer */
     pRAID_Context = &io_request->RaidContext.raid_context;
     /* Check with FW team */
-    pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
-    pRAID_Context->regLockRowLBA = 0;
-    pRAID_Context->regLockLength = 0;
+    pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+    pRAID_Context->reg_lock_row_lba = 0;
+    pRAID_Context->reg_lock_length = 0;
 
     if (fusion->fast_path_io && (
         device_id < instance->fw_supported_vd_count)) {
@@ -2069,7 +2159,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
         io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
         io_request->DevHandle = cpu_to_le16(device_id);
         io_request->LUN[1] = scmd->device->lun;
-        pRAID_Context->timeoutValue =
+        pRAID_Context->timeout_value =
             cpu_to_le16 (scmd->request->timeout / HZ);
         cmd->request_desc->SCSIIO.RequestFlags =
             (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
@@ -2077,9 +2167,11 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
     } else {
 
         /* set RAID context values */
-        pRAID_Context->configSeqNum = raid->seqNum;
-        pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
-        pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
+        pRAID_Context->config_seq_num = raid->seqNum;
+        if (!instance->is_ventura)
+            pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
+        pRAID_Context->timeout_value =
+            cpu_to_le16(raid->fpIoTimeoutForLd);
 
         /* get the DevHandle for the PD (since this is
            fpNonRWCapable, this is a single disk RAID0) */
@@ -2134,12 +2226,12 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
     io_request = cmd->io_request;
     /* get RAID_Context pointer */
     pRAID_Context = &io_request->RaidContext.raid_context;
-    pRAID_Context->regLockFlags = 0;
-    pRAID_Context->regLockRowLBA = 0;
-    pRAID_Context->regLockLength = 0;
+    pRAID_Context->reg_lock_flags = 0;
+    pRAID_Context->reg_lock_row_lba = 0;
+    pRAID_Context->reg_lock_length = 0;
     io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
     io_request->LUN[1] = scmd->device->lun;
-    pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+    pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
         << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
 
     /* If FW supports PD sequence number */
@@ -2148,24 +2240,27 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
         /* TgtId must be incremented by 255 as jbod seq number is index
          * below raid map
          */
-        pRAID_Context->VirtualDiskTgtId =
+        pRAID_Context->virtual_disk_tgt_id =
             cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
-        pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
+        pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
         io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
-        pRAID_Context->regLockFlags |=
+        if (instance->is_ventura)
+            io_request->RaidContext.raid_context_g35.routing_flags.bits.sqn = 1;
+        else
+            pRAID_Context->reg_lock_flags |=
             (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
-        pRAID_Context->Type = MPI2_TYPE_CUDA;
+        pRAID_Context->type = MPI2_TYPE_CUDA;
         pRAID_Context->nseg = 0x1;
     } else if (fusion->fast_path_io) {
-        pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
-        pRAID_Context->configSeqNum = 0;
+        pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+        pRAID_Context->config_seq_num = 0;
         local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
         io_request->DevHandle =
             local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
     } else {
         /* Want to send all IO via FW path */
-        pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
-        pRAID_Context->configSeqNum = 0;
+        pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
+        pRAID_Context->config_seq_num = 0;
         io_request->DevHandle = cpu_to_le16(0xFFFF);
     }
 
@@ -2181,14 +2276,14 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
         cmd->request_desc->SCSIIO.RequestFlags =
             (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
              MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
-        pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
-        pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
+        pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
+        pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
     } else {
         /* system pd Fast Path */
         io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
         timeout_limit = (scmd->device->type == TYPE_DISK) ?
                 255 : 0xFFFF;
-        pRAID_Context->timeoutValue =
+        pRAID_Context->timeout_value =
             cpu_to_le16((os_timeout_value > timeout_limit) ?
             timeout_limit : os_timeout_value);
         if (fusion->adapter_type == INVADER_SERIES)
@@ -2227,8 +2322,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
     io_request->Control = 0;
     io_request->EEDPBlockSize = 0;
     io_request->ChainOffset = 0;
-    io_request->RaidContext.raid_context.RAIDFlags = 0;
-    io_request->RaidContext.raid_context.Type = 0;
+    io_request->RaidContext.raid_context.raid_flags = 0;
+    io_request->RaidContext.raid_context.type = 0;
     io_request->RaidContext.raid_context.nseg = 0;
 
     memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
@@ -2273,11 +2368,16 @@ megasas_build_io_fusion(struct megasas_instance *instance,
         return 1;
     }
 
-    /* numSGE store lower 8 bit of sge_count.
-     * numSGEExt store higher 8 bit of sge_count
-     */
-    io_request->RaidContext.raid_context.numSGE = sge_count;
-    io_request->RaidContext.raid_context.numSGEExt = (u8)(sge_count >> 8);
+    if (instance->is_ventura)
+        io_request->RaidContext.raid_context_g35.num_sge = sge_count;
+    else {
+        /* numSGE store lower 8 bit of sge_count.
+         * numSGEExt store higher 8 bit of sge_count
+         */
+        io_request->RaidContext.raid_context.num_sge = sge_count;
+        io_request->RaidContext.raid_context.num_sge_ext =
+            (u8)(sge_count >> 8);
+    }
 
     io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
 
@@ -2326,6 +2426,10 @@ void megasas_fpio_to_ldio(struct megasas_instance *instance,
     struct megasas_cmd_fusion *cmd, struct scsi_cmnd *scmd)
 {
     struct fusion_context *fusion;
+    union RAID_CONTEXT_UNION *praid_context;
+    struct MR_LD_RAID *raid;
+    struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
+    u32 device_id, ld;
 
     fusion = instance->ctrl_context;
     cmd->request_desc->SCSIIO.RequestFlags =
@@ -2349,6 +2453,35 @@ void megasas_fpio_to_ldio(struct megasas_instance *instance,
     cmd->io_request->Control = 0;
     cmd->io_request->EEDPBlockSize = 0;
     cmd->is_raid_1_fp_write = 0;
+
+    device_id = MEGASAS_DEV_INDEX(cmd->scmd);
+    local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
+    ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
+    raid = MR_LdRaidGet(ld, local_map_ptr);
+    praid_context = &cmd->io_request->RaidContext;
+    if (cmd->scmd->sc_data_direction == PCI_DMA_FROMDEVICE) {
+        if ((raid->cpuAffinity.ldRead.cpu0)
+            && (raid->cpuAffinity.ldRead.cpu1))
+            praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                = MR_RAID_CTX_CPUSEL_FCFS;
+        else if (raid->cpuAffinity.ldRead.cpu1)
+            praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                = MR_RAID_CTX_CPUSEL_1;
+        else
+            praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                = MR_RAID_CTX_CPUSEL_0;
+    } else {
+        if ((raid->cpuAffinity.ldWrite.cpu0)
+            && (raid->cpuAffinity.ldWrite.cpu1))
+            praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                = MR_RAID_CTX_CPUSEL_FCFS;
+        else if (raid->cpuAffinity.ldWrite.cpu1)
+            praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                = MR_RAID_CTX_CPUSEL_1;
+        else
+            praid_context->raid_context_g35.routing_flags.bits.cpu_sel
+                = MR_RAID_CTX_CPUSEL_0;
+    }
 }
 
 /* megasas_prepate_secondRaid1_IO
@@ -2585,7 +2718,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
 
             scmd_local = cmd_fusion->scmd;
             status = scsi_io_req->RaidContext.raid_context.status;
-            extStatus = scsi_io_req->RaidContext.raid_context.exStatus;
+            extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
             sense = cmd_fusion->sense;
             data_length = scsi_io_req->DataLength;
 
@@ -2653,13 +2786,13 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
                     status =
                     r1_cmd->io_request->RaidContext.raid_context.status;
                     extStatus =
-                    r1_cmd->io_request->RaidContext.raid_context.exStatus;
+                    r1_cmd->io_request->RaidContext.raid_context.ex_status;
                     data_length =
                         r1_cmd->io_request->DataLength;
                     sense = r1_cmd->sense;
                 }
                 r1_cmd->io_request->RaidContext.raid_context.status = 0;
-                r1_cmd->io_request->RaidContext.raid_context.exStatus = 0;
+                r1_cmd->io_request->RaidContext.raid_context.ex_status = 0;
                 cmd_fusion->is_raid_1_fp_write = 0;
                 r1_cmd->is_raid_1_fp_write = 0;
                 r1_cmd->cmd_completed = false;
@@ -2669,10 +2802,8 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
             if (!cmd_fusion->is_raid_1_fp_write) {
                 map_cmd_status(fusion, scmd_local, status,
                     extStatus, data_length, sense);
-                scsi_io_req->RaidContext.raid_context.status
-                = 0;
-                scsi_io_req->RaidContext.raid_context.exStatus
-                = 0;
+                scsi_io_req->RaidContext.raid_context.status = 0;
+                scsi_io_req->RaidContext.raid_context.ex_status = 0;
                 megasas_return_cmd_fusion(instance, cmd_fusion);
                 scsi_dma_unmap(scmd_local);
                 scmd_local->scsi_done(scmd_local);
@ -59,6 +59,8 @@
|
||||||
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
|
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
|
||||||
#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
|
#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
|
||||||
#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
|
#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
|
||||||
|
#define MR_RL_WRITE_THROUGH_MODE 0x00
|
||||||
|
#define MR_RL_WRITE_BACK_MODE 0x01
|
||||||
|
|
||||||
/* T10 PI defines */
|
/* T10 PI defines */
|
||||||
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
|
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
|
||||||
|
@ -81,6 +83,11 @@
|
||||||
enum MR_RAID_FLAGS_IO_SUB_TYPE {
|
enum MR_RAID_FLAGS_IO_SUB_TYPE {
|
||||||
MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
|
MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
|
||||||
MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
|
MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
|
||||||
|
MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
|
||||||
|
MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
|
||||||
|
MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
|
||||||
|
MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
|
||||||
|
MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7
|
||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -109,29 +116,29 @@ enum MR_FUSION_ADAPTER_TYPE {
 
 struct RAID_CONTEXT {
 #if defined(__BIG_ENDIAN_BITFIELD)
 	u8 nseg:4;
-	u8 Type:4;
+	u8 type:4;
 #else
-	u8 Type:4;
+	u8 type:4;
 	u8 nseg:4;
 #endif
 	u8 resvd0;
-	__le16 timeoutValue;
-	u8 regLockFlags;
+	__le16 timeout_value;
+	u8 reg_lock_flags;
 	u8 resvd1;
-	__le16 VirtualDiskTgtId;
-	__le64 regLockRowLBA;
-	__le32 regLockLength;
-	__le16 nextLMId;
-	u8 exStatus;
+	__le16 virtual_disk_tgt_id;
+	__le64 reg_lock_row_lba;
+	__le32 reg_lock_length;
+	__le16 next_lmid;
+	u8 ex_status;
 	u8 status;
-	u8 RAIDFlags;
-	u8 numSGE;
-	__le16 configSeqNum;
-	u8 spanArm;
+	u8 raid_flags;
+	u8 num_sge;
+	__le16 config_seq_num;
+	u8 span_arm;
 	u8 priority;
-	u8 numSGEExt;
+	u8 num_sge_ext;
 	u8 resvd2;
 };
 
 /*
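The renames are mechanical (CamelCase to snake_case), but the mirrored declaration order under __BIG_ENDIAN_BITFIELD deserves a note: both variants must put type in the low nibble of byte 0 as firmware reads it. A standalone illustration (plain C11, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

/* Little-endian-ABI view of byte 0 of RAID_CONTEXT: 'type' occupies
 * bits 3:0 and 'nseg' bits 7:4. On a big-endian ABI the declaration
 * order must be flipped to keep the same wire layout. */
union raid_ctx_byte0 {
	struct {
		uint8_t type:4;
		uint8_t nseg:4;
	};
	uint8_t raw;
};

int main(void)
{
	union raid_ctx_byte0 b = { .type = 0x3, .nseg = 0x1 };

	printf("raw = 0x%02x\n", b.raw);	/* prints 0x13 on LE */
	return 0;
}
```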
@@ -187,7 +194,7 @@ struct RAID_CONTEXT_G35 {
 	} smid;
 	u8 ex_status;       /* 0x16 : OUT */
 	u8 status;          /* 0x17 status */
-	u8 RAIDFlags;       /* 0x18 resvd[7:6], ioSubType[5:4],
+	u8 raid_flags;      /* 0x18 resvd[7:6], ioSubType[5:4],
 			     * resvd[3:1], preferredCpu[0]
 			     */
 	u8 span_arm;        /* 0x1C span[7:5], arm[4:0] */
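The layout comment on raid_flags gives the sub-field positions; a hedged sketch of packing an I/O sub-type from the enum above into the field (the shift macro and helper are illustrative, not part of this patch):

```c
/* Illustrative only: per the comment above, ioSubType sits at
 * bits [5:4] and preferredCpu at bit 0, so sub-types are shifted
 * in by 4. Macro and function names are hypothetical. */
#define MR_RAID_CTX_IO_SUB_TYPE_SHIFT	4

static inline u8 mr_pack_raid_flags(u8 io_sub_type, u8 preferred_cpu)
{
	return (io_sub_type << MR_RAID_CTX_IO_SUB_TYPE_SHIFT) |
	       (preferred_cpu & 0x1);
}

/* e.g. mr_pack_raid_flags(MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD, 0) */
```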
@@ -672,14 +679,17 @@ struct MPI2_IOC_INIT_REQUEST {
 #define MAX_RAIDMAP_ROW_SIZE		(MAX_ROW_SIZE)
 #define MAX_LOGICAL_DRIVES		64
 #define MAX_LOGICAL_DRIVES_EXT		256
+#define MAX_LOGICAL_DRIVES_DYN		512
 #define MAX_RAIDMAP_LOGICAL_DRIVES	(MAX_LOGICAL_DRIVES)
 #define MAX_RAIDMAP_VIEWS		(MAX_LOGICAL_DRIVES)
 #define MAX_ARRAYS			128
 #define MAX_RAIDMAP_ARRAYS		(MAX_ARRAYS)
 #define MAX_ARRAYS_EXT			256
 #define MAX_API_ARRAYS_EXT		(MAX_ARRAYS_EXT)
+#define MAX_API_ARRAYS_DYN		512
 #define MAX_PHYSICAL_DEVICES		256
 #define MAX_RAIDMAP_PHYSICAL_DEVICES	(MAX_PHYSICAL_DEVICES)
+#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
 #define MR_DCMD_LD_MAP_GET_INFO		0x0300e101
 #define MR_DCMD_SYSTEM_PD_MAP_GET_INFO	0x0200e102
 #define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
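Each new _DYN limit is exactly double its EXT-generation counterpart, which is what lets the driver-local map arrays below host the largest firmware map. A sketch of a build-time guard for that relationship (hypothetical check, assuming BUILD_BUG_ON from <linux/bug.h>):

```c
#include <linux/bug.h>

/* Hypothetical sanity check: the dynamic map doubles every
 * EXT-generation limit (256 -> 512 VDs, arrays, and PDs). */
static inline void mr_dyn_limits_check(void)
{
	BUILD_BUG_ON(MAX_LOGICAL_DRIVES_DYN != 2 * MAX_LOGICAL_DRIVES_EXT);
	BUILD_BUG_ON(MAX_API_ARRAYS_DYN != 2 * MAX_API_ARRAYS_EXT);
	BUILD_BUG_ON(MAX_RAIDMAP_PHYSICAL_DEVICES_DYN !=
		     2 * MAX_RAIDMAP_PHYSICAL_DEVICES);
}
```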
@@ -726,12 +736,56 @@ struct MR_SPAN_BLOCK_INFO {
 	struct MR_SPAN_INFO block_span_info;
 };
 
+#define MR_RAID_CTX_CPUSEL_0		0
+#define MR_RAID_CTX_CPUSEL_1		1
+#define MR_RAID_CTX_CPUSEL_2		2
+#define MR_RAID_CTX_CPUSEL_3		3
+#define MR_RAID_CTX_CPUSEL_FCFS		0xF
+
+struct MR_CPU_AFFINITY_MASK {
+	union {
+		struct {
+#ifndef MFI_BIG_ENDIAN
+		u8 hw_path:1;
+		u8 cpu0:1;
+		u8 cpu1:1;
+		u8 cpu2:1;
+		u8 cpu3:1;
+		u8 reserved:3;
+#else
+		u8 reserved:3;
+		u8 cpu3:1;
+		u8 cpu2:1;
+		u8 cpu1:1;
+		u8 cpu0:1;
+		u8 hw_path:1;
+#endif
+		};
+		u8 core_mask;
+	};
+};
+
+struct MR_IO_AFFINITY {
+	union {
+		struct {
+			struct MR_CPU_AFFINITY_MASK pdRead;
+			struct MR_CPU_AFFINITY_MASK pdWrite;
+			struct MR_CPU_AFFINITY_MASK ldRead;
+			struct MR_CPU_AFFINITY_MASK ldWrite;
+		};
+		u32 word;
+	};
+	u8 maxCores;    /* Total cores + HW Path in ROC */
+	u8 reserved[3];
+};
+
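The union overlay is the point of MR_CPU_AFFINITY_MASK: the same byte can be handled wholesale as core_mask or tested bit by bit for steering hints. A hedged sketch of a consumer (the function is illustrative, not from the patch):

```c
/* Illustrative consumer: steer an LD write to CPU0 only when the
 * firmware supplied any hint at all. 'core_mask' views the same
 * byte as the hw_path/cpu0..cpu3 bitfields. */
static bool mr_ld_write_prefers_cpu0(const struct MR_IO_AFFINITY *aff)
{
	if (!aff->ldWrite.core_mask)	/* no steering hint from FW */
		return false;

	return aff->ldWrite.cpu0;
}
```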
 struct MR_LD_RAID {
 	struct {
 #if defined(__BIG_ENDIAN_BITFIELD)
-	u32 reserved4:3;
+	u32 reserved4:2;
 	u32 fp_cache_bypass_capable:1;
 	u32 fp_rmw_capable:1;
+	u32 disable_coalescing:1;
 	u32 fpBypassRegionLock:1;
 	u32 tmCapable:1;
 	u32 fpNonRWCapable:1;
@@ -759,9 +813,10 @@ struct MR_LD_RAID {
 	u32 fpNonRWCapable:1;
 	u32 tmCapable:1;
 	u32 fpBypassRegionLock:1;
-	u32 fp_rmw_capable:1;
-	u32 fp_cache_bypass_capable:1;
-	u32 reserved4:3;
+	u32 disable_coalescing:1;
+	u32 fp_rmw_capable:1;
+	u32 fp_cache_bypass_capable:1;
+	u32 reserved4:2;
 #endif
 	} capability;
 	__le32 reserved6;
@@ -788,7 +843,36 @@ struct MR_LD_RAID {
 
 	u8	LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
 	u8	fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
-	u8	reserved3[0x80-0x2D]; /* 0x2D */
+	/* 0x2D This LD accept priority boost of this type */
+	u8 ld_accept_priority_type;
+	u8 reserved2[2];	/* 0x2E - 0x2F */
+	/* 0x30 - 0x33, Logical block size for the LD */
+	u32 logical_block_length;
+	struct {
+#ifndef MFI_BIG_ENDIAN
+	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+	u32 ld_pi_exp:4;
+	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
+	 * BLOCK EXPONENT from READ CAPACITY 16
+	 */
+	u32 ld_logical_block_exp:4;
+	u32 reserved1:24;	/* 0x34 */
+#else
+	u32 reserved1:24;	/* 0x34 */
+	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
+	 * BLOCK EXPONENT from READ CAPACITY 16
+	 */
+	u32 ld_logical_block_exp:4;
+	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
+	u32 ld_pi_exp:4;
+#endif
+	};	/* 0x34 - 0x37 */
+	/* 0x38 - 0x3f, This will determine which
+	 * core will process LD IO and PD IO.
+	 */
+	struct MR_IO_AFFINITY cpuAffinity;
+	/* Bit definitions are specified by MR_IO_AFFINITY */
+	u8 reserved3[0x80-0x40];	/* 0x40 - 0x7f */
 };
 
 struct MR_LD_SPAN_MAP {
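The two new exponents mirror READ CAPACITY(16) fields, so the LD's physical block size falls out of a shift. A sketch under that assumption (helper name hypothetical):

```c
/* READ CAPACITY(16) semantics: LOGICAL BLOCKS PER PHYSICAL BLOCK
 * EXPONENT gives physical = logical << exponent. Both inputs come
 * straight from the MR_LD_RAID fields added above. */
static u32 mr_ld_physical_block_size(const struct MR_LD_RAID *raid)
{
	return raid->logical_block_length << raid->ld_logical_block_exp;
}
```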
@@ -846,6 +930,91 @@ struct MR_LD_TARGET_SYNC {
 	__le16 seqNum;
 };
 
+/*
+ * RAID Map descriptor Types.
+ * Each element should uniquely identify one data structure in the RAID map
+ */
+enum MR_RAID_MAP_DESC_TYPE {
+	/* MR_DEV_HANDLE_INFO data */
+	RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
+	/* target to Ld num Index map */
+	RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
+	/* MR_ARRAY_INFO data */
+	RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
+	/* MR_LD_SPAN_MAP data */
+	RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
+	RAID_MAP_DESC_TYPE_COUNT,
+};
+
+/*
+ * This table defines the offset, size and num elements of each descriptor
+ * type in the RAID Map buffer
+ */
+struct MR_RAID_MAP_DESC_TABLE {
+	/* Raid map descriptor type */
+	u32 raid_map_desc_type;
+	/* Offset into the RAID map buffer where
+	 * descriptor data is saved
+	 */
+	u32 raid_map_desc_offset;
+	/* total size of the
+	 * descriptor buffer
+	 */
+	u32 raid_map_desc_buffer_size;
+	/* Number of elements contained in the
+	 * descriptor buffer
+	 */
+	u32 raid_map_desc_elements;
+};
+
+/*
+ * Dynamic Raid Map Structure.
+ */
+struct MR_FW_RAID_MAP_DYNAMIC {
+	u32 raid_map_size;	/* total size of RAID Map structure */
+	u32 desc_table_offset;	/* Offset of desc table into RAID map */
+	u32 desc_table_size;	/* Total Size of desc table */
+	/* Total Number of elements in the desc table */
+	u32 desc_table_num_elements;
+	u64 reserved1;
+	u32 reserved2[3];	/* future use */
+	/* timeout value used by driver in FP IOs */
+	u8 fp_pd_io_timeout_sec;
+	u8 reserved3[3];
+	/* when this seqNum increments, driver needs to
+	 * release RMW buffers asap
+	 */
+	u32 rmw_fp_seq_num;
+	u16 ld_count;	/* count of lds */
+	u16 ar_count;	/* count of arrays */
+	u16 span_count;	/* count of spans */
+	u16 reserved4[3];
+	/*
+	 * The below structure of pointers is only to be used by the driver.
+	 * This is added in the API to reduce the amount of code changes
+	 * needed in the driver to support dynamic RAID map. Firmware should
+	 * not update these pointers while preparing the raid map.
+	 */
+	union {
+		struct {
+			struct MR_DEV_HANDLE_INFO *dev_hndl_info;
+			u16 *ld_tgt_id_to_ld;
+			struct MR_ARRAY_INFO *ar_map_info;
+			struct MR_LD_SPAN_MAP *ld_span_map;
+		};
+		u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
+	};
+	/*
+	 * RAID Map descriptor table defines the layout of data in the RAID
+	 * Map. The size of the descriptor table itself could change.
+	 */
+	/* Variable Size descriptor Table. */
+	struct MR_RAID_MAP_DESC_TABLE
+		raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
+	/* Variable Size buffer containing all data */
+	u32 raid_map_desc_data[1];
+};	/* Dynamically sized RAID Map structure */
+
 #define IEEE_SGE_FLAGS_ADDR_MASK            (0x03)
 #define IEEE_SGE_FLAGS_SYSTEM_ADDR          (0x00)
 #define IEEE_SGE_FLAGS_IOCDDR_ADDR          (0x01)
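The dynamic map is self-describing: the descriptor table records where each data structure sits inside raid_map_desc_data. A hedged sketch of how the driver-local union pointers could be resolved from it (the actual population logic lives on the .c side of the driver and may differ):

```c
/* Sketch, assuming every descriptor offset is relative to the start
 * of the RAID map buffer, as the raid_map_desc_offset comment states. */
static void mr_resolve_dyn_map_descs(struct MR_FW_RAID_MAP_DYNAMIC *fw_map)
{
	struct MR_RAID_MAP_DESC_TABLE *desc =
		(void *)fw_map + fw_map->desc_table_offset;
	u32 i;

	for (i = 0; i < fw_map->desc_table_num_elements; i++, desc++) {
		void *data = (void *)fw_map + desc->raid_map_desc_offset;

		switch (desc->raid_map_desc_type) {
		case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
			fw_map->dev_hndl_info = data;
			break;
		case RAID_MAP_DESC_TYPE_TGTID_INFO:
			fw_map->ld_tgt_id_to_ld = data;
			break;
		case RAID_MAP_DESC_TYPE_ARRAY_INFO:
			fw_map->ar_map_info = data;
			break;
		case RAID_MAP_DESC_TYPE_SPAN_INFO:
			fw_map->ld_span_map = data;
			break;
		}
	}
}
```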
@@ -955,9 +1124,10 @@ struct MR_DRV_RAID_MAP {
 	__le16 spanCount;
 	__le16 reserve3;
 
-	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
-	u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
-	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
+	struct MR_DEV_HANDLE_INFO
+		devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
+	u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
+	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
 	struct MR_LD_SPAN_MAP ldSpanMap[1];
 
 };
@@ -969,7 +1139,7 @@ struct MR_DRV_RAID_MAP {
 struct MR_DRV_RAID_MAP_ALL {
 
 	struct MR_DRV_RAID_MAP raidMap;
-	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
 } __packed;
 
@@ -1088,7 +1258,7 @@ struct fusion_context {
 	u8 chain_offset_io_request;
 	u8 chain_offset_mfi_pthru;
 
-	struct MR_FW_RAID_MAP_ALL *ld_map[2];
+	struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
 	dma_addr_t ld_map_phys[2];
 
 	/*Non dma-able memory. Driver local copy.*/
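With ld_map retyped to the dynamic map, the double-buffering scheme is unchanged: the driver keeps two DMA-able map buffers and ping-pongs between them across firmware map refreshes. A sketch of the selection (illustrative; the real driver picks the buffer by the parity of its running map sequence number):

```c
/* Illustrative only: 'map_seq' stands in for the driver's running
 * map sequence counter; its parity selects the active DMA buffer
 * while the other one receives the next firmware map. */
static struct MR_FW_RAID_MAP_DYNAMIC *
mr_active_fw_map(struct fusion_context *fusion, u64 map_seq)
{
	return fusion->ld_map[map_seq & 1];
}
```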
@@ -1096,6 +1266,8 @@ struct fusion_context {
 
 	u32 max_map_sz;
 	u32 current_map_sz;
+	u32 old_map_sz;
+	u32 new_map_sz;
 	u32 drv_map_sz;
 	u32 drv_map_pages;
 	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];