@@ -41,11 +41,14 @@
 #include "target_core_alua.h"
 #include "target_core_ua.h"
 
-static sense_reason_t core_alua_check_transition(int state, int *primary);
+static sense_reason_t core_alua_check_transition(int state, int valid,
+					int *primary);
 static int core_alua_set_tg_pt_secondary_state(
 		struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
 		struct se_port *port, int explicit, int offline);
 
+static char *core_alua_dump_state(int state);
+
 static u16 alua_lu_gps_counter;
 static u32 alua_lu_gps_count;
 
@@ -54,6 +57,86 @@ static LIST_HEAD(lu_gps_list);
 
 struct t10_alua_lu_gp *default_lu_gp;
 
+/*
+ * REPORT REFERRALS
+ *
+ * See sbc3r35 section 5.23
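+ *
+ * Each returned referral descriptor consists of a four byte header
+ * (byte 3 carries the number of target port group entries), the first
+ * and last LBA of the segment as big-endian 64-bit values, and one
+ * four byte entry per target port group holding the ALUA state and
+ * the target port group id.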
+ */
+sense_reason_t
+target_emulate_report_referrals(struct se_cmd *cmd)
+{
+	struct se_device *dev = cmd->se_dev;
+	struct t10_alua_lba_map *map;
+	struct t10_alua_lba_map_member *map_mem;
+	unsigned char *buf;
+	u32 rd_len = 0, off;
+
+	if (cmd->data_length < 4) {
+		pr_warn("REPORT REFERRALS allocation length %u too"
+			" small\n", cmd->data_length);
+		return TCM_INVALID_CDB_FIELD;
+	}
+
+	buf = transport_kmap_data_sg(cmd);
+	if (!buf)
+		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
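+	/* Referral descriptors start after the four byte returned-data header */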
+	off = 4;
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	if (list_empty(&dev->t10_alua.lba_map_list)) {
+		spin_unlock(&dev->t10_alua.lba_map_lock);
+		transport_kunmap_data_sg(cmd);
+
+		return TCM_UNSUPPORTED_SCSI_OPCODE;
+	}
+
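+	/*
+	 * Fill in descriptors, checking the allocation length before each
+	 * write; rd_len still accumulates the full length so the initiator
+	 * can detect truncation.
+	 */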
+	list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+			    lba_map_list) {
+		int desc_num = off + 3;
+		int pg_num;
+
+		off += 4;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_first_lba, &buf[off]);
+		off += 8;
+		if (cmd->data_length > off)
+			put_unaligned_be64(map->lba_map_last_lba, &buf[off]);
+		off += 8;
+		rd_len += 20;
+		pg_num = 0;
+		list_for_each_entry(map_mem, &map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			int alua_state = map_mem->lba_map_mem_alua_state;
+			int alua_pg_id = map_mem->lba_map_mem_alua_pg_id;
+
+			if (cmd->data_length > off)
+				buf[off] = alua_state & 0x0f;
+			off += 2;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id >> 8) & 0xff;
+			off++;
+			if (cmd->data_length > off)
+				buf[off] = (alua_pg_id & 0xff);
+			off++;
+			rd_len += 4;
+			pg_num++;
+		}
+		if (cmd->data_length > desc_num)
+			buf[desc_num] = pg_num;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+
+	/*
+	 * Set the RETURN DATA LENGTH in the header of the DataIN payload
+	 */
+	put_unaligned_be16(rd_len, &buf[2]);
+
+	transport_kunmap_data_sg(cmd);
+
+	target_complete_cmd(cmd, GOOD);
+	return 0;
+}
+
 /*
  * REPORT_TARGET_PORT_GROUPS
  *
@@ -210,7 +293,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 	unsigned char *ptr;
 	sense_reason_t rc = TCM_NO_SENSE;
 	u32 len = 4; /* Skip over RESERVED area in header */
-	int alua_access_state, primary = 0;
+	int alua_access_state, primary = 0, valid_states;
 	u16 tg_pt_id, rtpi;
 
 	if (!l_port)
@@ -252,6 +335,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		rc = TCM_UNSUPPORTED_SCSI_OPCODE;
 		goto out;
 	}
+	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
 
 	ptr = &buf[4]; /* Skip over RESERVED area in header */
 
@@ -263,7 +347,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
 		 * the state is a primary or secondary target port asymmetric
 		 * access state.
 		 */
-		rc = core_alua_check_transition(alua_access_state, &primary);
+		rc = core_alua_check_transition(alua_access_state,
+					valid_states, &primary);
 		if (rc) {
 			/*
 			 * If the SET TARGET PORT GROUPS attempts to establish
@@ -386,6 +471,81 @@ static inline int core_alua_state_nonoptimized(
 	return 0;
 }
 
+static inline int core_alua_state_lba_dependent(
+	struct se_cmd *cmd,
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	u8 *alua_ascq)
+{
+	struct se_device *dev = cmd->se_dev;
+	u64 segment_size, segment_mult, sectors, lba;
+
+	/* Only need to check for cdb actually containing LBAs */
+	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB))
+		return 0;
+
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	segment_size = dev->t10_alua.lba_map_segment_size;
+	segment_mult = dev->t10_alua.lba_map_segment_multiplier;
+	sectors = cmd->data_length / dev->dev_attrib.block_size;
+
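+	/*
+	 * Walk every LBA the command touches; each one must fall into a
+	 * mapped segment that is usable through this target port group.
+	 */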
+	lba = cmd->t_task_lba;
+	while (lba < cmd->t_task_lba + sectors) {
+		struct t10_alua_lba_map *cur_map = NULL, *map;
+		struct t10_alua_lba_map_member *map_mem;
+
+		list_for_each_entry(map, &dev->t10_alua.lba_map_list,
+				    lba_map_list) {
+			u64 start_lba, last_lba;
+			u64 first_lba = map->lba_map_first_lba;
+
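+			/*
+			 * A segment multiplier makes the map repeat, so
+			 * compare the offset of this LBA within one map
+			 * period rather than the absolute LBA.
+			 */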
+			if (segment_mult) {
+				u64 tmp = lba;
+				start_lba = sector_div(tmp, segment_size * segment_mult);
+
+				last_lba = first_lba + segment_size - 1;
+				if (start_lba >= first_lba &&
+				    start_lba <= last_lba) {
+					lba += segment_size;
+					cur_map = map;
+					break;
+				}
+			} else {
+				last_lba = map->lba_map_last_lba;
+				if (lba >= first_lba && lba <= last_lba) {
+					lba = last_lba + 1;
+					cur_map = map;
+					break;
+				}
+			}
+		}
+		if (!cur_map) {
+			spin_unlock(&dev->t10_alua.lba_map_lock);
+			*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+			return 1;
+		}
+		list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
+				    lba_map_mem_list) {
+			if (map_mem->lba_map_mem_alua_pg_id !=
+			    tg_pt_gp->tg_pt_gp_id)
+				continue;
+			switch (map_mem->lba_map_mem_alua_state) {
+			case ALUA_ACCESS_STATE_STANDBY:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+				return 1;
+			case ALUA_ACCESS_STATE_UNAVAILABLE:
+				spin_unlock(&dev->t10_alua.lba_map_lock);
+				*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+				return 1;
+			default:
+				break;
+			}
+		}
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	return 0;
+}
+
 static inline int core_alua_state_standby(
 	struct se_cmd *cmd,
 	unsigned char *cdb,
@@ -583,6 +743,9 @@ target_alua_state_check(struct se_cmd *cmd)
 	case ALUA_ACCESS_STATE_TRANSITION:
 		ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
 		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+		break;
 	/*
 	 * OFFLINE is a secondary ALUA target port group access state, that is
 	 * handled above with struct se_port->sep_tg_pt_secondary_offline=1
@@ -618,17 +781,36 @@ out:
  * Check implicit and explicit ALUA state change request.
  */
 static sense_reason_t
-core_alua_check_transition(int state, int *primary)
+core_alua_check_transition(int state, int valid, int *primary)
 {
+	/*
+	 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
+	 * defined as primary target port asymmetric access states.
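+	 * The "valid" bitmask is the tg_pt_gp_alua_supported_states of
+	 * the group, so requests for unsupported states fail up front.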
+	 */
 	switch (state) {
 	case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
+		if (!(valid & ALUA_AO_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
+		if (!(valid & ALUA_AN_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_STANDBY:
+		if (!(valid & ALUA_S_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
-		/*
-		 * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
-		 * defined as primary target port asymmetric access states.
-		 */
+		if (!(valid & ALUA_U_SUP))
+			goto not_supported;
+		*primary = 1;
+		break;
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		if (!(valid & ALUA_LBD_SUP))
+			goto not_supported;
 		*primary = 1;
 		break;
 	case ALUA_ACCESS_STATE_OFFLINE:
@@ -636,14 +818,27 @@ core_alua_check_transition(int state, int *primary)
 		 * OFFLINE state is defined as a secondary target port
 		 * asymmetric access state.
 		 */
+		if (!(valid & ALUA_O_SUP))
+			goto not_supported;
 		*primary = 0;
 		break;
+	case ALUA_ACCESS_STATE_TRANSITION:
+		/*
+		 * Transitioning is set internally, and
+		 * cannot be selected manually.
+		 */
+		goto not_supported;
 	default:
 		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 		return TCM_INVALID_PARAMETER_LIST;
 	}
 
 	return 0;
+
+not_supported:
+	pr_err("ALUA access state %s not supported\n",
+	       core_alua_dump_state(state));
+	return TCM_INVALID_PARAMETER_LIST;
 }
 
 static char *core_alua_dump_state(int state)
@@ -653,12 +848,16 @@ static char *core_alua_dump_state(int state)
 		return "Active/Optimized";
 	case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
 		return "Active/NonOptimized";
+	case ALUA_ACCESS_STATE_LBA_DEPENDENT:
+		return "LBA Dependent";
 	case ALUA_ACCESS_STATE_STANDBY:
 		return "Standby";
 	case ALUA_ACCESS_STATE_UNAVAILABLE:
 		return "Unavailable";
 	case ALUA_ACCESS_STATE_OFFLINE:
 		return "Offline";
+	case ALUA_ACCESS_STATE_TRANSITION:
+		return "Transitioning";
 	default:
 		return "Unknown";
 	}
@@ -735,58 +934,49 @@ static int core_alua_write_tpg_metadata(
  * Called with tg_pt_gp->tg_pt_gp_md_mutex held
  */
 static int core_alua_update_tpg_primary_metadata(
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	int primary_state,
-	unsigned char *md_buf)
+	struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
+	unsigned char *md_buf;
 	struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
 	char path[ALUA_METADATA_PATH_LEN];
-	int len;
+	int len, rc;
+
+	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
 
 	memset(path, 0, ALUA_METADATA_PATH_LEN);
 
-	len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
+	len = snprintf(md_buf, ALUA_MD_BUF_LEN,
 			"tg_pt_gp_id=%hu\n"
 			"alua_access_state=0x%02x\n"
 			"alua_access_status=0x%02x\n",
-			tg_pt_gp->tg_pt_gp_id, primary_state,
+			tg_pt_gp->tg_pt_gp_id,
+			tg_pt_gp->tg_pt_gp_alua_pending_state,
 			tg_pt_gp->tg_pt_gp_alua_access_status);
 
 	snprintf(path, ALUA_METADATA_PATH_LEN,
 		"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
 		config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
 
-	return core_alua_write_tpg_metadata(path, md_buf, len);
+	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(md_buf);
+	return rc;
 }
 
-static int core_alua_do_transition_tg_pt(
-	struct t10_alua_tg_pt_gp *tg_pt_gp,
-	struct se_port *l_port,
-	struct se_node_acl *nacl,
-	unsigned char *md_buf,
-	int new_state,
-	int explicit)
+static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 {
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
+		struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
 	struct se_dev_entry *se_deve;
 	struct se_lun_acl *lacl;
 	struct se_port *port;
 	struct t10_alua_tg_pt_gp_member *mem;
-	int old_state = 0;
-	/*
-	 * Save the old primary ALUA access state, and set the current state
-	 * to ALUA_ACCESS_STATE_TRANSITION.
-	 */
-	old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-		ALUA_ACCESS_STATE_TRANSITION);
-	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
-		ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
-		ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
-	/*
-	 * Check for the optional ALUA primary state transition delay
-	 */
-	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
-		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
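+	/*
+	 * The work item carries no arguments, so whether this transition
+	 * was explicit is recovered from the access status set by
+	 * core_alua_do_transition_tg_pt().
+	 */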
+	bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
+			 ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
 
 	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 	list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
@@ -821,9 +1011,12 @@ static int core_alua_do_transition_tg_pt(
 			if (!lacl)
 				continue;
 
-			if (explicit &&
-			   (nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
-			   (l_port != NULL) && (l_port == port))
+			if ((tg_pt_gp->tg_pt_gp_alua_access_status ==
+			     ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
+			    (tg_pt_gp->tg_pt_gp_alua_nacl != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_nacl == lacl->se_lun_nacl) &&
+			    (tg_pt_gp->tg_pt_gp_alua_port != NULL) &&
+			    (tg_pt_gp->tg_pt_gp_alua_port == port))
 				continue;
 
 			core_scsi3_ua_allocate(lacl->se_lun_nacl,
@@ -851,20 +1044,102 @@ static int core_alua_do_transition_tg_pt(
 	 */
 	if (tg_pt_gp->tg_pt_gp_write_metadata) {
 		mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-		core_alua_update_tpg_primary_metadata(tg_pt_gp,
-				new_state, md_buf);
+		core_alua_update_tpg_primary_metadata(tg_pt_gp);
 		mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
 	}
 	/*
 	 * Set the current primary ALUA access state to the requested new state
 	 */
-	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		   tg_pt_gp->tg_pt_gp_alua_pending_state);
 
 	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" from primary access state %s to %s\n", (explicit) ? "explicit" :
 		"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
-		core_alua_dump_state(new_state));
+		tg_pt_gp->tg_pt_gp_id,
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
+		core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	smp_mb__after_atomic_dec();
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
+	if (tg_pt_gp->tg_pt_gp_transition_complete)
+		complete(tg_pt_gp->tg_pt_gp_transition_complete);
+}
+
+static int core_alua_do_transition_tg_pt(
+	struct t10_alua_tg_pt_gp *tg_pt_gp,
+	int new_state,
+	int explicit)
+{
+	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	/* Nothing to be done here */
+	if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+		return 0;
+
+	if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+		return -EAGAIN;
+
+	/*
+	 * Flush any pending transitions
+	 */
+	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
+	    atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
+	    ALUA_ACCESS_STATE_TRANSITION) {
+		/* Just in case */
+		tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+		flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+		wait_for_completion(&wait);
+		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+		return 0;
+	}
+
+	/*
+	 * Save the old primary ALUA access state, and set the current state
+	 * to ALUA_ACCESS_STATE_TRANSITION.
+	 */
+	tg_pt_gp->tg_pt_gp_alua_previous_state =
+		atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+	tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
+	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
+		   ALUA_ACCESS_STATE_TRANSITION);
+	tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
+		ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
+		ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
+
+	/*
+	 * Check for the optional ALUA primary state transition delay
+	 */
+	if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
+		msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
+
+	/*
+	 * Take a reference for workqueue item
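+	 * that is dropped again in core_alua_do_transition_tg_pt_work().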
+	 */
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
+	smp_mb__after_atomic_inc();
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+
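+	/*
+	 * An implicit transition with a configured delay completes
+	 * asynchronously after tg_pt_gp_implicit_trans_secs; all other
+	 * transitions are queued immediately and waited for here.
+	 */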
+	if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
+		unsigned long transition_tmo;
+
+		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
+		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+				   &tg_pt_gp->tg_pt_gp_transition_work,
+				   transition_tmo);
+	} else {
+		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
+		wait_for_completion(&wait);
+		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+	}
 
 	return 0;
 }
@@ -878,23 +1153,15 @@ int core_alua_do_port_transition(
 	int explicit)
 {
 	struct se_device *dev;
-	struct se_port *port;
-	struct se_node_acl *nacl;
 	struct t10_alua_lu_gp *lu_gp;
 	struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
-	unsigned char *md_buf;
-	int primary;
+	int primary, valid_states, rc = 0;
 
-	if (core_alua_check_transition(new_state, &primary) != 0)
+	valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
+	if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
 		return -EINVAL;
 
-	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
-	if (!md_buf) {
-		pr_err("Unable to allocate buf for ALUA metadata\n");
-		return -ENOMEM;
-	}
-
 	local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
 	spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = local_lu_gp_mem->lu_gp;
@@ -911,12 +1178,13 @@ int core_alua_do_port_transition(
 		 * core_alua_do_transition_tg_pt() will always return
 		 * success.
 		 */
-		core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
-					      md_buf, new_state, explicit);
+		l_tg_pt_gp->tg_pt_gp_alua_port = l_port;
+		l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
+		rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
+						   new_state, explicit);
 		atomic_dec(&lu_gp->lu_gp_ref_cnt);
 		smp_mb__after_atomic_dec();
-		kfree(md_buf);
-		return 0;
+		return rc;
 	}
 	/*
 	 * For all other LU groups aside from 'default_lu_gp', walk all of
@@ -951,11 +1219,11 @@ int core_alua_do_port_transition(
 				continue;
 
 			if (l_tg_pt_gp == tg_pt_gp) {
-				port = l_port;
-				nacl = l_nacl;
+				tg_pt_gp->tg_pt_gp_alua_port = l_port;
+				tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
 			} else {
-				port = NULL;
-				nacl = NULL;
+				tg_pt_gp->tg_pt_gp_alua_port = NULL;
+				tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
 			}
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_inc();
@@ -964,12 +1232,14 @@ int core_alua_do_port_transition(
 			 * core_alua_do_transition_tg_pt() will always return
 			 * success.
 			 */
-			core_alua_do_transition_tg_pt(tg_pt_gp, port,
-					nacl, md_buf, new_state, explicit);
+			rc = core_alua_do_transition_tg_pt(tg_pt_gp,
+					new_state, explicit);
 
 			spin_lock(&dev->t10_alua.tg_pt_gps_lock);
 			atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			smp_mb__after_atomic_dec();
+			if (rc)
+				break;
 		}
 		spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
@@ -979,16 +1249,18 @@ int core_alua_do_port_transition(
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
-	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
-		" Group IDs: %hu %s transition to primary state: %s\n",
-		config_item_name(&lu_gp->lu_gp_group.cg_item),
-		l_tg_pt_gp->tg_pt_gp_id, (explicit) ? "explicit" : "implicit",
-		core_alua_dump_state(new_state));
+	if (!rc) {
+		pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
+			" Group IDs: %hu %s transition to primary state: %s\n",
+			config_item_name(&lu_gp->lu_gp_group.cg_item),
+			l_tg_pt_gp->tg_pt_gp_id,
+			(explicit) ? "explicit" : "implicit",
+			core_alua_dump_state(new_state));
+	}
 
 	atomic_dec(&lu_gp->lu_gp_ref_cnt);
 	smp_mb__after_atomic_dec();
-	kfree(md_buf);
-	return 0;
+	return rc;
 }
 
 /*
@@ -996,13 +1268,18 @@ int core_alua_do_port_transition(
  */
 static int core_alua_update_tpg_secondary_metadata(
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
-	struct se_port *port,
-	unsigned char *md_buf,
-	u32 md_buf_len)
+	struct se_port *port)
 {
+	unsigned char *md_buf;
 	struct se_portal_group *se_tpg = port->sep_tpg;
 	char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
-	int len;
+	int len, rc;
+
+	md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
+		return -ENOMEM;
+	}
 
 	memset(path, 0, ALUA_METADATA_PATH_LEN);
 	memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
@@ -1014,7 +1291,7 @@ static int core_alua_update_tpg_secondary_metadata(
 		snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
 				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
 
-	len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
+	len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
 		"alua_tg_pt_status=0x%02x\n",
 		atomic_read(&port->sep_tg_pt_secondary_offline),
 		port->sep_tg_pt_secondary_stat);
@@ -1023,7 +1300,10 @@ static int core_alua_update_tpg_secondary_metadata(
 		se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
 		port->sep_lun->unpacked_lun);
 
-	return core_alua_write_tpg_metadata(path, md_buf, len);
+	rc = core_alua_write_tpg_metadata(path, md_buf, len);
+	kfree(md_buf);
+
+	return rc;
 }
 
 static int core_alua_set_tg_pt_secondary_state(
@@ -1033,8 +1313,6 @@ static int core_alua_set_tg_pt_secondary_state(
 	int offline)
 {
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
-	unsigned char *md_buf;
-	u32 md_buf_len;
 	int trans_delay_msecs;
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1055,7 +1333,6 @@ static int core_alua_set_tg_pt_secondary_state(
 	else
 		atomic_set(&port->sep_tg_pt_secondary_offline, 0);
 
-	md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
 	port->sep_tg_pt_secondary_stat = (explicit) ?
 		ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
 		ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
@@ -1077,23 +1354,115 @@ static int core_alua_set_tg_pt_secondary_state(
 	 * secondary state and status
 	 */
 	if (port->sep_tg_pt_secondary_write_md) {
-		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
-		if (!md_buf) {
-			pr_err("Unable to allocate md_buf for"
-			" secondary ALUA access metadata\n");
-			return -ENOMEM;
-		}
 		mutex_lock(&port->sep_tg_pt_md_mutex);
-		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
-				md_buf, md_buf_len);
+		core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port);
 		mutex_unlock(&port->sep_tg_pt_md_mutex);
+	}
+
+	return 0;
+}
+
+struct t10_alua_lba_map *
+core_alua_allocate_lba_map(struct list_head *list,
+			   u64 first_lba, u64 last_lba)
+{
+	struct t10_alua_lba_map *lba_map;
+
+	lba_map = kmem_cache_zalloc(t10_alua_lba_map_cache, GFP_KERNEL);
+	if (!lba_map) {
+		pr_err("Unable to allocate struct t10_alua_lba_map\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	INIT_LIST_HEAD(&lba_map->lba_map_mem_list);
+	lba_map->lba_map_first_lba = first_lba;
+	lba_map->lba_map_last_lba = last_lba;
 
-		kfree(md_buf);
+	list_add_tail(&lba_map->lba_map_list, list);
+	return lba_map;
+}
+
+int
+core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map,
+			       int pg_id, int state)
+{
+	struct t10_alua_lba_map_member *lba_map_mem;
+
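+	/* Only one entry per target port group is allowed per segment */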
+	list_for_each_entry(lba_map_mem, &lba_map->lba_map_mem_list,
+			    lba_map_mem_list) {
+		if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) {
+			pr_err("Duplicate pg_id %d in lba_map\n", pg_id);
+			return -EINVAL;
+		}
+	}
+
+	lba_map_mem = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, GFP_KERNEL);
+	if (!lba_map_mem) {
+		pr_err("Unable to allocate struct t10_alua_lba_map_mem\n");
+		return -ENOMEM;
 	}
+	lba_map_mem->lba_map_mem_alua_state = state;
+	lba_map_mem->lba_map_mem_alua_pg_id = pg_id;
 
+	list_add_tail(&lba_map_mem->lba_map_mem_list,
+		      &lba_map->lba_map_mem_list);
 	return 0;
 }
 
+void
+core_alua_free_lba_map(struct list_head *lba_list)
+{
+	struct t10_alua_lba_map *lba_map, *lba_map_tmp;
+	struct t10_alua_lba_map_member *lba_map_mem, *lba_map_mem_tmp;
+
+	list_for_each_entry_safe(lba_map, lba_map_tmp, lba_list,
+				 lba_map_list) {
+		list_for_each_entry_safe(lba_map_mem, lba_map_mem_tmp,
+					 &lba_map->lba_map_mem_list,
+					 lba_map_mem_list) {
+			list_del(&lba_map_mem->lba_map_mem_list);
+			kmem_cache_free(t10_alua_lba_map_mem_cache,
+					lba_map_mem);
+		}
+		list_del(&lba_map->lba_map_list);
+		kmem_cache_free(t10_alua_lba_map_cache, lba_map);
+	}
+}
+
+void
+core_alua_set_lba_map(struct se_device *dev, struct list_head *lba_map_list,
+		      int segment_size, int segment_mult)
+{
+	struct list_head old_lba_map_list;
+	struct t10_alua_tg_pt_gp *tg_pt_gp;
+	int activate = 0, supported;
+
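+	/*
+	 * Swap in the new map list under the lock and update ALUA_LBD_SUP
+	 * on all configured groups; the old map is freed outside the lock.
+	 */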
+	INIT_LIST_HEAD(&old_lba_map_list);
+	spin_lock(&dev->t10_alua.lba_map_lock);
+	dev->t10_alua.lba_map_segment_size = segment_size;
+	dev->t10_alua.lba_map_segment_multiplier = segment_mult;
+	list_splice_init(&dev->t10_alua.lba_map_list, &old_lba_map_list);
+	if (lba_map_list) {
+		list_splice_init(lba_map_list, &dev->t10_alua.lba_map_list);
+		activate = 1;
+	}
+	spin_unlock(&dev->t10_alua.lba_map_lock);
+	spin_lock(&dev->t10_alua.tg_pt_gps_lock);
+	list_for_each_entry(tg_pt_gp, &dev->t10_alua.tg_pt_gps_list,
+			    tg_pt_gp_list) {
+
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
+			continue;
+		supported = tg_pt_gp->tg_pt_gp_alua_supported_states;
+		if (activate)
+			supported |= ALUA_LBD_SUP;
+		else
+			supported &= ~ALUA_LBD_SUP;
+		tg_pt_gp->tg_pt_gp_alua_supported_states = supported;
+	}
+	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+	core_alua_free_lba_map(&old_lba_map_list);
+}
+
 struct t10_alua_lu_gp *
 core_alua_allocate_lu_gp(const char *name, int def_group)
 {
@@ -1346,8 +1715,9 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
 	mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
 	spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
 	atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
+	INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+			  core_alua_do_transition_tg_pt_work);
 	tg_pt_gp->tg_pt_gp_dev = dev;
-	tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
 		ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
 	/*
@@ -1475,6 +1845,8 @@ void core_alua_free_tg_pt_gp(
 	dev->t10_alua.alua_tg_pt_gps_counter--;
 	spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
+	flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+
 	/*
 	 * Allow a struct t10_alua_tg_pt_gp_member * referenced by
 	 * core_alua_get_tg_pt_gp_by_name() in