@@ -1,6 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
- * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -47,13 +47,13 @@
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
-#include <linux/kthread.h>
#include <linux/jiffies.h>
+#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "3.4.0-1"
+#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

@@ -118,6 +118,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
+ {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
+ {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -163,6 +168,11 @@ static struct board_type products[] = {
{0x21C7103C, "Smart Array", &SA5_access},
{0x21C8103C, "Smart Array", &SA5_access},
{0x21C9103C, "Smart Array", &SA5_access},
+ {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
+ {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
+ {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
+ {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
+ {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

@@ -182,8 +192,9 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
- void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type);
+#define VPD_PAGE (1 << 8)

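An editorial aside on the widened page_code: with bit 8 reserved as the VPD flag, one u16 can carry both "plain inquiry" and "VPD page N" requests. A minimal sketch of how this could map onto a standard SCSI INQUIRY CDB (illustrative only, not part of the patch; the real encoding lives inside fill_cmd(), and HPSA_INQUIRY is assumed from hpsa_cmd.h):

static void example_fill_inquiry_cdb(u8 *cdb, u16 page_code, u8 bufsize)
{
	cdb[0] = HPSA_INQUIRY;
	if (page_code & VPD_PAGE) {
		cdb[1] = 0x01;			/* set the EVPD bit */
		cdb[2] = page_code & 0xff;	/* VPD page number */
	}
	cdb[4] = bufsize;			/* allocation length */
}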
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
@@ -204,7 +215,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
- int nsgs, int *bucket_map);
+ int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
@@ -216,8 +227,14 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
+static void hpsa_drain_accel_commands(struct ctlr_info *h);
+static void hpsa_flush_cache(struct ctlr_info *h);
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+ struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+ u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -280,6 +297,55 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
return 1;
}

+static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int status, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &status) != 1)
+ return -EINVAL;
+ h = shost_to_hba(shost);
+ h->acciopath_status = !!status;
+ dev_warn(&h->pdev->dev,
+ "hpsa: HP SSD Smart Path %s via sysfs update.\n",
+ h->acciopath_status ? "enabled" : "disabled");
+ return count;
+}
+
+static ssize_t host_store_raid_offload_debug(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int debug_level, len;
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ char tmpbuf[10];
+
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
+ strncpy(tmpbuf, buf, len);
+ tmpbuf[len] = '\0';
+ if (sscanf(tmpbuf, "%d", &debug_level) != 1)
+ return -EINVAL;
+ if (debug_level < 0)
+ debug_level = 0;
+ h = shost_to_hba(shost);
+ h->raid_offload_debug = debug_level;
+ dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
+ h->raid_offload_debug);
+ return count;
+}
+
static ssize_t host_store_rescan(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -327,6 +393,17 @@ static ssize_t host_show_transport_mode(struct device *dev,
"performant" : "simple");
}

+static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
+
+ h = shost_to_hba(shost);
+ return snprintf(buf, 30, "HP SSD Smart Path %s\n",
+ (h->acciopath_status == 1) ? "enabled" : "disabled");
+}
+
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
0x324a103C, /* Smart Array P712m */
@@ -416,6 +493,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"1(ADM)", "UNKNOWN"
};
+#define HPSA_RAID_0 0
+#define HPSA_RAID_4 1
+#define HPSA_RAID_1 2 /* also used for RAID 10 */
+#define HPSA_RAID_5 3 /* also used for RAID 50 */
+#define HPSA_RAID_51 4
+#define HPSA_RAID_6 5 /* also used for RAID 60 */
+#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

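A worked example of the index/label correspondence (editorial, not in the original): raid_label[HPSA_RAID_1] is "1(1+0)" and raid_label[HPSA_RAID_ADM] is "1(ADM)"; any value read back above RAID_UNKNOWN is clamped to RAID_UNKNOWN, which indexes the trailing "UNKNOWN" entry.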
static ssize_t raid_level_show(struct device *dev,
@@ -504,10 +588,39 @@ static ssize_t unique_id_show(struct device *dev,
sn[12], sn[13], sn[14], sn[15]);
}

+static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctlr_info *h;
+ struct scsi_device *sdev;
+ struct hpsa_scsi_dev_t *hdev;
+ unsigned long flags;
+ int offload_enabled;
+
+ sdev = to_scsi_device(dev);
+ h = sdev_to_hba(sdev);
+ spin_lock_irqsave(&h->lock, flags);
+ hdev = sdev->hostdata;
+ if (!hdev) {
+ spin_unlock_irqrestore(&h->lock, flags);
+ return -ENODEV;
+ }
+ offload_enabled = hdev->offload_enabled;
+ spin_unlock_irqrestore(&h->lock, flags);
+ return snprintf(buf, 20, "%d\n", offload_enabled);
+}
+
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
+ host_show_hp_ssd_smart_path_enabled, NULL);
+static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
+ host_show_hp_ssd_smart_path_status,
+ host_store_hp_ssd_smart_path_status);
+static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
+ host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
@@ -521,6 +634,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
&dev_attr_raid_level,
&dev_attr_lunid,
&dev_attr_unique_id,
+ &dev_attr_hp_ssd_smart_path_enabled,
NULL,
};

@@ -530,6 +644,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
&dev_attr_commands_outstanding,
&dev_attr_transport_mode,
&dev_attr_resettable,
+ &dev_attr_hp_ssd_smart_path_status,
+ &dev_attr_raid_offload_debug,
NULL,
};

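Usage note (path layout assumed from standard SCSI sysfs conventions, not stated in the patch): once registered, the host attributes typically surface as /sys/class/scsi_host/hostN/hp_ssd_smart_path_status and /sys/class/scsi_host/hostN/raid_offload_debug, and the per-device flag as hp_ssd_smart_path_enabled under each SCSI device's sysfs directory; writing a decimal value to the two writable files invokes the store routines shown above.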
@@ -570,6 +686,9 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
struct reply_pool *rq = &h->reply_queue[q];
unsigned long flags;

+ if (h->transMethod & CFGTBL_Trans_io_accel1)
+ return h->access.command_completed(h, q);
+
if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
return h->access.command_completed(h, q);

@@ -590,6 +709,32 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
return a;
}

+/*
+ * There are some special bits in the bus address of the
+ * command that we have to set for the controller to know
+ * how to process the command:
+ *
+ * Normal performant mode:
+ * bit 0: 1 means performant mode, 0 means simple mode.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 0)
+ *
+ * ioaccel1 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-3 = block fetch table entry
+ * bits 4-6 = command type (== 110)
+ * (command type is needed because ioaccel1 mode
+ * commands are submitted through the same register as normal
+ * mode commands, so this is how the controller knows whether
+ * the command is normal mode or ioaccel1 mode.)
+ *
+ * ioaccel2 mode:
+ * bit 0 = "performant mode" bit.
+ * bits 1-4 = block fetch table entry (note extra bit)
+ * bits 4-6 = not needed, because ioaccel2 mode has
+ * a separate special register for submitting commands.
+ */
+
/* set_performant_mode: Modify the tag for cciss performant
* set bit 0 for pull model, bits 3-1 for block fetch
* register number
@@ -598,12 +743,47 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
- if (likely(h->msix_vector))
+ if (likely(h->msix_vector > 0))
c->Header.ReplyQueue =
raw_smp_processor_id() % h->nreply_queues;
}
}

+static void set_ioaccel1_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit (bit 0)
+ * - pull count (bits 1-3)
+ * - command type (bits 4-6)
+ */
+ c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
+ IOACCEL1_BUSADDR_CMDTYPE;
+}
+
+static void set_ioaccel2_performant_mode(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+
+ /* Tell the controller to post the reply to the queue for this
+ * processor. This seems to give the best I/O throughput.
+ */
+ cp->reply_queue = smp_processor_id() % h->nreply_queues;
+ /* Set the bits in the address sent down to include:
+ * - performant mode bit not used in ioaccel mode 2
+ * - pull count (bits 0-3)
+ * - command type isn't needed for ioaccel2
+ */
+ c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
+}
+
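The three encodings described in the block comment above differ only in a few low bits. A compact sketch (editorial, not part of the patch; the ioaccel1 command-type value 110b is assumed to be what IOACCEL1_BUSADDR_CMDTYPE expands to, and ioaccel2 follows the code above in placing the fetch-table entry in the low bits):

static inline u32 example_tag_bits(u32 pull_count, int ioaccel1, int ioaccel2)
{
	if (ioaccel2)
		return pull_count;		/* fetch entry in low bits only */
	if (ioaccel1)
		return 1 | (pull_count << 1) | (6 << 4); /* type 110b in bits 4-6 */
	return 1 | (pull_count << 1);		/* plain performant mode */
}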
static int is_firmware_flash_cmd(u8 *cdb)
{
return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
@@ -638,7 +818,16 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,
{
unsigned long flags;

- set_performant_mode(h, c);
+ switch (c->cmd_type) {
+ case CMD_IOACCEL1:
+ set_ioaccel1_performant_mode(h, c);
+ break;
+ case CMD_IOACCEL2:
+ set_ioaccel2_performant_mode(h, c);
+ break;
+ default:
+ set_performant_mode(h, c);
+ }
dial_down_lockup_detection_during_fw_flash(h, c);
spin_lock_irqsave(&h->lock, flags);
addQ(&h->reqQ, c);
@@ -782,6 +971,14 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,

/* Raid level changed. */
h->dev[entry]->raid_level = new_entry->raid_level;
+
+ /* Raid offload parameters changed. */
+ h->dev[entry]->offload_config = new_entry->offload_config;
+ h->dev[entry]->offload_enabled = new_entry->offload_enabled;
+ h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
+ h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
+ h->dev[entry]->raid_map = new_entry->raid_map;
+
dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
new_entry->target, new_entry->lun);
@@ -902,6 +1099,10 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
*/
if (dev1->raid_level != dev2->raid_level)
return 1;
+ if (dev1->offload_config != dev2->offload_config)
+ return 1;
+ if (dev1->offload_enabled != dev2->offload_enabled)
+ return 1;
return 0;
}

@@ -932,6 +1133,9 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
return DEVICE_UPDATED;
return DEVICE_SAME;
} else {
+ /* Keep offline devices offline */
+ if (needle->volume_offline)
+ return DEVICE_NOT_FOUND;
return DEVICE_CHANGED;
}
}
@@ -940,6 +1144,110 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
return DEVICE_NOT_FOUND;
}

+static void hpsa_monitor_offline_device(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct offline_device_entry *device;
+ unsigned long flags;
+
+ /* Check to see if device is already on the list */
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_for_each_entry(device, &h->offline_device_list, offline_list) {
+ if (memcmp(device->scsi3addr, scsi3addr,
+ sizeof(device->scsi3addr)) == 0) {
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+
+ /* Device is not on the list, add it. */
+ device = kmalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
+ return;
+ }
+ memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_add_tail(&device->offline_list, &h->offline_device_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
+}
+
+/* Print a message explaining various offline volume states */
+static void hpsa_show_volume_status(struct ctlr_info *h,
+ struct hpsa_scsi_dev_t *sd)
+{
+ if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ switch (sd->volume_offline) {
+ case HPSA_LV_OK:
+ break;
+ case HPSA_LV_UNDERGOING_ERASE:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_RPI:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
+ dev_info(&h->pdev->dev,
+ "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
+ h->scsi_host->host_no,
+ sd->bus, sd->target, sd->lun);
+ break;
+ }
+}
+
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
struct hpsa_scsi_dev_t *sd[], int nsds)
{
@@ -1004,6 +1312,20 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
for (i = 0; i < nsds; i++) {
if (!sd[i]) /* if already added above. */
continue;
+
+ /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
+ * as the SCSI mid-layer does not handle such devices well.
+ * It relentlessly loops sending TUR at 3Hz, then READ(10)
+ * at 160Hz, and prevents the system from coming up.
+ */
+ if (sd[i]->volume_offline) {
+ hpsa_show_volume_status(h, sd[i]);
+ dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
+ h->scsi_host->host_no,
+ sd[i]->bus, sd[i]->target, sd[i]->lun);
+ continue;
+ }
+
device_change = hpsa_scsi_find_entry(sd[i], h->dev,
h->ndevices, &entry);
if (device_change == DEVICE_NOT_FOUND) {
@@ -1022,6 +1344,17 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
}
spin_unlock_irqrestore(&h->devlock, flags);

+ /* Monitor devices which are in one of several NOT READY states to be
+ * brought online later. This must be done without holding h->devlock,
+ * so don't touch h->dev[]
+ */
+ for (i = 0; i < nsds; i++) {
+ if (!sd[i]) /* if already added above. */
+ continue;
+ if (sd[i]->volume_offline)
+ hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
+ }
+
/* Don't notify scsi mid layer of any changes the first time through
* (or if there are no changes) scsi_scan_host will do it later the
* first time through.
@@ -1187,11 +1520,163 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

+
+/* Decode the various types of errors on ioaccel2 path.
+ * Return 1 for any error that should generate a RAID path retry.
+ * Return 0 for errors that don't require a RAID path retry.
+ */
+static int handle_ioaccel_mode2_error(struct ctlr_info *h,
+ struct CommandList *c,
+ struct scsi_cmnd *cmd,
+ struct io_accel2_cmd *c2)
+{
+ int data_len;
+ int retry = 0;
+
+ switch (c2->error_data.serv_response) {
+ case IOACCEL2_SERV_RESPONSE_COMPLETE:
+ switch (c2->error_data.status) {
+ case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with check condition.\n",
+ "HP SSD Smart Path");
+ if (c2->error_data.data_present !=
+ IOACCEL2_SENSE_DATA_PRESENT)
+ break;
+ /* copy the sense data */
+ data_len = c2->error_data.sense_data_len;
+ if (data_len > SCSI_SENSE_BUFFERSIZE)
+ data_len = SCSI_SENSE_BUFFERSIZE;
+ if (data_len > sizeof(c2->error_data.sense_data_buff))
+ data_len =
+ sizeof(c2->error_data.sense_data_buff);
+ memcpy(cmd->sense_buffer,
+ c2->error_data.sense_data_buff, data_len);
+ cmd->result |= SAM_STAT_CHECK_CONDITION;
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with BUSY status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with reservation conflict.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
+ /* Make scsi midlayer do unlimited retries */
+ cmd->result = DID_IMM_RETRY << 16;
+ break;
+ case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with aborted status.\n",
+ "HP SSD Smart Path");
+ retry = 1;
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: task complete with unrecognized status: 0x%02x\n",
+ "HP SSD Smart Path", c2->error_data.status);
+ retry = 1;
+ break;
+ }
+ break;
+ case IOACCEL2_SERV_RESPONSE_FAILURE:
+ /* don't expect to get here. */
+ dev_warn(&h->pdev->dev,
+ "unexpected delivery or target failure, status = 0x%02x\n",
+ c2->error_data.status);
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
+ dev_warn(&h->pdev->dev, "task management function rejected.\n");
+ retry = 1;
+ break;
+ case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
+ dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
+ break;
+ default:
+ dev_warn(&h->pdev->dev,
+ "%s: Unrecognized server response: 0x%02x\n",
+ "HP SSD Smart Path",
+ c2->error_data.serv_response);
+ retry = 1;
+ break;
+ }
+
+ return retry; /* retry on raid path? */
+}
+
+static void process_ioaccel2_completion(struct ctlr_info *h,
+ struct CommandList *c, struct scsi_cmnd *cmd,
+ struct hpsa_scsi_dev_t *dev)
+{
+ struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
+ int raid_retry = 0;
+
+ /* check for good status */
+ if (likely(c2->error_data.serv_response == 0 &&
+ c2->error_data.status == 0)) {
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+ }
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr) &&
+ c2->error_data.serv_response ==
+ IOACCEL2_SERV_RESPONSE_FAILURE) {
+ if (c2->error_data.status ==
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
+ dev_warn(&h->pdev->dev,
+ "%s: Path is unavailable, retrying on standard path.\n",
+ "HP SSD Smart Path");
+ else
+ dev_warn(&h->pdev->dev,
+ "%s: Error 0x%02x, retrying on standard path.\n",
+ "HP SSD Smart Path", c2->error_data.status);
+
+ dev->offload_enabled = 0;
+ h->drv_req_rescan = 1; /* schedule controller for a rescan */
+ cmd->result = DID_SOFT_ERROR << 16;
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
+ /* If error found, disable Smart Path, schedule a rescan,
+ * and force a retry on the standard path.
+ */
+ if (raid_retry) {
+ dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
+ "HP SSD Smart Path");
+ dev->offload_enabled = 0; /* Disable Smart Path */
+ h->drv_req_rescan = 1; /* schedule controller rescan */
+ cmd->result = DID_SOFT_ERROR << 16;
+ }
+ cmd_free(h, c);
+ cmd->scsi_done(cmd);
+}
+
static void complete_scsi_command(struct CommandList *cp)
{
struct scsi_cmnd *cmd;
struct ctlr_info *h;
struct ErrorInfo *ei;
+ struct hpsa_scsi_dev_t *dev;

unsigned char sense_key;
unsigned char asc; /* additional sense code */
@@ -1201,13 +1686,19 @@ static void complete_scsi_command(struct CommandList *cp)
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
h = cp->h;
+ dev = cmd->device->hostdata;

scsi_dma_unmap(cmd); /* undo the DMA mappings */
- if (cp->Header.SGTotal > h->max_cmd_sg_entries)
+ if ((cp->cmd_type == CMD_SCSI) &&
+ (cp->Header.SGTotal > h->max_cmd_sg_entries))
hpsa_unmap_sg_chain_block(h, cp);

cmd->result = (DID_OK << 16); /* host byte */
cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+
+ if (cp->cmd_type == CMD_IOACCEL2)
+ return process_ioaccel2_completion(h, cp, cmd, dev);
+
cmd->result |= ei->ScsiStatus;

/* copy the sense data whether we need to or not. */
@@ -1227,6 +1718,32 @@ static void complete_scsi_command(struct CommandList *cp)
return;
}

+ /* For I/O accelerator commands, copy over some fields to the normal
+ * CISS header used below for error handling.
+ */
+ if (cp->cmd_type == CMD_IOACCEL1) {
+ struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
+ cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
+ cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
+ cp->Header.Tag.lower = c->Tag.lower;
+ cp->Header.Tag.upper = c->Tag.upper;
+ memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
+ memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
+
+ /* Any RAID offload error results in retry which will use
+ * the normal I/O path so the controller can handle whatever's
+ * wrong.
+ */
+ if (is_logical_dev_addr_mode(dev->scsi3addr)) {
+ if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
+ dev->offload_enabled = 0;
+ cmd->result = DID_SOFT_ERROR << 16;
+ cmd_free(h, cp);
+ cmd->scsi_done(cmd);
+ return;
+ }
+ }
+
/* an error has occurred */
switch (ei->CommandStatus) {

@@ -1389,6 +1906,14 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "Command unabortable\n");
break;
+ case CMD_IOACCEL_DISABLED:
+ /* This only handles the direct pass-through case since RAID
+ * offload is handled above. Just attempt a retry.
+ */
+ cmd->result = DID_SOFT_ERROR << 16;
+ dev_warn(&h->pdev->dev,
+ "cp %p had HP SSD Smart Path error\n", cp);
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1438,6 +1963,7 @@ static int hpsa_map_one(struct pci_dev *pdev,
cp->SG[0].Addr.upper =
(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Len = buflen;
+ cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
return 0;
@@ -1490,17 +2016,37 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

-static void hpsa_scsi_interpret_error(struct CommandList *cp)
+static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
+ struct CommandList *c)
{
- struct ErrorInfo *ei;
+ const u8 *cdb = c->Request.CDB;
+ const u8 *lun = c->Header.LUN.LunAddrBytes;
+
+ dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
+ " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ txt, lun[0], lun[1], lun[2], lun[3],
+ lun[4], lun[5], lun[6], lun[7],
+ cdb[0], cdb[1], cdb[2], cdb[3],
+ cdb[4], cdb[5], cdb[6], cdb[7],
+ cdb[8], cdb[9], cdb[10], cdb[11],
+ cdb[12], cdb[13], cdb[14], cdb[15]);
+}
+
+static void hpsa_scsi_interpret_error(struct ctlr_info *h,
+ struct CommandList *cp)
+{
+ const struct ErrorInfo *ei = cp->err_info;
struct device *d = &cp->h->pdev->dev;
+ const u8 *sd = ei->SenseInfo;

- ei = cp->err_info;
switch (ei->CommandStatus) {
case CMD_TARGET_STATUS:
- dev_warn(d, "cmd %p has completed with errors\n", cp);
- dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
- ei->ScsiStatus);
+ hpsa_print_cmd(h, "SCSI status", cp);
+ if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
+ dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
+ sd[2] & 0x0f, sd[12], sd[13]);
+ else
+ dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
if (ei->ScsiStatus == 0)
dev_warn(d, "SCSI status is abnormally zero. "
"(probably indicates selection timeout "
@@ -1508,54 +2054,51 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
"firmware bug, circa July, 2001.)\n");
break;
case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- dev_info(d, "UNDERRUN\n");
break;
case CMD_DATA_OVERRUN:
- dev_warn(d, "cp %p has completed with data overrun\n", cp);
+ hpsa_print_cmd(h, "overrun condition", cp);
break;
case CMD_INVALID: {
/* controller unfortunately reports SCSI passthru's
* to non-existent targets as invalid commands.
*/
- dev_warn(d, "cp %p is reported invalid (probably means "
- "target device no longer present)\n", cp);
- /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
- print_cmd(cp); */
+ hpsa_print_cmd(h, "invalid command", cp);
+ dev_warn(d, "probably means device no longer present\n");
}
break;
case CMD_PROTOCOL_ERR:
- dev_warn(d, "cp %p has protocol error \n", cp);
+ hpsa_print_cmd(h, "protocol error", cp);
break;
case CMD_HARDWARE_ERR:
- /* cmd->result = DID_ERROR << 16; */
- dev_warn(d, "cp %p had hardware error\n", cp);
+ hpsa_print_cmd(h, "hardware error", cp);
break;
case CMD_CONNECTION_LOST:
- dev_warn(d, "cp %p had connection lost\n", cp);
+ hpsa_print_cmd(h, "connection lost", cp);
break;
case CMD_ABORTED:
- dev_warn(d, "cp %p was aborted\n", cp);
+ hpsa_print_cmd(h, "aborted", cp);
break;
case CMD_ABORT_FAILED:
- dev_warn(d, "cp %p reports abort failed\n", cp);
+ hpsa_print_cmd(h, "abort failed", cp);
break;
case CMD_UNSOLICITED_ABORT:
- dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+ hpsa_print_cmd(h, "unsolicited abort", cp);
break;
case CMD_TIMEOUT:
- dev_warn(d, "cp %p timed out\n", cp);
+ hpsa_print_cmd(h, "timed out", cp);
break;
case CMD_UNABORTABLE:
- dev_warn(d, "Command unabortable\n");
+ hpsa_print_cmd(h, "unabortable", cp);
break;
default:
- dev_warn(d, "cp %p returned unknown status %x\n", cp,
+ hpsa_print_cmd(h, "unknown status", cp);
+ dev_warn(d, "Unknown command status %x\n",
ei->CommandStatus);
}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
- unsigned char page, unsigned char *buf,
+ u16 page, unsigned char *buf,
unsigned char bufsize)
{
int rc = IO_OK;
@@ -1577,7 +2120,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
}
out:
@@ -1585,7 +2128,39 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
return rc;
}

-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
+ unsigned char *scsi3addr, unsigned char page,
+ struct bmic_controller_parameters *buf, size_t bufsize)
+{
+ int rc = IO_OK;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+
+ if (c == NULL) { /* trouble... */
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+
+ if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ rc = -1;
+ }
+out:
+ cmd_special_free(h, c);
+ return rc;
+ }
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+ u8 reset_type)
{
int rc = IO_OK;
struct CommandList *c;
@@ -1599,14 +2174,15 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
}

/* fill_cmd can't fail here, no data buffer to map. */
- (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
- NULL, 0, 0, scsi3addr, TYPE_MSG);
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ scsi3addr, TYPE_MSG);
+ c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */

ei = c->err_info;
if (ei->CommandStatus != 0) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
}
cmd_special_free(h, c);
@@ -1623,7 +2199,7 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
buf = kzalloc(64, GFP_KERNEL);
if (!buf)
return;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
if (rc == 0)
*raid_level = buf[8];
if (*raid_level > RAID_UNKNOWN)
@@ -1632,30 +2208,228 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
return;
}

-/* Get the device id from inquiry page 0x83 */
-static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
- unsigned char *device_id, int buflen)
+#define HPSA_MAP_DEBUG
+#ifdef HPSA_MAP_DEBUG
+static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
+ struct raid_map_data *map_buff)
{
- int rc;
- unsigned char *buf;
+ struct raid_map_disk_data *dd = &map_buff->data[0];
+ int map, row, col;
+ u16 map_cnt, row_cnt, disks_per_row;

- if (buflen > 16)
- buflen = 16;
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -1;
- rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
- if (rc == 0)
- memcpy(device_id, &buf[8], buflen);
- kfree(buf);
- return rc != 0;
+ if (rc != 0)
+ return;
+
+ /* Show details only if debugging has been activated. */
+ if (h->raid_offload_debug < 2)
+ return;
+
+ dev_info(&h->pdev->dev, "structure_size = %u\n",
+ le32_to_cpu(map_buff->structure_size));
+ dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
+ le32_to_cpu(map_buff->volume_blk_size));
+ dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->volume_blk_cnt));
+ dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
+ map_buff->phys_blk_shift);
+ dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
+ map_buff->parity_rotation_shift);
+ dev_info(&h->pdev->dev, "strip_size = %u\n",
+ le16_to_cpu(map_buff->strip_size));
+ dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_starting_blk));
+ dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
+ le64_to_cpu(map_buff->disk_blk_cnt));
+ dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->data_disks_per_row));
+ dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
+ le16_to_cpu(map_buff->metadata_disks_per_row));
+ dev_info(&h->pdev->dev, "row_cnt = %u\n",
+ le16_to_cpu(map_buff->row_cnt));
+ dev_info(&h->pdev->dev, "layout_map_count = %u\n",
+ le16_to_cpu(map_buff->layout_map_count));
+ dev_info(&h->pdev->dev, "flags = %u\n",
+ le16_to_cpu(map_buff->flags));
+ if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
+ dev_info(&h->pdev->dev, "encryption = ON\n");
+ else
+ dev_info(&h->pdev->dev, "encryption = OFF\n");
+ dev_info(&h->pdev->dev, "dekindex = %u\n",
+ le16_to_cpu(map_buff->dekindex));
+
+ map_cnt = le16_to_cpu(map_buff->layout_map_count);
+ for (map = 0; map < map_cnt; map++) {
+ dev_info(&h->pdev->dev, "Map%u:\n", map);
+ row_cnt = le16_to_cpu(map_buff->row_cnt);
+ for (row = 0; row < row_cnt; row++) {
+ dev_info(&h->pdev->dev, " Row%u:\n", row);
+ disks_per_row =
+ le16_to_cpu(map_buff->data_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " D%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ disks_per_row =
+ le16_to_cpu(map_buff->metadata_disks_per_row);
+ for (col = 0; col < disks_per_row; col++, dd++)
+ dev_info(&h->pdev->dev,
+ " M%02u: h=0x%04x xor=%u,%u\n",
+ col, dd->ioaccel_handle,
+ dd->xor_mult[0], dd->xor_mult[1]);
+ }
+ }
}
+#else
+static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
+ __attribute__((unused)) int rc,
+ __attribute__((unused)) struct raid_map_data *map_buff)
+{
+}
+#endif

-static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
- struct ReportLUNdata *buf, int bufsize,
- int extended_response)
+static int hpsa_get_raid_map(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
- int rc = IO_OK;
+ int rc = 0;
+ struct CommandList *c;
+ struct ErrorInfo *ei;
+
+ c = cmd_special_alloc(h);
+ if (c == NULL) {
+ dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+ return -ENOMEM;
+ }
+ if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
+ sizeof(this_device->raid_map), 0,
+ scsi3addr, TYPE_CMD)) {
+ dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
+ cmd_special_free(h, c);
+ return -ENOMEM;
+ }
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+ ei = c->err_info;
+ if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+ hpsa_scsi_interpret_error(h, c);
+ cmd_special_free(h, c);
+ return -1;
+ }
+ cmd_special_free(h, c);
+
+ /* @todo in the future, dynamically allocate RAID map memory */
+ if (le32_to_cpu(this_device->raid_map.structure_size) >
+ sizeof(this_device->raid_map)) {
+ dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
+ rc = -1;
+ }
+ hpsa_debug_map_buff(h, rc, &this_device->raid_map);
+ return rc;
+}
+
+static int hpsa_vpd_page_supported(struct ctlr_info *h,
+ unsigned char scsi3addr[], u8 page)
+{
+ int rc;
+ int i;
+ int pages;
+ unsigned char *buf, bufsize;
+
+ buf = kzalloc(256, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ /* Get the size of the page list first */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0)
+ goto exit_unsupported;
+ pages = buf[3];
+ if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
+ bufsize = pages + HPSA_VPD_HEADER_SZ;
+ else
+ bufsize = 255;
+
+ /* Get the whole VPD page list */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
+ buf, bufsize);
+ if (rc != 0)
+ goto exit_unsupported;
+
+ pages = buf[3];
+ for (i = 1; i <= pages; i++)
+ if (buf[3 + i] == page)
+ goto exit_supported;
+exit_unsupported:
+ kfree(buf);
+ return 0;
+exit_supported:
+ kfree(buf);
+ return 1;
+}
+
+static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+ unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
+{
+ int rc;
+ unsigned char *buf;
+ u8 ioaccel_status;
+
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return;
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
+ goto out;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr,
+ VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
+ if (rc != 0)
+ goto out;
+
+#define IOACCEL_STATUS_BYTE 4
+#define OFFLOAD_CONFIGURED_BIT 0x01
+#define OFFLOAD_ENABLED_BIT 0x02
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+ this_device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (this_device->offload_config) {
+ this_device->offload_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+ if (hpsa_get_raid_map(h, scsi3addr, this_device))
+ this_device->offload_enabled = 0;
+ }
+out:
+ kfree(buf);
+ return;
+}
+
+/* Get the device id from inquiry page 0x83 */
+static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
+ unsigned char *device_id, int buflen)
+{
+ int rc;
+ unsigned char *buf;
+
+ if (buflen > 16)
+ buflen = 16;
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -1;
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
+ if (rc == 0)
+ memcpy(device_id, &buf[8], buflen);
+ kfree(buf);
+ return rc != 0;
+}
+
+static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
+ struct ReportLUNdata *buf, int bufsize,
+ int extended_response)
+{
+ int rc = IO_OK;
struct CommandList *c;
unsigned char scsi3addr[8];
struct ErrorInfo *ei;
@@ -1678,8 +2452,16 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
ei = c->err_info;
if (ei->CommandStatus != 0 &&
ei->CommandStatus != CMD_DATA_UNDERRUN) {
- hpsa_scsi_interpret_error(c);
+ hpsa_scsi_interpret_error(h, c);
rc = -1;
+ } else {
+ if (buf->extended_response_flag != extended_response) {
+ dev_err(&h->pdev->dev,
+ "report luns requested format %u, got %u\n",
+ extended_response,
+ buf->extended_response_flag);
+ rc = -1;
+ }
}
out:
cmd_special_free(h, c);
@@ -1707,6 +2489,117 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
device->lun = lun;
}

+/* Use VPD inquiry to get details of volume status */
+static int hpsa_get_volume_status(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ int rc;
+ int status;
+ int size;
+ unsigned char *buf;
+
+ buf = kzalloc(64, GFP_KERNEL);
+ if (!buf)
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+
+ /* Does controller have VPD for logical volume status? */
+ if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
+ goto exit_failed;
+ }
+
+ /* Get the size of the VPD return buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, HPSA_VPD_HEADER_SZ);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ goto exit_failed;
+ }
+ size = buf[3];
+
+ /* Now get the whole VPD buffer */
+ rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
+ buf, size + HPSA_VPD_HEADER_SZ);
+ if (rc != 0) {
+ dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
+ goto exit_failed;
+ }
+ status = buf[4]; /* status byte */
+
+ kfree(buf);
+ return status;
+exit_failed:
+ kfree(buf);
+ return HPSA_VPD_LV_STATUS_UNSUPPORTED;
+}
+
+/* Determine offline status of a volume.
+ * Return either:
+ * 0 (not offline)
+ * -1 (offline for unknown reasons)
+ * # (integer code indicating one of several NOT READY states
+ * describing why a volume is to be kept offline)
+ */
+static unsigned char hpsa_volume_offline(struct ctlr_info *h,
+ unsigned char scsi3addr[])
+{
+ struct CommandList *c;
+ unsigned char *sense, sense_key, asc, ascq;
+ int ldstat = 0;
+ u16 cmd_status;
+ u8 scsi_status;
+#define ASC_LUN_NOT_READY 0x04
+#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
+#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
+
+ c = cmd_alloc(h);
+ if (!c)
+ return 0;
+ (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
+ hpsa_scsi_do_simple_cmd_core(h, c);
+ sense = c->err_info->SenseInfo;
+ sense_key = sense[2];
+ asc = sense[12];
+ ascq = sense[13];
+ cmd_status = c->err_info->CommandStatus;
+ scsi_status = c->err_info->ScsiStatus;
+ cmd_free(h, c);
+ /* Is the volume 'not ready'? */
+ if (cmd_status != CMD_TARGET_STATUS ||
+ scsi_status != SAM_STAT_CHECK_CONDITION ||
+ sense_key != NOT_READY ||
+ asc != ASC_LUN_NOT_READY) {
+ return 0;
+ }
+
+ /* Determine the reason for not ready state */
+ ldstat = hpsa_get_volume_status(h, scsi3addr);
+
+ /* Keep volume offline in certain cases: */
+ switch (ldstat) {
+ case HPSA_LV_UNDERGOING_ERASE:
+ case HPSA_LV_UNDERGOING_RPI:
+ case HPSA_LV_PENDING_RPI:
+ case HPSA_LV_ENCRYPTED_NO_KEY:
+ case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
+ case HPSA_LV_UNDERGOING_ENCRYPTION:
+ case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
+ case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+ return ldstat;
+ case HPSA_VPD_LV_STATUS_UNSUPPORTED:
+ /* If VPD status page isn't available,
+ * use ASC/ASCQ to determine state
+ */
+ if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
+ (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
+ return ldstat;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int hpsa_update_device_info(struct ctlr_info *h,
unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
unsigned char *is_OBDR_device)
@@ -1745,10 +2638,18 @@ static int hpsa_update_device_info(struct ctlr_info *h,
sizeof(this_device->device_id));

if (this_device->devtype == TYPE_DISK &&
- is_logical_dev_addr_mode(scsi3addr))
+ is_logical_dev_addr_mode(scsi3addr)) {
hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
- else
+ if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
+ hpsa_get_ioaccel_status(h, scsi3addr, this_device);
+ this_device->volume_offline =
+ hpsa_volume_offline(h, scsi3addr);
+ } else {
this_device->raid_level = RAID_UNKNOWN;
+ this_device->offload_config = 0;
+ this_device->offload_enabled = 0;
+ this_device->volume_offline = 0;
+ }

if (is_OBDR_device) {
/* See if this is a One-Button-Disaster-Recovery device
@@ -1877,6 +2778,105 @@ static int add_ext_target_dev(struct ctlr_info *h,
return 1;
}

+/*
+ * Get address of physical disk used for an ioaccel2 mode command:
+ * 1. Extract ioaccel2 handle from the command.
+ * 2. Find a matching ioaccel2 handle from list of physical disks.
+ * 3. Return:
+ * 1 and set scsi3addr to address of matching physical
+ * 0 if no matching physical disk was found.
+ */
+static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
+ struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
+{
+ struct ReportExtendedLUNdata *physicals = NULL;
+ int responsesize = 24; /* size of physical extended response */
+ int extended = 2; /* flag forces reporting 'other dev info'. */
+ int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
+ u32 nphysicals = 0; /* number of reported physical devs */
+ int found = 0; /* found match (1) or not (0) */
+ u32 find; /* handle we need to match */
+ int i;
+ struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+ struct hpsa_scsi_dev_t *d; /* device of request being aborted */
+ struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
+ u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+ u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
+
+ if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
+ return 0; /* no match */
+
+ /* point to the ioaccel2 device handle */
+ c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
+ if (c2a == NULL)
+ return 0; /* no match */
+
+ scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
+ if (scmd == NULL)
+ return 0; /* no match */
+
+ d = scmd->device->hostdata;
+ if (d == NULL)
+ return 0; /* no match */
+
+ it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
+ scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
+ find = c2a->scsi_nexus;
+
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ __func__, scsi_nexus,
+ d->device_id[0], d->device_id[1], d->device_id[2],
+ d->device_id[3], d->device_id[4], d->device_id[5],
+ d->device_id[6], d->device_id[7], d->device_id[8],
+ d->device_id[9], d->device_id[10], d->device_id[11],
+ d->device_id[12], d->device_id[13], d->device_id[14],
+ d->device_id[15]);
+
+ /* Get the list of physical devices */
+ physicals = kzalloc(reportsize, GFP_KERNEL);
+ if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
+ reportsize, extended)) {
+ dev_err(&h->pdev->dev,
+ "Can't lookup %s device handle: report physical LUNs failed.\n",
+ "HP SSD Smart Path");
+ kfree(physicals);
+ return 0;
+ }
+ nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
+ responsesize;
+
+
+ /* find ioaccel2 handle in list of physicals: */
+ for (i = 0; i < nphysicals; i++) {
+ /* handle is in bytes 28-31 of each lun */
+ if (memcmp(&((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][20], &find, 4) != 0) {
+ continue; /* didn't match */
+ }
+ found = 1;
+ memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][0], 8);
+ if (h->raid_offload_debug > 0)
+ dev_info(&h->pdev->dev,
+ "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ __func__, find,
+ ((struct ReportExtendedLUNdata *)
+ physicals)->LUN[i][20],
+ scsi3addr[0], scsi3addr[1], scsi3addr[2],
+ scsi3addr[3], scsi3addr[4], scsi3addr[5],
+ scsi3addr[6], scsi3addr[7]);
+ break; /* found it */
+ }
+
+ kfree(physicals);
+ if (found)
+ return 1;
+ else
+ return 0;
+
+}
/*
|
|
|
* Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
|
|
|
* logdev. The number of luns in physdev and logdev are returned in
|
|
|
@@ -1885,14 +2885,26 @@ static int add_ext_target_dev(struct ctlr_info *h,
|
|
|
*/
|
|
|
static int hpsa_gather_lun_info(struct ctlr_info *h,
|
|
|
int reportlunsize,
|
|
|
- struct ReportLUNdata *physdev, u32 *nphysicals,
|
|
|
+ struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
|
|
|
struct ReportLUNdata *logdev, u32 *nlogicals)
|
|
|
{
|
|
|
- if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
|
|
|
+ int physical_entry_size = 8;
|
|
|
+
|
|
|
+ *physical_mode = 0;
|
|
|
+
|
|
|
+ /* For I/O accelerator mode we need to read physical device handles */
|
|
|
+ if (h->transMethod & CFGTBL_Trans_io_accel1 ||
|
|
|
+ h->transMethod & CFGTBL_Trans_io_accel2) {
|
|
|
+ *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
|
|
|
+ physical_entry_size = 24;
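+		/* extended entries are 24 bytes each (vs. 8 for a plain
+		 * report) and carry the 4-byte ioaccel device handle */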
+	}
+	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
+		*physical_mode)) {
 		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
 		return -1;
 	}
-	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
+	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
+		physical_entry_size;
 	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
 		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
			" %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
@@ -1923,7 +2935,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,
 }

 u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
-	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
+	int nphysicals, int nlogicals,
+	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
 {
	/* Helper function, figure out where the LUN ID info is coming from
@@ -1947,6 +2960,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
	return NULL;
 }

+static int hpsa_hba_mode_enabled(struct ctlr_info *h)
+{
+	int rc;
+	int hba_mode_enabled;
+	struct bmic_controller_parameters *ctlr_params;
+
+	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
+		GFP_KERNEL);
+	if (!ctlr_params)
+		return 0;
+	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
+		sizeof(struct bmic_controller_parameters));
+	if (rc != 0) {
+		kfree(ctlr_params);
+		return 0;
+	}
+	/* save the flag first so ctlr_params isn't leaked on success */
+	hba_mode_enabled = ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
+	kfree(ctlr_params);
+	return hba_mode_enabled;
+}
+
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
 {
	/* the idea here is we could get notified
@@ -1959,16 +2990,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
-	struct ReportLUNdata *physdev_list = NULL;
+	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
+	int physical_mode = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
-	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
+	u8 rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -1982,8 +3015,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

-	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
-		logdev_list, &nlogicals))
+	rescan_hba_mode = hpsa_hba_mode_enabled(h);
+
+	if (!h->hba_mode_enabled && rescan_hba_mode)
+		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
+	else if (h->hba_mode_enabled && !rescan_hba_mode)
+		dev_warn(&h->pdev->dev, "HBA mode disabled\n");
+
+	h->hba_mode_enabled = rescan_hba_mode;
+
+	if (hpsa_gather_lun_info(h, reportlunsize,
+		(struct ReportLUNdata *) physdev_list, &nphysicals,
+		&physical_mode, logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
@@ -2064,9 +3107,28 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
			ncurrent++;
			break;
		case TYPE_DISK:
-			if (i < nphysicals)
+			if (h->hba_mode_enabled) {
+				/* never use raid mapper in HBA mode */
+				this_device->offload_enabled = 0;
+				ncurrent++;
				break;
-			ncurrent++;
+			} else if (h->acciopath_status) {
+				if (i >= nphysicals) {
+					ncurrent++;
+					break;
+				}
+			} else {
+				if (i < nphysicals)
+					break;
+				ncurrent++;
+				break;
+			}
+			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
+				memcpy(&this_device->ioaccel_handle,
+					&lunaddrbytes[20],
+					sizeof(this_device->ioaccel_handle));
+				ncurrent++;
+			}
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
@@ -2136,30 +3198,750 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
		curr_sg->Len = len;
-		curr_sg->Ext = 0; /* we are not chaining */
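+		/* HPSA_SG_LAST marks the final scatter-gather element */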
+		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
		curr_sg++;
	}

-	if (use_sg + chained > h->maxSG)
-		h->maxSG = use_sg + chained;
+	if (use_sg + chained > h->maxSG)
+		h->maxSG = use_sg + chained;
+
+	if (chained) {
+		cp->Header.SGList = h->max_cmd_sg_entries;
+		cp->Header.SGTotal = (u16) (use_sg + 1);
+		if (hpsa_map_sg_chain_block(h, cp)) {
+			scsi_dma_unmap(cmd);
+			return -1;
+		}
+		return 0;
+	}
+
+sglist_finished:
+
+	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
+	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
+	return 0;
+}
+
+#define IO_ACCEL_INELIGIBLE (1)
+static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
+{
+	int is_write = 0;
+	u32 block;
+	u32 block_cnt;
+
+	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
+	switch (cdb[0]) {
+	case WRITE_6:
+	case WRITE_12:
+		is_write = 1;
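+		/* deliberate fall-through: reads and writes share the
+		 * LBA/length decode below */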
+	case READ_6:
+	case READ_12:
+		if (*cdb_len == 6) {
+			block = (((u32) cdb[2]) << 8) | cdb[3];
+			block_cnt = cdb[4];
+		} else {
+			BUG_ON(*cdb_len != 12);
+			block = (((u32) cdb[2]) << 24) |
+				(((u32) cdb[3]) << 16) |
+				(((u32) cdb[4]) << 8) |
+				cdb[5];
+			block_cnt =
+				(((u32) cdb[6]) << 24) |
+				(((u32) cdb[7]) << 16) |
+				(((u32) cdb[8]) << 8) |
+				cdb[9];
+		}
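+		/* READ/WRITE(10) can only express a 16-bit block count */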
+		if (block_cnt > 0xffff)
+			return IO_ACCEL_INELIGIBLE;
+
+		cdb[0] = is_write ? WRITE_10 : READ_10;
+		cdb[1] = 0;
+		cdb[2] = (u8) (block >> 24);
+		cdb[3] = (u8) (block >> 16);
+		cdb[4] = (u8) (block >> 8);
+		cdb[5] = (u8) (block);
+		cdb[6] = 0;
+		cdb[7] = (u8) (block_cnt >> 8);
+		cdb[8] = (u8) (block_cnt);
+		cdb[9] = 0;
+		*cdb_len = 10;
+		break;
+	}
+	return 0;
+}
+
+static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
+	unsigned int len;
+	unsigned int total_len = 0;
+	struct scatterlist *sg;
+	u64 addr64;
+	int use_sg, i;
+	struct SGDescriptor *curr_sg;
+	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
+
+	/* TODO: implement chaining support */
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+		return IO_ACCEL_INELIGIBLE;
+
+	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
+
+	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+		return IO_ACCEL_INELIGIBLE;
+
+	c->cmd_type = CMD_IOACCEL1;
+
+	/* Adjust the DMA address to point to the accelerated command buffer */
+	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
+				(c->cmdindex * sizeof(*cp));
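+	/* ioaccel command buffers are 128-byte aligned, so the low
+	 * seven bits of the bus address must be clear */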
+	BUG_ON(c->busaddr & 0x0000007F);
+
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0)
+		return use_sg;
+
+	if (use_sg) {
+		curr_sg = cp->SG;
+		scsi_for_each_sg(cmd, sg, use_sg, i) {
+			addr64 = (u64) sg_dma_address(sg);
+			len = sg_dma_len(sg);
+			total_len += len;
+			curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
+			curr_sg->Addr.upper =
+				(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+			curr_sg->Len = len;
+
+			if (i == (scsi_sg_count(cmd) - 1))
+				curr_sg->Ext = HPSA_SG_LAST;
+			else
+				curr_sg->Ext = 0;  /* we are not chaining */
+			curr_sg++;
+		}
+
+		switch (cmd->sc_data_direction) {
+		case DMA_TO_DEVICE:
+			control |= IOACCEL1_CONTROL_DATA_OUT;
+			break;
+		case DMA_FROM_DEVICE:
+			control |= IOACCEL1_CONTROL_DATA_IN;
+			break;
+		case DMA_NONE:
+			control |= IOACCEL1_CONTROL_NODATAXFER;
+			break;
+		default:
+			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+				cmd->sc_data_direction);
+			BUG();
+			break;
+		}
+	} else {
+		control |= IOACCEL1_CONTROL_NODATAXFER;
+	}
+
+	c->Header.SGList = use_sg;
+	/* Fill out the command structure to submit */
+	cp->dev_handle = ioaccel_handle & 0xFFFF;
+	cp->transfer_len = total_len;
+	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
+			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
+	cp->control = control;
+	memcpy(cp->CDB, cdb, cdb_len);
+	memcpy(cp->CISS_LUN, scsi3addr, 8);
+	/* Tag was already set at init time. */
+	enqueue_cmd_and_start_io(h, c);
+	return 0;
+}
+
+/*
+ * Queue a command directly to a device behind the controller using the
+ * I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+
+	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
+		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
+}
+
+/*
+ * Set encryption parameters for the ioaccel2 request
+ */
+static void set_encrypt_ioaccel2(struct ctlr_info *h,
+	struct CommandList *c, struct io_accel2_cmd *cp)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+	struct raid_map_data *map = &dev->raid_map;
+	u64 first_block;
+
+	BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+	/* Are we doing encryption on this device */
+	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
+		return;
+	/* Set the data encryption key index. */
+	cp->dekindex = map->dekindex;
+
+	/* Set the encryption enable flag, encoded into direction field. */
+	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
+
+	/* Set encryption tweak values based on logical block address.
+	 * If the block size is 512, the tweak value is the LBA.
+	 * For other block sizes, the tweak is (LBA * block size) / 512.
+	 */
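+	/* e.g. with a 4096-byte volume block size, LBA 100 becomes a
+	 * tweak of (100 * 4096) / 512 = 800 */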
+	switch (cmd->cmnd[0]) {
+	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_6:
+	case READ_6:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 8) |
+					cmd->cmnd[3];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_10:
+	case READ_10:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
+	case WRITE_12:
+	case READ_12:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			cp->tweak_upper = 0;
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 24) |
+				(((u64) cmd->cmnd[3]) << 16) |
+				(((u64) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	case WRITE_16:
+	case READ_16:
+		if (map->volume_blk_size == 512) {
+			cp->tweak_lower =
+				(((u32) cmd->cmnd[6]) << 24) |
+				(((u32) cmd->cmnd[7]) << 16) |
+				(((u32) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			cp->tweak_upper =
+				(((u32) cmd->cmnd[2]) << 24) |
+				(((u32) cmd->cmnd[3]) << 16) |
+				(((u32) cmd->cmnd[4]) << 8) |
+					cmd->cmnd[5];
+		} else {
+			first_block =
+				(((u64) cmd->cmnd[2]) << 56) |
+				(((u64) cmd->cmnd[3]) << 48) |
+				(((u64) cmd->cmnd[4]) << 40) |
+				(((u64) cmd->cmnd[5]) << 32) |
+				(((u64) cmd->cmnd[6]) << 24) |
+				(((u64) cmd->cmnd[7]) << 16) |
+				(((u64) cmd->cmnd[8]) << 8) |
+					cmd->cmnd[9];
+			first_block = (first_block * map->volume_blk_size)/512;
+			cp->tweak_lower = (u32)first_block;
+			cp->tweak_upper = (u32)(first_block >> 32);
+		}
+		break;
+	default:
+		dev_err(&h->pdev->dev,
+			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
+			__func__);
+		BUG();
+		break;
+	}
+}
+
+static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
+	struct ioaccel2_sg_element *curr_sg;
+	int use_sg, i;
+	struct scatterlist *sg;
+	u64 addr64;
+	u32 len;
+	u32 total_len = 0;
+
+	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
+		return IO_ACCEL_INELIGIBLE;
+
+	if (fixup_ioaccel_cdb(cdb, &cdb_len))
+		return IO_ACCEL_INELIGIBLE;
+	c->cmd_type = CMD_IOACCEL2;
+	/* Adjust the DMA address to point to the accelerated command buffer */
+	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
+				(c->cmdindex * sizeof(*cp));
+	BUG_ON(c->busaddr & 0x0000007F);
+
+	memset(cp, 0, sizeof(*cp));
+	cp->IU_type = IOACCEL2_IU_TYPE;
+
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0)
+		return use_sg;
+
+	if (use_sg) {
+		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
+		curr_sg = cp->sg;
+		scsi_for_each_sg(cmd, sg, use_sg, i) {
+			addr64 = (u64) sg_dma_address(sg);
+			len = sg_dma_len(sg);
+			total_len += len;
+			curr_sg->address = cpu_to_le64(addr64);
+			curr_sg->length = cpu_to_le32(len);
+			curr_sg->reserved[0] = 0;
+			curr_sg->reserved[1] = 0;
+			curr_sg->reserved[2] = 0;
+			curr_sg->chain_indicator = 0;
+			curr_sg++;
+		}
+
+		switch (cmd->sc_data_direction) {
+		case DMA_TO_DEVICE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_DATA_OUT;
+			break;
+		case DMA_FROM_DEVICE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_DATA_IN;
+			break;
+		case DMA_NONE:
+			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+			cp->direction |= IOACCEL2_DIR_NO_DATA;
+			break;
+		default:
+			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+				cmd->sc_data_direction);
+			BUG();
+			break;
+		}
+	} else {
+		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
+		cp->direction |= IOACCEL2_DIR_NO_DATA;
+	}
+
+	/* Set encryption parameters, if necessary */
+	set_encrypt_ioaccel2(h, c, cp);
+
+	cp->scsi_nexus = ioaccel_handle;
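+	/* the tag encodes the command index so the completion handler
+	 * can look the command up directly */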
+	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
+				DIRECT_LOOKUP_BIT;
+	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
+
+	/* fill in sg elements */
+	cp->sg_count = (u8) use_sg;
+
+	cp->data_len = cpu_to_le32(total_len);
+	cp->err_ptr = cpu_to_le64(c->busaddr +
+			offsetof(struct io_accel2_cmd, error_data));
+	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
+
+	enqueue_cmd_and_start_io(h, c);
+	return 0;
+}
+
+/*
+ * Queue a command to the correct I/O accelerator path.
+ */
+static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
+	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
+	u8 *scsi3addr)
+{
+	if (h->transMethod & CFGTBL_Trans_io_accel1)
+		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
+			cdb, cdb_len, scsi3addr);
+	else
+		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
+			cdb, cdb_len, scsi3addr);
+}
+
+static void raid_map_helper(struct raid_map_data *map,
+		int offload_to_mirror, u32 *map_index, u32 *current_group)
+{
+	if (offload_to_mirror == 0) {
+		/* use physical disk in the first mirrored group. */
+		*map_index %= map->data_disks_per_row;
+		return;
+	}
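+	/* walk the groups until *map_index lands in the requested one */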
+	do {
+		/* determine mirror group that *map_index indicates */
+		*current_group = *map_index / map->data_disks_per_row;
+		if (offload_to_mirror == *current_group)
+			continue;
+		if (*current_group < (map->layout_map_count - 1)) {
+			/* select map index from next group */
+			*map_index += map->data_disks_per_row;
+			(*current_group)++;
+		} else {
+			/* select map index from first group */
+			*map_index %= map->data_disks_per_row;
+			*current_group = 0;
+		}
+	} while (offload_to_mirror != *current_group);
+}
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct scsi_cmnd *cmd = c->scsi_cmd;
+	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
+	struct raid_map_data *map = &dev->raid_map;
+	struct raid_map_disk_data *dd = &map->data[0];
+	int is_write = 0;
+	u32 map_index;
+	u64 first_block, last_block;
+	u32 block_cnt;
+	u32 blocks_per_row;
+	u64 first_row, last_row;
+	u32 first_row_offset, last_row_offset;
+	u32 first_column, last_column;
+	u64 r0_first_row, r0_last_row;
+	u32 r5or6_blocks_per_row;
+	u64 r5or6_first_row, r5or6_last_row;
+	u32 r5or6_first_row_offset, r5or6_last_row_offset;
+	u32 r5or6_first_column, r5or6_last_column;
+	u32 total_disks_per_row;
+	u32 stripesize;
+	u32 first_group, last_group, current_group;
+	u32 map_row;
+	u32 disk_handle;
+	u64 disk_block;
+	u32 disk_block_cnt;
+	u8 cdb[16];
+	u8 cdb_len;
+#if BITS_PER_LONG == 32
+	u64 tmpdiv;
+#endif
+	int offload_to_mirror;
+
+	BUG_ON(!(dev->offload_config && dev->offload_enabled));
+
+	/* check for valid opcode, get LBA and block count */
+	switch (cmd->cmnd[0]) {
+	case WRITE_6:
+		is_write = 1;
+	case READ_6:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 8) |
+			cmd->cmnd[3];
+		block_cnt = cmd->cmnd[4];
+		break;
+	case WRITE_10:
+		is_write = 1;
+	case READ_10:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 24) |
+			(((u64) cmd->cmnd[3]) << 16) |
+			(((u64) cmd->cmnd[4]) << 8) |
+			cmd->cmnd[5];
+		block_cnt =
+			(((u32) cmd->cmnd[7]) << 8) |
+			cmd->cmnd[8];
+		break;
+	case WRITE_12:
+		is_write = 1;
+	case READ_12:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 24) |
+			(((u64) cmd->cmnd[3]) << 16) |
+			(((u64) cmd->cmnd[4]) << 8) |
+			cmd->cmnd[5];
+		block_cnt =
+			(((u32) cmd->cmnd[6]) << 24) |
+			(((u32) cmd->cmnd[7]) << 16) |
+			(((u32) cmd->cmnd[8]) << 8) |
+			cmd->cmnd[9];
+		break;
+	case WRITE_16:
+		is_write = 1;
+	case READ_16:
+		first_block =
+			(((u64) cmd->cmnd[2]) << 56) |
+			(((u64) cmd->cmnd[3]) << 48) |
+			(((u64) cmd->cmnd[4]) << 40) |
+			(((u64) cmd->cmnd[5]) << 32) |
+			(((u64) cmd->cmnd[6]) << 24) |
+			(((u64) cmd->cmnd[7]) << 16) |
+			(((u64) cmd->cmnd[8]) << 8) |
+			cmd->cmnd[9];
+		block_cnt =
+			(((u32) cmd->cmnd[10]) << 24) |
+			(((u32) cmd->cmnd[11]) << 16) |
+			(((u32) cmd->cmnd[12]) << 8) |
+			cmd->cmnd[13];
+		break;
+	default:
+		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
+	}
+	BUG_ON(block_cnt == 0);
+	last_block = first_block + block_cnt - 1;
+
+	/* check for write to non-RAID-0 */
+	if (is_write && dev->raid_level != 0)
+		return IO_ACCEL_INELIGIBLE;
+
+	/* check for invalid block or wraparound */
+	if (last_block >= map->volume_blk_cnt || last_block < first_block)
+		return IO_ACCEL_INELIGIBLE;
+
+	/* calculate stripe information for the request */
+	blocks_per_row = map->data_disks_per_row * map->strip_size;
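+	/* e.g. 3 data disks with 128-block strips give a 384-block row;
+	 * only requests confined to one strip are eligible for offload */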
+#if BITS_PER_LONG == 32
+	tmpdiv = first_block;
+	(void) do_div(tmpdiv, blocks_per_row);
+	first_row = tmpdiv;
+	tmpdiv = last_block;
+	(void) do_div(tmpdiv, blocks_per_row);
+	last_row = tmpdiv;
+	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+	tmpdiv = first_row_offset;
+	(void) do_div(tmpdiv, map->strip_size);
+	first_column = tmpdiv;
+	tmpdiv = last_row_offset;
+	(void) do_div(tmpdiv, map->strip_size);
+	last_column = tmpdiv;
+#else
+	first_row = first_block / blocks_per_row;
+	last_row = last_block / blocks_per_row;
+	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+	first_column = first_row_offset / map->strip_size;
+	last_column = last_row_offset / map->strip_size;
+#endif

-	if (chained) {
-		cp->Header.SGList = h->max_cmd_sg_entries;
-		cp->Header.SGTotal = (u16) (use_sg + 1);
-		if (hpsa_map_sg_chain_block(h, cp)) {
-			scsi_dma_unmap(cmd);
-			return -1;
-		}
-		return 0;
-	}
+	/* if this isn't a single row/column then give to the controller */
+	if ((first_row != last_row) || (first_column != last_column))
+		return IO_ACCEL_INELIGIBLE;
+
+	/* proceeding with driver mapping */
+	total_disks_per_row = map->data_disks_per_row +
+				map->metadata_disks_per_row;
+	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+				map->row_cnt;
+	map_index = (map_row * total_disks_per_row) + first_column;
+
+	switch (dev->raid_level) {
+	case HPSA_RAID_0:
+		break; /* nothing special to do */
+	case HPSA_RAID_1:
+		/* Handles load balance across RAID 1 members.
+		 * (2-drive R1 and R10 with even # of drives.)
+		 * Appropriate for SSDs, not optimal for HDDs
+		 */
+		BUG_ON(map->layout_map_count != 2);
+		if (dev->offload_to_mirror)
+			map_index += map->data_disks_per_row;
+		dev->offload_to_mirror = !dev->offload_to_mirror;
+		break;
+	case HPSA_RAID_ADM:
+		/* Handles N-way mirrors (R1-ADM)
+		 * and R10 with # of drives divisible by 3.)
+		 */
+		BUG_ON(map->layout_map_count != 3);
+
+		offload_to_mirror = dev->offload_to_mirror;
+		raid_map_helper(map, offload_to_mirror,
+				&map_index, &current_group);
+		/* set mirror group to use next time */
+		offload_to_mirror =
+			(offload_to_mirror >= map->layout_map_count - 1)
+			? 0 : offload_to_mirror + 1;
+		/* FIXME: remove after debug/dev */
+		BUG_ON(offload_to_mirror >= map->layout_map_count);
+		dev_warn(&h->pdev->dev,
+			"DEBUG: Using physical disk map index %d from mirror group %d\n",
+			map_index, offload_to_mirror);
+		dev->offload_to_mirror = offload_to_mirror;
+		/* Avoid direct use of dev->offload_to_mirror within this
+		 * function since multiple threads might simultaneously
+		 * increment it beyond the range of dev->layout_map_count -1.
+		 */
+		break;
+	case HPSA_RAID_5:
+	case HPSA_RAID_6:
+		if (map->layout_map_count <= 1)
+			break;

-sglist_finished:
+		/* Verify first and last block are in same RAID group */
+		r5or6_blocks_per_row =
+			map->strip_size * map->data_disks_per_row;
+		BUG_ON(r5or6_blocks_per_row == 0);
+		stripesize = r5or6_blocks_per_row * map->layout_map_count;
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_group = do_div(tmpdiv, stripesize);
+		tmpdiv = first_group;
+		(void) do_div(tmpdiv, r5or6_blocks_per_row);
+		first_group = tmpdiv;
+		tmpdiv = last_block;
+		last_group = do_div(tmpdiv, stripesize);
+		tmpdiv = last_group;
+		(void) do_div(tmpdiv, r5or6_blocks_per_row);
+		last_group = tmpdiv;
+#else
+		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+		if (first_group != last_group)
+			return IO_ACCEL_INELIGIBLE;
+
+		/* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		(void) do_div(tmpdiv, stripesize);
+		first_row = r5or6_first_row = r0_first_row = tmpdiv;
+		tmpdiv = last_block;
+		(void) do_div(tmpdiv, stripesize);
+		r5or6_last_row = r0_last_row = tmpdiv;
+#else
+		first_row = r5or6_first_row = r0_first_row =
+			first_block / stripesize;
+		r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+		if (r5or6_first_row != r5or6_last_row)
+			return IO_ACCEL_INELIGIBLE;
+
+		/* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = first_row_offset;
+		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
+		r5or6_first_row_offset = first_row_offset;
+		tmpdiv = last_block;
+		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = r5or6_last_row_offset;
+		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+		tmpdiv = r5or6_first_row_offset;
+		(void) do_div(tmpdiv, map->strip_size);
+		first_column = r5or6_first_column = tmpdiv;
+		tmpdiv = r5or6_last_row_offset;
+		(void) do_div(tmpdiv, map->strip_size);
+		r5or6_last_column = tmpdiv;
+#else
+		first_row_offset = r5or6_first_row_offset =
+			(u32)((first_block % stripesize) %
+				r5or6_blocks_per_row);
+
+		r5or6_last_row_offset =
+			(u32)((last_block % stripesize) %
+				r5or6_blocks_per_row);
+
+		first_column = r5or6_first_column =
+			r5or6_first_row_offset / map->strip_size;
+		r5or6_last_column =
+			r5or6_last_row_offset / map->strip_size;
+#endif
+		if (r5or6_first_column != r5or6_last_column)
+			return IO_ACCEL_INELIGIBLE;

-	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
-	cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
-	return 0;
-}
+		/* Request is eligible */
+		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+			map->row_cnt;

+		map_index = (first_group *
+			(map->row_cnt * total_disks_per_row)) +
+			(map_row * total_disks_per_row) + first_column;
+		break;
+	default:
+		return IO_ACCEL_INELIGIBLE;
+	}
+
+	disk_handle = dd[map_index].ioaccel_handle;
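+	/* physical LBA = volume start on this disk + full strips in
+	 * earlier rows + the offset within this strip */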
+	disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
+			(first_row_offset - (first_column * map->strip_size));
+	disk_block_cnt = block_cnt;
+
+	/* handle differing logical/physical block sizes */
+	if (map->phys_blk_shift) {
+		disk_block <<= map->phys_blk_shift;
+		disk_block_cnt <<= map->phys_blk_shift;
+	}
+	BUG_ON(disk_block_cnt > 0xffff);
+
+	/* build the new CDB for the physical disk I/O */
+	if (disk_block > 0xffffffff) {
+		cdb[0] = is_write ? WRITE_16 : READ_16;
+		cdb[1] = 0;
+		cdb[2] = (u8) (disk_block >> 56);
+		cdb[3] = (u8) (disk_block >> 48);
+		cdb[4] = (u8) (disk_block >> 40);
+		cdb[5] = (u8) (disk_block >> 32);
+		cdb[6] = (u8) (disk_block >> 24);
+		cdb[7] = (u8) (disk_block >> 16);
+		cdb[8] = (u8) (disk_block >> 8);
+		cdb[9] = (u8) (disk_block);
+		cdb[10] = (u8) (disk_block_cnt >> 24);
+		cdb[11] = (u8) (disk_block_cnt >> 16);
+		cdb[12] = (u8) (disk_block_cnt >> 8);
+		cdb[13] = (u8) (disk_block_cnt);
+		cdb[14] = 0;
+		cdb[15] = 0;
+		cdb_len = 16;
+	} else {
+		cdb[0] = is_write ? WRITE_10 : READ_10;
+		cdb[1] = 0;
+		cdb[2] = (u8) (disk_block >> 24);
+		cdb[3] = (u8) (disk_block >> 16);
+		cdb[4] = (u8) (disk_block >> 8);
+		cdb[5] = (u8) (disk_block);
+		cdb[6] = 0;
+		cdb[7] = (u8) (disk_block_cnt >> 8);
+		cdb[8] = (u8) (disk_block_cnt);
+		cdb[9] = 0;
+		cdb_len = 10;
+	}
+	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
+						dev->scsi3addr);
+}

 static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
@@ -2169,6 +3951,7 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;
+	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
@@ -2203,6 +3986,32 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
+
+	/* Call alternate submit routine for I/O accelerated commands.
+	 * Retries always go down the normal I/O path.
+	 */
+	if (likely(cmd->retries == 0 &&
+		cmd->request->cmd_type == REQ_TYPE_FS &&
+		h->acciopath_status)) {
+		if (dev->offload_enabled) {
+			rc = hpsa_scsi_ioaccel_raid_map(h, c);
+			if (rc == 0)
+				return 0; /* Sent on ioaccel path */
+			if (rc < 0) {   /* scsi_dma_map failed. */
+				cmd_free(h, c);
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+		} else if (dev->ioaccel_handle) {
+			rc = hpsa_scsi_ioaccel_direct_map(h, c);
+			if (rc == 0)
+				return 0; /* Sent on direct map path */
+			if (rc < 0) {   /* scsi_dma_map failed. */
+				cmd_free(h, c);
+				return SCSI_MLQUEUE_HOST_BUSY;
+			}
+		}
+	}
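+	/* rc > 0 (IO_ACCEL_INELIGIBLE) falls through to the RAID path */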
+
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
@@ -2262,11 +4071,38 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,

 static DEF_SCSI_QCMD(hpsa_scsi_queue_command)

+static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
+{
+	unsigned long flags;
+
+	/*
+	 * Don't let rescans be initiated on a controller known
+	 * to be locked up. If the controller locks up *during*
+	 * a rescan, that thread is probably hosed, but at least
+	 * we can prevent new rescan threads from piling up on a
+	 * locked up controller.
+	 */
+	spin_lock_irqsave(&h->lock, flags);
+	if (unlikely(h->lockup_detected)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		spin_lock_irqsave(&h->scan_lock, flags);
+		h->scan_finished = 1;
+		wake_up_all(&h->scan_wait_queue);
+		spin_unlock_irqrestore(&h->scan_lock, flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return 0;
+}
+
 static void hpsa_scan_start(struct Scsi_Host *sh)
 {
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

+	if (do_not_scan_if_controller_locked_up(h))
+		return;
+
	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
@@ -2283,6 +4119,9 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

+	if (do_not_scan_if_controller_locked_up(h))
+		return;
+
	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
@@ -2346,7 +4185,10 @@ static int hpsa_register_scsi(struct ctlr_info *h)
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
-	sh->cmd_per_lun = h->nr_cmds;
+	if (h->hba_mode_enabled)
+		sh->cmd_per_lun = 7;
+	else
+		sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
@@ -2372,7 +4214,7 @@ static int hpsa_register_scsi(struct ctlr_info *h)
 static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
 {
-	int rc = 0;
+	int rc;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;
@@ -2392,6 +4234,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
	 */
	msleep(1000 * waittime);
	count++;
+	rc = 0; /* Device ready. */

	/* Increase wait time with each try, up to a point. */
	if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
@@ -2448,7 +4291,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	/* send a reset to the SCSI LUN which the command was sent to */
-	rc = hpsa_send_reset(h, dev->scsi3addr);
+	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
		return SUCCESS;

@@ -2471,12 +4314,36 @@ static void swizzle_abort_tag(u8 *tag)
	tag[7] = original_tag[4];
 }

+static void hpsa_get_tag(struct ctlr_info *h,
+	struct CommandList *c, u32 *taglower, u32 *tagupper)
+{
+	if (c->cmd_type == CMD_IOACCEL1) {
+		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
+			&h->ioaccel_cmd_pool[c->cmdindex];
+		*tagupper = cm1->Tag.upper;
+		*taglower = cm1->Tag.lower;
+		return;
+	}
+	if (c->cmd_type == CMD_IOACCEL2) {
+		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
+			&h->ioaccel2_cmd_pool[c->cmdindex];
+		/* upper tag not used in ioaccel2 mode */
+		memset(tagupper, 0, sizeof(*tagupper));
+		*taglower = cm2->Tag;
+		return;
+	}
+	*tagupper = c->Header.Tag.upper;
+	*taglower = c->Header.Tag.lower;
+}
+
 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int swizzle)
 {
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
+	u32 tagupper, taglower;

	c = cmd_special_alloc(h);
	if (c == NULL) {	/* trouble... */
@@ -2490,8 +4357,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	if (swizzle)
		swizzle_abort_tag(&c->Request.CDB[4]);
	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
-		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower);
+		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
@@ -2503,15 +4371,14 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
-			__func__, abort->Header.Tag.upper,
-			abort->Header.Tag.lower);
-		hpsa_scsi_interpret_error(c);
+			__func__, tagupper, taglower);
+		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_special_free(h, c);
-	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
-		abort->Header.Tag.upper, abort->Header.Tag.lower);
+	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
+		__func__, tagupper, taglower);
	return rc;
 }

@@ -2565,6 +4432,83 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
	return NULL;
 }

+/* ioaccel2 path firmware cannot handle abort task requests.
+ * Change abort requests to physical target reset, and send to the
+ * address of the physical disk used for the ioaccel 2 command.
+ * Return 0 on success (IO_OK)
+ *	  -1 on failure
+ */
+
+static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
+	unsigned char *scsi3addr, struct CommandList *abort)
+{
+	int rc = IO_OK;
+	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
+	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
+	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
+	unsigned char *psa = &phys_scsi3addr[0];
+
+	/* Get a pointer to the hpsa logical device. */
+	scmd = (struct scsi_cmnd *) abort->scsi_cmd;
+	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
+	if (dev == NULL) {
+		dev_warn(&h->pdev->dev,
+			"Cannot abort: no device pointer for command.\n");
+		return -1; /* not abortable */
+	}
+
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
+			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
+
+	if (!dev->offload_enabled) {
+		dev_warn(&h->pdev->dev,
+			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
+		return -1; /* not abortable */
+	}
+
+	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
+	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
+		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
+		return -1; /* not abortable */
+	}
+
+	/* send the reset */
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return rc; /* failed to reset */
+	}
+
+	/* wait for device to recover */
+	if (wait_for_device_to_become_ready(h, psa) != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return -1; /* failed to recover */
+	}
+
+	/* device recovered */
+	dev_info(&h->pdev->dev,
+		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		psa[0], psa[1], psa[2], psa[3],
+		psa[4], psa[5], psa[6], psa[7]);
+
+	return rc; /* success */
+}
+
 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
 * tell which kind we're dealing with, so we send the abort both ways. There
 * shouldn't be any collisions between swizzled and unswizzled tags due to the
@@ -2578,6 +4522,14 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	struct CommandList *c;
	int rc = 0, rc2 = 0;

+	/* io accelerator mode 2 commands should be aborted via the
+ * accelerated path, since RAID path is unaware of these commands,
|
|
|
+ * but underlying firmware can't handle abort TMF.
|
|
|
+ * Change abort to physical device reset.
|
|
|
+ */
|
|
|
+ if (abort->cmd_type == CMD_IOACCEL2)
|
|
|
+ return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
|
|
|
+
|
|
|
/* we do not expect to find the swizzled tag in our queue, but
|
|
|
* check anyway just to be sure the assumptions which make this
|
|
|
* the case haven't become wrong.
|
|
|
@@ -2616,6 +4568,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
|
|
|
struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
|
|
|
char msg[256]; /* For debug messaging. */
|
|
|
int ml = 0;
|
|
|
+ u32 tagupper, taglower;
|
|
|
|
|
|
/* Find the controller of the command to be aborted */
|
|
|
h = sdev_to_hba(sc->device);
|
|
|
@@ -2648,9 +4601,8 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
|
|
|
msg);
|
|
|
return FAILED;
|
|
|
}
|
|
|
-
|
|
|
- ml += sprintf(msg+ml, "Tag:0x%08x:%08x ",
|
|
|
- abort->Header.Tag.upper, abort->Header.Tag.lower);
|
|
|
+ hpsa_get_tag(h, abort, &taglower, &tagupper);
|
|
|
+ ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
|
|
|
as = (struct scsi_cmnd *) abort->scsi_cmd;
|
|
|
if (as != NULL)
|
|
|
ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
|
|
|
@@ -2776,6 +4728,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
|
|
|
return NULL;
|
|
|
memset(c, 0, sizeof(*c));
|
|
|
|
|
|
+ c->cmd_type = CMD_SCSI;
|
|
|
c->cmdindex = -1;
|
|
|
|
|
|
c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
|
|
|
@@ -3038,7 +4991,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|
|
c->SG[0].Addr.lower = temp64.val32.lower;
|
|
|
c->SG[0].Addr.upper = temp64.val32.upper;
|
|
|
c->SG[0].Len = iocommand.buf_size;
|
|
|
- c->SG[0].Ext = 0; /* we are not chaining*/
|
|
|
+ c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
|
|
|
}
|
|
|
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
|
|
|
if (iocommand.buf_size > 0)
|
|
|
@@ -3168,8 +5121,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
|
|
|
c->SG[i].Addr.lower = temp64.val32.lower;
|
|
|
c->SG[i].Addr.upper = temp64.val32.upper;
|
|
|
c->SG[i].Len = buff_size[i];
|
|
|
- /* we are not chaining */
|
|
|
- c->SG[i].Ext = 0;
|
|
|
+ c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
|
|
|
}
|
|
|
}
|
|
|
hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
|
|
|
@@ -3304,7 +5256,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
|
|
|
}
|
|
|
|
|
|
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
|
|
|
- void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
|
|
|
+ void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
|
|
|
int cmd_type)
|
|
|
{
|
|
|
int pci_dir = XFER_NONE;
|
|
|
@@ -3327,9 +5279,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
|
|
|
switch (cmd) {
|
|
|
case HPSA_INQUIRY:
|
|
|
/* are we trying to read a vital product page */
|
|
|
- if (page_code != 0) {
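+		/* VPD_PAGE (bit 8) lets VPD page 0 be requested explicitly;
+		 * the low byte selects the page itself */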
+		if (page_code & VPD_PAGE) {
			c->Request.CDB[1] = 0x01;
-			c->Request.CDB[2] = page_code;
+			c->Request.CDB[2] = (page_code & 0xff);
		}
		c->Request.CDBLen = 6;
		c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3369,6 +5321,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
		c->Request.Type.Direction = XFER_NONE;
		c->Request.Timeout = 0;
		break;
+	case HPSA_GET_RAID_MAP:
+		c->Request.CDBLen = 12;
+		c->Request.Type.Attribute = ATTR_SIMPLE;
+		c->Request.Type.Direction = XFER_READ;
+		c->Request.Timeout = 0;
+		c->Request.CDB[0] = HPSA_CISS_READ;
+		c->Request.CDB[1] = cmd;
+		c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+		c->Request.CDB[7] = (size >> 16) & 0xFF;
+		c->Request.CDB[8] = (size >> 8) & 0xFF;
+		c->Request.CDB[9] = size & 0xFF;
+		break;
+	case BMIC_SENSE_CONTROLLER_PARAMETERS:
+		c->Request.CDBLen = 10;
+		c->Request.Type.Attribute = ATTR_SIMPLE;
+		c->Request.Type.Direction = XFER_READ;
+		c->Request.Timeout = 0;
+		c->Request.CDB[0] = BMIC_READ;
+		c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
+		c->Request.CDB[7] = (size >> 16) & 0xFF;
+		c->Request.CDB[8] = (size >> 8) & 0xFF;
+		break;
	default:
		dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
		BUG();
@@ -3562,7 +5536,8 @@ static inline void finish_cmd(struct CommandList *c)
	spin_unlock_irqrestore(&h->lock, flags);

	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
-	if (likely(c->cmd_type == CMD_SCSI))
+	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
+			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
@@ -4169,21 +6144,24 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
+		h->msix_vector = MAX_REPLY_QUEUES;
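+		/* ask for the max; if fewer vectors are available, retry
+		 * below with however many the system grants */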
		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
-			MAX_REPLY_QUEUES);
-		if (!err) {
-			for (i = 0; i < MAX_REPLY_QUEUES; i++)
-				h->intr[i] = hpsa_msix_entries[i].vector;
-			h->msix_vector = 1;
-			return;
-		}
+			h->msix_vector);
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
				"available\n", err);
-			goto default_int_mode;
+			h->msix_vector = err;
+			err = pci_enable_msix(h->pdev, hpsa_msix_entries,
+				h->msix_vector);
+		}
+		if (!err) {
+			for (i = 0; i < h->msix_vector; i++)
+				h->intr[i] = hpsa_msix_entries[i].vector;
+			return;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
				err);
+			h->msix_vector = 0;
			goto default_int_mode;
		}
	}
@@ -4336,6 +6314,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	/*
	 * Limit in-command s/g elements to 32 save dma'able memory.
	 * Howvever spec says if 0, use 31
@@ -4352,6 +6331,10 @@ static void hpsa_find_board_params(struct ctlr_info *h)

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
+	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
+		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
+	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
+		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
 }

 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
@@ -4390,6 +6373,23 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
 }

+static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
+{
+	int i;
+	u32 doorbell_value;
+	unsigned long flags;
+	/* wait until the clear_event_notify bit 6 is cleared by controller. */
+	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+		spin_lock_irqsave(&h->lock, flags);
+		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
+			break;
+		/* delay and try again */
+		msleep(20);
+	}
+}
+
 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 {
	int i;
@@ -4420,18 +6420,20 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
-	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
-		dev_warn(&h->pdev->dev,
-			"unable to get board into simple mode\n");
-		return -ENODEV;
-	}
+	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
+		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
+error:
+	dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
+	return -ENODEV;
 }

 static int hpsa_pci_init(struct ctlr_info *h)
@@ -4577,11 +6579,19 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct CommandList),
			h->cmd_pool, h->cmd_pool_dhandle);
+	if (h->ioaccel2_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct ErrorInfo),
			h->errinfo_pool,
			h->errinfo_pool_dhandle);
+	if (h->ioaccel_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(struct io_accel1_cmd),
+			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
 }

 static int hpsa_request_irq(struct ctlr_info *h,
@@ -4597,15 +6607,15 @@ static int hpsa_request_irq(struct ctlr_info *h,
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

-	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) {
+	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
-		for (i = 0; i < MAX_REPLY_QUEUES; i++)
+		for (i = 0; i < h->msix_vector; i++)
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
	} else {
		/* Use single reply pool */
-		if (h->msix_vector || h->msi_vector) {
+		if (h->msix_vector > 0 || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
@@ -4658,7 +6668,7 @@ static void free_irqs(struct ctlr_info *h)
		return;
	}

-	for (i = 0; i < MAX_REPLY_QUEUES; i++)
+	for (i = 0; i < h->msix_vector; i++)
		free_irq(h->intr[i], &h->q[i]);
 }

@@ -4681,6 +6691,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
+	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
@@ -4760,6 +6771,92 @@ static void detect_controller_lockup(struct ctlr_info *h)
	h->last_heartbeat_timestamp = now;
 }

+static void hpsa_ack_ctlr_events(struct ctlr_info *h)
+{
+	int i;
+	char *event_type;
+
+	/* Clear the driver-requested rescan flag */
+	h->drv_req_rescan = 0;
+
+	/* Ask the controller to clear the events we're handling. */
+	if ((h->transMethod & (CFGTBL_Trans_io_accel1
+			| CFGTBL_Trans_io_accel2)) &&
+		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
+		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
+
+		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
+			event_type = "state change";
+		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
+			event_type = "configuration change";
+		/* Stop sending new RAID offload reqs via the IO accelerator */
+		scsi_block_requests(h->scsi_host);
+		for (i = 0; i < h->ndevices; i++)
+			h->dev[i]->offload_enabled = 0;
+		hpsa_drain_accel_commands(h);
+		/* Set 'accelerator path config change' bit */
+		dev_warn(&h->pdev->dev,
+			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
+			h->events, event_type);
+		writel(h->events, &(h->cfgtable->clear_event_notify));
+		/* Set the "clear event notify field update" bit 6 */
+		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+		/* Wait until ctlr clears 'clear event notify field', bit 6 */
+		hpsa_wait_for_clear_event_notify_ack(h);
+		scsi_unblock_requests(h->scsi_host);
+	} else {
+		/* Acknowledge controller notification events. */
+		writel(h->events, &(h->cfgtable->clear_event_notify));
+		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
+		hpsa_wait_for_clear_event_notify_ack(h);
+#if 0
+		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+		hpsa_wait_for_mode_change_ack(h);
+#endif
+	}
+	return;
+}
+
+/* Check a register on the controller to see if there are configuration
+ * changes (added/changed/removed logical drives, etc.) which mean that
+ * we should rescan the controller for devices.
+ * Also check flag for driver-initiated rescan.
+ */
+static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
+{
+	if (h->drv_req_rescan)
+		return 1;
+
+	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
+		return 0;
+
+	h->events = readl(&(h->cfgtable->event_notify));
+	return h->events & RESCAN_REQUIRED_EVENT_BITS;
+}
+
+/*
+ * Check if any of the offline devices have become ready
+ */
+static int hpsa_offline_devices_ready(struct ctlr_info *h)
+{
+	unsigned long flags;
+	struct offline_device_entry *d;
+	struct list_head *this, *tmp;
+
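+	/* the lock is dropped while hpsa_volume_offline() talks to the
+	 * controller, then reacquired before continuing the walk */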
|
|
|
+ spin_lock_irqsave(&h->offline_device_lock, flags);
|
|
|
+ list_for_each_safe(this, tmp, &h->offline_device_list) {
|
|
|
+ d = list_entry(this, struct offline_device_entry,
|
|
|
+ offline_list);
|
|
|
+		spin_unlock_irqrestore(&h->offline_device_lock, flags);
+		if (!hpsa_volume_offline(h, d->scsi3addr))
+			return 1;
+		spin_lock_irqsave(&h->offline_device_lock, flags);
+	}
+	spin_unlock_irqrestore(&h->offline_device_lock, flags);
+	return 0;
+}
+
+
 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
 {
 	unsigned long flags;
@@ -4768,6 +6865,15 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
 	detect_controller_lockup(h);
 	if (h->lockup_detected)
 		return;
+
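+	/* Rescan if the controller posted an event that changes the
+	 * device topology, or if a previously offline volume has become
+	 * ready; take a reference on the Scsi_Host so it cannot go away
+	 * while the scan runs.
+	 */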
+	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
+		scsi_host_get(h->scsi_host);
+		h->drv_req_rescan = 0;
+		hpsa_ack_ctlr_events(h);
+		hpsa_scan_start(h->scsi_host);
+		scsi_host_put(h->scsi_host);
+	}
+
 	spin_lock_irqsave(&h->lock, flags);
 	if (h->remove_in_progress) {
 		spin_unlock_irqrestore(&h->lock, flags);
@@ -4807,7 +6913,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	 * the 5 lower bits of the address are used by the hardware. and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
-#define COMMANDLIST_ALIGNMENT 32
+#define COMMANDLIST_ALIGNMENT 128
 	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
@@ -4817,7 +6923,9 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
 	INIT_LIST_HEAD(&h->cmpQ);
 	INIT_LIST_HEAD(&h->reqQ);
+	INIT_LIST_HEAD(&h->offline_device_list);
 	spin_lock_init(&h->lock);
+	spin_lock_init(&h->offline_device_lock);
 	spin_lock_init(&h->scan_lock);
 	spin_lock_init(&h->passthru_count_lock);
 	rc = hpsa_pci_init(h);
@@ -4859,6 +6967,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

 	pci_set_drvdata(pdev, h);
 	h->ndevices = 0;
+	h->hba_mode_enabled = 0;
 	h->scsi_host = NULL;
 	spin_lock_init(&h->devlock);
 	hpsa_put_ctlr_into_performant_mode(h);
@@ -4918,6 +7027,11 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto reinit_after_soft_reset;
 	}

+	/* Enable Accelerated IO path at driver layer */
+	h->acciopath_status = 1;
+
+	h->drv_req_rescan = 0;
+
 	/* Turn the interrupts on so we can service requests */
 	h->access.set_intr_mask(h, HPSA_INTR_ON);

@@ -5034,6 +7148,8 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 		h->reply_pool, h->reply_pool_dhandle);
 	kfree(h->cmd_pool_bits);
 	kfree(h->blockFetchTable);
+	kfree(h->ioaccel1_blockFetchTable);
+	kfree(h->ioaccel2_blockFetchTable);
 	kfree(h->hba_inquiry_data);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
@@ -5074,20 +7190,17 @@ static struct pci_driver hpsa_pci_driver = {
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
-	int nsgs, int *bucket_map)
+	int nsgs, int min_blocks, int *bucket_map)
{
	int i, j, b, size;

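+	/* Worked example, assuming the performant-mode table used below:
+	 * with bucket[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}
+	 * and min_blocks = 4, a command with i = 3 SG entries needs
+	 * 3 + 4 = 7 blocks; the first bucket that fits is bucket[2] = 8,
+	 * so bucket_map[3] selects bucket 2.
+	 */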
-	/* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
-		size = i + MINIMUM_TRANSFER_BLOCKS;
+		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
-		for (j = 0; j < 8; j++) {
+		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
@@ -5098,10 +7211,16 @@ static void calc_bucket_map(int bucket[], int num_buckets,
	}
}

-static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
+static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
+	unsigned long transMethod = CFGTBL_Trans_Performant |
+			(trans_support & CFGTBL_Trans_use_short_tags) |
+			CFGTBL_Trans_enable_directed_msix |
+			(trans_support & (CFGTBL_Trans_io_accel1
+				| CFGTBL_Trans_io_accel2));
+	struct access_method access = SA5_performant_access;

	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to to tell it 8 different
@@ -5121,6 +7240,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
+#define MIN_IOACCEL2_BFT_ENTRY 5
+#define HPSA_IOACCEL2_HEADER_SZ 4
+	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
+			13, 14, 15, 16, 17, 18, 19,
+			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
+	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
+	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
+	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
+			16 * MIN_IOACCEL2_BFT_ENTRY);
+	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
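+	/* Each bft2[] entry counts 16-byte blocks: the ioaccel2 frame
+	 * header is 4 such blocks (64 bytes, where the embedded SG list
+	 * begins) plus one block per SG element, which is what the
+	 * BUILD_BUG_ONs above pin down.
+	 */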
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
@@ -5133,7 +7262,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
-			SG_ENTRIES_IN_CMD, h->blockFetchTable);
+			SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

@@ -5150,9 +7279,22 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
			&h->transtable->RepQAddr[i].lower);
	}

-	writel(CFGTBL_Trans_Performant | use_short_tags |
-		CFGTBL_Trans_enable_directed_msix,
-		&(h->cfgtable->HostWrite.TransportRequest));
+	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
+	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
+	/*
+	 * Enable outbound interrupt coalescing in accelerator mode.
+	 */
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		access = SA5_ioaccel_mode1_access;
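+		/* Interrupt coalescing knobs; presumably the controller
+		 * fires an interrupt after 4 completions or after the
+		 * delay of 10 (controller-defined units), whichever
+		 * comes first.
+		 */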
+		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+	} else {
+		if (trans_support & CFGTBL_Trans_io_accel2) {
+			access = SA5_ioaccel_mode2_access;
+			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
+			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
+		}
+	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
@@ -5162,23 +7304,186 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
		return;
	}
	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
-	h->transMethod = CFGTBL_Trans_Performant;
+	h->access = access;
+	h->transMethod = transMethod;
+
+	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
+		(trans_support & CFGTBL_Trans_io_accel2)))
+		return;
+
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		/* Set up I/O accelerator mode */
+		for (i = 0; i < h->nreply_queues; i++) {
+			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
+			h->reply_queue[i].current_entry =
+				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
+		}
+		bft[7] = h->ioaccel_maxsg + 8;
+		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
+				h->ioaccel1_blockFetchTable);
+
+		/* initialize all reply queue entries to unused */
+		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
+				h->reply_pool_size);
+
+		/* set all the constant fields in the accelerator command
+		 * frames once at init time to save CPU cycles later.
+		 */
+		for (i = 0; i < h->nr_cmds; i++) {
+			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
+
+			cp->function = IOACCEL1_FUNCTION_SCSIIO;
+			cp->err_info = (u32) (h->errinfo_pool_dhandle +
+					(i * sizeof(struct ErrorInfo)));
+			cp->err_info_len = sizeof(struct ErrorInfo);
+			cp->sgl_offset = IOACCEL1_SGLOFFSET;
+			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
+			cp->timeout_sec = 0;
+			cp->ReplyQueue = 0;
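+			/* Pre-compute the tag: the command's pool index
+			 * shifted into the lookup field with the direct
+			 * lookup bit set, so a completion maps straight
+			 * back to this slot.
+			 */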
+			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
+						DIRECT_LOOKUP_BIT;
+			cp->Tag.upper = 0;
+			cp->host_addr.lower =
+				(u32) (h->ioaccel_cmd_pool_dhandle +
+					(i * sizeof(struct io_accel1_cmd)));
+			cp->host_addr.upper = 0;
+		}
+	} else if (trans_support & CFGTBL_Trans_io_accel2) {
+		u64 cfg_offset, cfg_base_addr_index;
+		u32 bft2_offset, cfg_base_addr;
+		int rc;
+
+		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+					&cfg_base_addr_index, &cfg_offset);
+		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
+		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
+		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
+				4, h->ioaccel2_blockFetchTable);
+		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
+		BUILD_BUG_ON(offsetof(struct CfgTable,
+				io_accel_request_size_offset) != 0xb8);
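+		/* The ioaccel2 fetch table lives in BAR space at an
+		 * offset the controller publishes in the config table;
+		 * map it and program the bucket sizes directly.
+		 */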
+		h->ioaccel2_bft2_regs =
+			remap_pci_mem(pci_resource_start(h->pdev,
+					cfg_base_addr_index) +
+					cfg_offset + bft2_offset,
+					ARRAY_SIZE(bft2) *
+					sizeof(*h->ioaccel2_bft2_regs));
+		for (i = 0; i < ARRAY_SIZE(bft2); i++)
+			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
+	}
+	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+	hpsa_wait_for_mode_change_ack(h);
+}
+
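+/* Allocate ioaccel1 mode command blocks and block fetch table */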
+static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
+{
+	h->ioaccel_maxsg =
+		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
+		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
+
+	/* Command structures must be aligned on a 128-byte boundary
+	 * because the 7 lower bits of the address are used by the
+	 * hardware.
+	 */
+#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
+	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
+			IOACCEL1_COMMANDLIST_ALIGNMENT);
+	h->ioaccel_cmd_pool =
+		pci_alloc_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+			&(h->ioaccel_cmd_pool_dhandle));
+
+	h->ioaccel1_blockFetchTable =
+		kmalloc(((h->ioaccel_maxsg + 1) *
+			sizeof(u32)), GFP_KERNEL);
+
+	if ((h->ioaccel_cmd_pool == NULL) ||
+		(h->ioaccel1_blockFetchTable == NULL))
+		goto clean_up;
+
+	memset(h->ioaccel_cmd_pool, 0,
+		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
+	return 0;
+
+clean_up:
+	if (h->ioaccel_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
+	kfree(h->ioaccel1_blockFetchTable);
+	return 1;
+}
+
+static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
+{
+	/* Allocate ioaccel2 mode command blocks and block fetch table */
+
+	h->ioaccel_maxsg =
+		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
+	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
+		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
+
+#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
+	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
+			IOACCEL2_COMMANDLIST_ALIGNMENT);
+	h->ioaccel2_cmd_pool =
+		pci_alloc_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			&(h->ioaccel2_cmd_pool_dhandle));
+
+	h->ioaccel2_blockFetchTable =
+		kmalloc(((h->ioaccel_maxsg + 1) *
+			sizeof(u32)), GFP_KERNEL);
+
+	if ((h->ioaccel2_cmd_pool == NULL) ||
+		(h->ioaccel2_blockFetchTable == NULL))
+		goto clean_up;
+
+	memset(h->ioaccel2_cmd_pool, 0,
+		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
+	return 0;
+
+clean_up:
+	if (h->ioaccel2_cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
+	kfree(h->ioaccel2_blockFetchTable);
+	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
+	unsigned long transMethod = CFGTBL_Trans_Performant |
+				CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

+	/* Check for I/O accelerator mode support */
+	if (trans_support & CFGTBL_Trans_io_accel1) {
+		transMethod |= CFGTBL_Trans_io_accel1 |
+				CFGTBL_Trans_enable_directed_msix;
+		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
+			goto clean_up;
+	} else {
+		if (trans_support & CFGTBL_Trans_io_accel2) {
+			transMethod |= CFGTBL_Trans_io_accel2 |
+				CFGTBL_Trans_enable_directed_msix;
+			if (ioaccel2_alloc_cmds_and_bft(h))
+				goto clean_up;
+		}
+	}
+
+	/* TODO, check that this next line h->nreply_queues is correct */
-	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1;
+	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
@@ -5200,9 +7505,7 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

-	hpsa_enter_performant_mode(h,
-		trans_support & CFGTBL_Trans_use_short_tags);
-
+	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
@@ -5212,6 +7515,31 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
	kfree(h->blockFetchTable);
}

+static int is_accelerated_cmd(struct CommandList *c)
+{
+	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
+}
+
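+/* Poll the request and completion queues until no accelerated
+ * commands remain in flight, sleeping 100 ms between passes.
+ */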
+static void hpsa_drain_accel_commands(struct ctlr_info *h)
+{
+	struct CommandList *c = NULL;
+	unsigned long flags;
+	int accel_cmds_out;
+
+	do { /* wait for all outstanding commands to drain out */
+		accel_cmds_out = 0;
+		spin_lock_irqsave(&h->lock, flags);
+		list_for_each_entry(c, &h->cmpQ, list)
+			accel_cmds_out += is_accelerated_cmd(c);
+		list_for_each_entry(c, &h->reqQ, list)
+			accel_cmds_out += is_accelerated_cmd(c);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (accel_cmds_out <= 0)
+			break;
+		msleep(100);
+	} while (1);
+}
+
/*
 * This is it.  Register the PCI driver information for the cards we control
 * the OS will call our registered routines when it finds one of our cards.
@@ -5226,5 +7554,83 @@ static void __exit hpsa_cleanup(void)
	pci_unregister_driver(&hpsa_pci_driver);
}

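+/* Compile-time layout checks: BUILD_BUG_ON fires if a structure member
+ * drifts from the offset the controller firmware expects.  The function
+ * is never called; it exists only so the checks get compiled.
+ */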
+static void __attribute__((unused)) verify_offsets(void)
+{
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+	VERIFY_OFFSET(structure_size, 0);
+	VERIFY_OFFSET(volume_blk_size, 4);
+	VERIFY_OFFSET(volume_blk_cnt, 8);
+	VERIFY_OFFSET(phys_blk_shift, 16);
+	VERIFY_OFFSET(parity_rotation_shift, 17);
+	VERIFY_OFFSET(strip_size, 18);
+	VERIFY_OFFSET(disk_starting_blk, 20);
+	VERIFY_OFFSET(disk_blk_cnt, 28);
+	VERIFY_OFFSET(data_disks_per_row, 36);
+	VERIFY_OFFSET(metadata_disks_per_row, 38);
+	VERIFY_OFFSET(row_cnt, 40);
+	VERIFY_OFFSET(layout_map_count, 42);
+	VERIFY_OFFSET(flags, 44);
+	VERIFY_OFFSET(dekindex, 46);
+	/* VERIFY_OFFSET(reserved, 48); */
+	VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
+
+	VERIFY_OFFSET(IU_type, 0);
+	VERIFY_OFFSET(direction, 1);
+	VERIFY_OFFSET(reply_queue, 2);
+	/* VERIFY_OFFSET(reserved1, 3); */
+	VERIFY_OFFSET(scsi_nexus, 4);
+	VERIFY_OFFSET(Tag, 8);
+	VERIFY_OFFSET(cdb, 16);
+	VERIFY_OFFSET(cciss_lun, 32);
+	VERIFY_OFFSET(data_len, 40);
+	VERIFY_OFFSET(cmd_priority_task_attr, 44);
+	VERIFY_OFFSET(sg_count, 45);
+	/* VERIFY_OFFSET(reserved3); */
+	VERIFY_OFFSET(err_ptr, 48);
+	VERIFY_OFFSET(err_len, 56);
+	/* VERIFY_OFFSET(reserved4); */
+	VERIFY_OFFSET(sg, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
+
+	VERIFY_OFFSET(dev_handle, 0x00);
+	VERIFY_OFFSET(reserved1, 0x02);
+	VERIFY_OFFSET(function, 0x03);
+	VERIFY_OFFSET(reserved2, 0x04);
+	VERIFY_OFFSET(err_info, 0x0C);
+	VERIFY_OFFSET(reserved3, 0x10);
+	VERIFY_OFFSET(err_info_len, 0x12);
+	VERIFY_OFFSET(reserved4, 0x13);
+	VERIFY_OFFSET(sgl_offset, 0x14);
+	VERIFY_OFFSET(reserved5, 0x15);
+	VERIFY_OFFSET(transfer_len, 0x1C);
+	VERIFY_OFFSET(reserved6, 0x20);
+	VERIFY_OFFSET(io_flags, 0x24);
+	VERIFY_OFFSET(reserved7, 0x26);
+	VERIFY_OFFSET(LUN, 0x34);
+	VERIFY_OFFSET(control, 0x3C);
+	VERIFY_OFFSET(CDB, 0x40);
+	VERIFY_OFFSET(reserved8, 0x50);
+	VERIFY_OFFSET(host_context_flags, 0x60);
+	VERIFY_OFFSET(timeout_sec, 0x62);
+	VERIFY_OFFSET(ReplyQueue, 0x64);
+	VERIFY_OFFSET(reserved9, 0x65);
+	VERIFY_OFFSET(Tag, 0x68);
+	VERIFY_OFFSET(host_addr, 0x70);
+	VERIFY_OFFSET(CISS_LUN, 0x78);
+	VERIFY_OFFSET(SG, 0x78 + 8);
+#undef VERIFY_OFFSET
+}
+
module_init(hpsa_init);
module_exit(hpsa_cleanup);