Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (40 commits)
  [SCSI] 3w-9xxx fix bug in sgl loading
  [SCSI] fcoe, libfc: adds enable/disable for fcoe interface
  [SCSI] libfc: reduce hold time on SCSI host lock
  [SCSI] libfc: remote port gets stuck in restart state without really restarting
  [SCSI] pm8001: misc code cleanup
  [SCSI] pm8001: enable read HBA SAS address from VPD
  [SCSI] pm8001: do not reset local sata as it will not be found if reset
  [SCSI] pm8001: bit set pm8001_ha->flags
  [SCSI] pm8001: fix potential NULL pointer dereference
  [SCSI] pm8001: set SSC down-spreading only to get fewer errors on some 6G device.
  [SCSI] pm8001: fix endian issues with SAS address
  [SCSI] pm8001: enhance error handling for IO path
  [SCSI] pm8001: Fix for sata io circular lock dependency.
  [SCSI] hpsa: add driver for HP Smart Array controllers.
  [SCSI] cxgb3i: always use negative errno in case of error
  [SCSI] bnx2i: minor code cleanup and update driver version
  [SCSI] bnx2i: Task management ABORT TASK fixes
  [SCSI] bnx2i: update CQ arming algorithm for 5771x chipsets
  [SCSI] bnx2i: Adjust sq_size module parameter to power of 2 only if a non-zero value is specified
  [SCSI] bnx2i: Add 5771E device support to bnx2i driver
  ...
Linus Torvalds 16 years ago
parent
commit
fc6f0700d5
49 changed files with 4914 additions and 360 deletions
  1. drivers/message/fusion/mptbase.c (+3 -3)
  2. drivers/misc/enclosure.c (+1 -0)
  3. drivers/scsi/3w-9xxx.c (+7 -4)
  4. drivers/scsi/Kconfig (+10 -0)
  5. drivers/scsi/Makefile (+1 -0)
  6. drivers/scsi/be2iscsi/be_cmds.c (+8 -4)
  7. drivers/scsi/bnx2i/bnx2i.h (+1 -0)
  8. drivers/scsi/bnx2i/bnx2i_hwi.c (+39 -12)
  9. drivers/scsi/bnx2i/bnx2i_init.c (+12 -4)
  10. drivers/scsi/bnx2i/bnx2i_iscsi.c (+0 -2)
  11. drivers/scsi/cxgb3i/cxgb3i_offload.c (+12 -12)
  12. drivers/scsi/cxgb3i/cxgb3i_pdu.c (+2 -2)
  13. drivers/scsi/device_handler/scsi_dh_rdac.c (+2 -0)
  14. drivers/scsi/fcoe/fcoe.c (+131 -4)
  15. drivers/scsi/hpsa.c (+3531 -0)
  16. drivers/scsi/hpsa.h (+273 -0)
  17. drivers/scsi/hpsa_cmd.h (+326 -0)
  18. drivers/scsi/ipr.c (+1 -0)
  19. drivers/scsi/libfc/fc_fcp.c (+36 -29)
  20. drivers/scsi/libfc/fc_lport.c (+6 -1)
  21. drivers/scsi/libfc/fc_rport.c (+1 -0)
  22. drivers/scsi/lpfc/lpfc_init.c (+12 -4)
  23. drivers/scsi/megaraid/megaraid_sas.c (+9 -5)
  24. drivers/scsi/mpt2sas/mpt2sas_base.c (+5 -0)
  25. drivers/scsi/mvsas/mv_init.c (+1 -0)
  26. drivers/scsi/osd/osd_initiator.c (+47 -41)
  27. drivers/scsi/pm8001/pm8001_ctl.h (+0 -10)
  28. drivers/scsi/pm8001/pm8001_hwi.c (+92 -57)
  29. drivers/scsi/pm8001/pm8001_hwi.h (+1 -2)
  30. drivers/scsi/pm8001/pm8001_init.c (+14 -5)
  31. drivers/scsi/pm8001/pm8001_sas.c (+53 -4)
  32. drivers/scsi/pm8001/pm8001_sas.h (+24 -8)
  33. drivers/scsi/pmcraid.c (+25 -9)
  34. drivers/scsi/pmcraid.h (+4 -1)
  35. drivers/scsi/qla2xxx/qla_def.h (+2 -3)
  36. drivers/scsi/qla2xxx/qla_gbl.h (+0 -2)
  37. drivers/scsi/qla2xxx/qla_init.c (+11 -1)
  38. drivers/scsi/qla2xxx/qla_isr.c (+10 -93)
  39. drivers/scsi/qla2xxx/qla_mid.c (+4 -0)
  40. drivers/scsi/qla2xxx/qla_os.c (+54 -21)
  41. drivers/scsi/qla2xxx/qla_version.h (+1 -1)
  42. drivers/scsi/scsi_lib.c (+1 -0)
  43. drivers/scsi/scsi_transport_fc.c (+14 -3)
  44. drivers/scsi/sd.c (+107 -0)
  45. drivers/scsi/sd.h (+2 -0)
  46. drivers/scsi/st.c (+12 -11)
  47. drivers/scsi/st.h (+1 -0)
  48. include/linux/enclosure.h (+2 -0)
  49. include/scsi/osd_initiator.h (+3 -2)

+ 3 - 3
drivers/message/fusion/mptbase.c

@@ -1587,7 +1587,7 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 {
 	u8		__iomem *mem;
 	int		 ii;
-	unsigned long	 mem_phys;
+	resource_size_t	 mem_phys;
 	unsigned long	 port;
 	u32		 msize;
 	u32		 psize;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 		return -EINVAL;
 	}
 	ioc->memmap = mem;
-	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n",
-	    ioc->name, mem, mem_phys));
+	dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
+	    ioc->name, mem, (unsigned long long)mem_phys));
 
 	ioc->mem_phys = mem_phys;
 	ioc->chip = (SYSIF_REGS __iomem *)mem;
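
Aside: the fix above works because resource_size_t follows phys_addr_t and may be 32 or 64 bits wide depending on kernel configuration, so the portable way to print it is a cast to unsigned long long with a %llx format. A minimal user-space sketch of the same idiom (the typedef is a stand-in; in the kernel the width is config-dependent):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's resource_size_t, whose width follows
 * phys_addr_t and depends on CONFIG_PHYS_ADDR_T_64BIT. */
typedef uint64_t resource_size_t;

int main(void)
{
	resource_size_t mem_phys = 0xfebf0000;

	/* The cast keeps "%llx" correct whether the underlying type
	 * is 32 or 64 bits wide. */
	printf("mem_phys = %llx\n", (unsigned long long)mem_phys);
	return 0;
}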

+ 1 - 0
drivers/misc/enclosure.c

@@ -391,6 +391,7 @@ static const char *const enclosure_status [] = {
 	[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
 	[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
 	[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
+	[ENCLOSURE_STATUS_MAX] = NULL,
 };
 
 static const char *const enclosure_type [] = {
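
Aside: a sketch of the sentinel pattern this one-liner establishes, with an illustrative (abridged) enum rather than the driver's full status list:

#include <stdio.h>

enum enclosure_status {
	ENCLOSURE_STATUS_OK,
	ENCLOSURE_STATUS_CRITICAL,
	ENCLOSURE_STATUS_MAX,		/* count, not a real status */
};

static const char *const enclosure_status[] = {
	[ENCLOSURE_STATUS_OK]       = "OK",
	[ENCLOSURE_STATUS_CRITICAL] = "critical",
	[ENCLOSURE_STATUS_MAX]      = NULL,	/* terminates scans */
};

int main(void)
{
	/* A lookup loop can stop on the NULL entry instead of trusting
	 * the caller to pass an in-range index. */
	for (int i = 0; enclosure_status[i]; i++)
		printf("%d: %s\n", i, enclosure_status[i]);
	return 0;
}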

+ 7 - 4
drivers/scsi/3w-9xxx.c

@@ -76,6 +76,7 @@
                  Fix bug in twa_get_param() on 4GB+.
                  Use pci_resource_len() for ioremap().
    2.26.02.012 - Add power management support.
+   2.26.02.013 - Fix bug in twa_load_sgl().
 */
 
 #include <linux/module.h>
@@ -100,7 +101,7 @@
 #include "3w-9xxx.h"
 
 /* Globals */
-#define TW_DRIVER_VERSION "2.26.02.012"
+#define TW_DRIVER_VERSION "2.26.02.013"
 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 static unsigned int twa_device_extension_count;
 static int twa_major = -1;
@@ -1382,10 +1383,12 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
 		newcommand = &full_command_packet->command.newcommand;
 		newcommand->request_id__lunl =
 			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
-		newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
-		newcommand->sg_list[0].length = cpu_to_le32(length);
+		if (length) {
+			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			newcommand->sg_list[0].length = cpu_to_le32(length);
+		}
 		newcommand->sgl_entries__lunh =
-			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), 1));
+			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
 	} else {
 		oldcommand = &full_command_packet->command.oldcommand;
 		oldcommand->request_id = request_id;
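
Aside: the point of the fix is that a zero-length ioctl buffer must produce zero SGL entries, not a single entry of length 0. A sketch of that behavior with an illustrative descriptor layout (not the 3ware wire format):

#include <stdio.h>
#include <stdint.h>

struct sg_entry {
	uint64_t address;
	uint32_t length;
};

/* Returns the number of SGL entries loaded, mirroring the
 * "length ? 1 : 0" count in the patched twa_load_sgl(). */
static int load_sgl(struct sg_entry *sg, uint64_t dma_handle,
		    uint32_t length)
{
	if (!length)
		return 0;
	sg->address = dma_handle;
	sg->length = length;
	return 1;
}

int main(void)
{
	struct sg_entry sg = { 0, 0 };

	printf("entries for len 512: %d\n", load_sgl(&sg, 0x1000, 512));
	printf("entries for len 0:   %d\n", load_sgl(&sg, 0x1000, 0));
	return 0;
}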

+ 10 - 0
drivers/scsi/Kconfig

@@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID
 	  Please read the comments at the top of
 	  <file:drivers/scsi/3w-xxxx.c>.
 
+config SCSI_HPSA
+	tristate "HP Smart Array SCSI driver"
+	depends on PCI && SCSI
+	help
+	  This driver supports HP Smart Array Controllers (circa 2009).
+	  It is a SCSI alternative to the cciss driver, which is a block
+	  driver.  Anyone wishing to use HP Smart Array controllers who
+	  would prefer the devices be presented to linux as SCSI devices,
+	  rather than as generic block devices should say Y here.
+
 config SCSI_3W_9XXX
 	tristate "3ware 9xxx SATA-RAID support"
 	depends on PCI && SCSI

+ 1 - 0
drivers/scsi/Makefile

@@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC)	+= bfa/
 obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
 obj-$(CONFIG_SCSI_T128)		+= t128.o
 obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
+obj-$(CONFIG_SCSI_HPSA)		+= hpsa.o
 obj-$(CONFIG_SCSI_DTC3280)	+= dtc.o
 obj-$(CONFIG_SCSI_SYM53C8XX_2)	+= sym53c8xx_2/
 obj-$(CONFIG_SCSI_ZALON)	+= zalon7xx.o

+ 8 - 4
drivers/scsi/be2iscsi/be_cmds.c

@@ -135,11 +135,15 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
 	while ((compl = be_mcc_compl_get(phba))) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
 			/* Interpret flags as an async trailer */
-			BUG_ON(!is_link_state_evt(compl->flags));
+			if (is_link_state_evt(compl->flags))
+				/* Interpret compl as a async link evt */
+				beiscsi_async_link_state_process(phba,
+				   (struct be_async_event_link_state *) compl);
+			else
+				SE_DEBUG(DBG_LVL_1,
+					 " Unsupported Async Event, flags"
+					 " = 0x%08x \n", compl->flags);
 
-			/* Interpret compl as a async link evt */
-			beiscsi_async_link_state_process(phba,
-				(struct be_async_event_link_state *) compl);
 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
 				status = be_mcc_compl_process(ctrl, compl);
 				atomic_dec(&phba->ctrl.mcc_obj.q.used);
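
Aside: the change replaces a BUG_ON() with logging, on the principle that an unrecognized event code from firmware should be skipped, not crash the machine. A sketch of that defensive pattern (event codes here are made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define ASYNC_EVENT_LINK_STATE 0x1

static void process_link_state(uint32_t flags)
{
	printf("link state event, flags=0x%08x\n", flags);
}

/* Unknown events are logged and dropped: newer firmware may emit
 * codes this driver version does not know about. */
static void process_async_event(uint32_t flags)
{
	if (flags & ASYNC_EVENT_LINK_STATE)
		process_link_state(flags);
	else
		fprintf(stderr, "unsupported async event, flags=0x%08x\n",
			flags);
}

int main(void)
{
	process_async_event(ASYNC_EVENT_LINK_STATE);
	process_async_event(0x80);	/* unknown: logged, not fatal */
	return 0;
}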

+ 1 - 0
drivers/scsi/bnx2i/bnx2i.h

@@ -684,6 +684,7 @@ extern unsigned int error_mask1, error_mask2;
 extern u64 iscsi_error_mask;
 extern unsigned int en_tcp_dack;
 extern unsigned int event_coal_div;
+extern unsigned int event_coal_min;
 
 extern struct scsi_transport_template *bnx2i_scsi_xport_template;
 extern struct iscsi_transport bnx2i_iscsi_transport;

+ 39 - 12
drivers/scsi/bnx2i/bnx2i_hwi.c

@@ -133,20 +133,38 @@ void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
 {
 	struct bnx2i_5771x_cq_db *cq_db;
 	u16 cq_index;
+	u16 next_index;
+	u32 num_active_cmds;
 
+
+	/* Coalesce CQ entries only on 10G devices */
 	if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
 		return;
 
+	/* Do not update CQ DB multiple times before firmware writes
+	 * '0xFFFF' to CQDB->SQN field. Deviation may cause spurious
+	 * interrupts and other unwanted results
+	 */
+	cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+	if (cq_db->sqn[0] && cq_db->sqn[0] != 0xFFFF)
+		return;
+
 	if (action == CNIC_ARM_CQE) {
-		cq_index = ep->qp.cqe_exp_seq_sn +
-			   ep->num_active_cmds / event_coal_div;
-		cq_index %= (ep->qp.cqe_size * 2 + 1);
-		if (!cq_index) {
+		num_active_cmds = ep->num_active_cmds;
+		if (num_active_cmds <= event_coal_min)
+			next_index = 1;
+		else
+			next_index = event_coal_min +
+				(num_active_cmds - event_coal_min) / event_coal_div;
+		if (!next_index)
+			next_index = 1;
+		cq_index = ep->qp.cqe_exp_seq_sn + next_index - 1;
+		if (cq_index > ep->qp.cqe_size * 2)
+			cq_index -= ep->qp.cqe_size * 2;
+		if (!cq_index)
 			cq_index = 1;
-			cq_db = (struct bnx2i_5771x_cq_db *)
-					ep->qp.cq_pgtbl_virt;
-			cq_db->sqn[0] = cq_index;
-		}
+
+		cq_db->sqn[0] = cq_index;
 	}
 }
 
@@ -366,6 +384,7 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
 	struct bnx2i_cmd *bnx2i_cmd;
 	struct bnx2i_tmf_request *tmfabort_wqe;
 	u32 dword;
+	u32 scsi_lun[2];
 
 	bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
 	tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
@@ -376,27 +395,35 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
 	tmfabort_wqe->op_attr = 0;
 	tmfabort_wqe->op_attr =
 		ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
-	tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
-	tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
 
 	tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
 	tmfabort_wqe->reserved2 = 0;
 	tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
 
 	ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
-	if (!ctask || ctask->sc)
+	if (!ctask || !ctask->sc)
 		/*
 		 * the iscsi layer must have completed the cmd while this
 		 * was starting up.
+		 *
+		 * Note: In the case of a SCSI cmd timeout, the task's sc
+		 *       is still active; hence ctask->sc != 0
+		 *       In this case, the task must be aborted
 		 */
 		return 0;
+
 	ref_sc = ctask->sc;
 
+	/* Retrieve LUN directly from the ref_sc */
+	int_to_scsilun(ref_sc->device->lun, (struct scsi_lun *) scsi_lun);
+	tmfabort_wqe->lun[0] = be32_to_cpu(scsi_lun[0]);
+	tmfabort_wqe->lun[1] = be32_to_cpu(scsi_lun[1]);
+
 	if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
 		dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
 	else
 		dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
-	tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+	tmfabort_wqe->ref_itt = (dword | (tmfabort_hdr->rtt & ISCSI_ITT_MASK));
 	tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
 
 	tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
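
Aside: the new arming logic interrupts once per command under light load and stretches the interval as load grows. A stand-alone sketch of the computation, lifted into a pure function for illustration (defaults mirror the module parameters added in bnx2i_init.c):

#include <stdio.h>
#include <stdint.h>

static unsigned int event_coal_min = 24;
static unsigned int event_coal_div = 1;

/* Returns the CQ sequence number at which the next completion
 * interrupt should fire.  The index wraps within the doubled CQ size
 * and is kept non-zero because the firmware treats 0 specially. */
static uint16_t next_arm_index(uint16_t cqe_exp_seq_sn,
			       uint32_t num_active, uint32_t cqe_size)
{
	uint16_t next_index, cq_index;

	if (num_active <= event_coal_min)
		next_index = 1;
	else
		next_index = event_coal_min +
			(num_active - event_coal_min) / event_coal_div;
	if (!next_index)
		next_index = 1;
	cq_index = cqe_exp_seq_sn + next_index - 1;
	if (cq_index > cqe_size * 2)
		cq_index -= cqe_size * 2;
	if (!cq_index)
		cq_index = 1;
	return cq_index;
}

int main(void)
{
	/* 8 outstanding commands: arm at the very next completion. */
	printf("light load: %u\n", next_arm_index(100, 8, 256));
	/* 60 outstanding: arm 24 + (60 - 24) / 1 = 60 entries ahead. */
	printf("heavy load: %u\n", next_arm_index(100, 60, 256));
	return 0;
}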

+ 12 - 4
drivers/scsi/bnx2i/bnx2i_init.c

@@ -17,8 +17,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.0.1e"
-#define DRV_MODULE_RELDATE	"June 22, 2009"
+#define DRV_MODULE_VERSION	"2.1.0"
+#define DRV_MODULE_RELDATE	"Dec 06, 2009"
 
 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -32,6 +32,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 
 static DEFINE_MUTEX(bnx2i_dev_lock);
 
+unsigned int event_coal_min = 24;
+module_param(event_coal_min, int, 0664);
+MODULE_PARM_DESC(event_coal_min, "Event Coalescing Minimum Commands");
+
 unsigned int event_coal_div = 1;
 module_param(event_coal_div, int, 0664);
 MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
@@ -83,8 +87,12 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
 		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
 		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
 	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
-		   hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+		   hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
 		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+	else
+		printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
+				  hba->pci_did);
 }
 
 
@@ -363,7 +371,7 @@ static int __init bnx2i_mod_init(void)
 
 	printk(KERN_INFO "%s", version);
 
-	if (!is_power_of_2(sq_size))
+	if (sq_size && !is_power_of_2(sq_size))
 		sq_size = roundup_pow_of_two(sq_size);
 
 	mutex_init(&bnx2i_dev_lock);
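
Aside: the guard matters because roundup_pow_of_two(0) is undefined in the kernel, and sq_size == 0 means "use the driver default". A user-space sketch with minimal equivalents of the two helpers:

#include <stdio.h>

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned long sq_size = 100;	/* e.g. a user-supplied value */

	/* Round only when a non-zero value was given; zero must
	 * survive untouched so the default kicks in later. */
	if (sq_size && !is_power_of_2(sq_size))
		sq_size = roundup_pow_of_two(sq_size);
	printf("sq_size = %lu\n", sq_size);	/* prints 128 */
	return 0;
}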

+ 0 - 2
drivers/scsi/bnx2i/bnx2i_iscsi.c

@@ -485,7 +485,6 @@ static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
 		struct iscsi_task *task = session->cmds[i];
 		struct bnx2i_cmd *cmd = task->dd_data;
 
-		/* Anil */
 		task->hdr = &cmd->hdr;
 		task->hdr_max = sizeof(struct iscsi_hdr);
 
@@ -765,7 +764,6 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
 	hba->pci_svid = hba->pcidev->subsystem_vendor;
 	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
 	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
-	bnx2i_identify_device(hba);
 
 	bnx2i_identify_device(hba);
 	bnx2i_setup_host_queue_size(hba, shost);

+ 12 - 12
drivers/scsi/cxgb3i/cxgb3i_offload.c

@@ -291,7 +291,7 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 	c3cn_hold(c3cn);
 	spin_lock_bh(&c3cn->lock);
 	if (c3cn->state == C3CN_STATE_CONNECTING)
-		fail_act_open(c3cn, EHOSTUNREACH);
+		fail_act_open(c3cn, -EHOSTUNREACH);
 	spin_unlock_bh(&c3cn->lock);
 	c3cn_put(c3cn);
 	__kfree_skb(skb);
@@ -792,18 +792,18 @@ static int act_open_rpl_status_to_errno(int status)
 {
 	switch (status) {
 	case CPL_ERR_CONN_RESET:
-		return ECONNREFUSED;
+		return -ECONNREFUSED;
 	case CPL_ERR_ARP_MISS:
-		return EHOSTUNREACH;
+		return -EHOSTUNREACH;
 	case CPL_ERR_CONN_TIMEDOUT:
-		return ETIMEDOUT;
+		return -ETIMEDOUT;
 	case CPL_ERR_TCAM_FULL:
-		return ENOMEM;
+		return -ENOMEM;
 	case CPL_ERR_CONN_EXIST:
 		cxgb3i_log_error("ACTIVE_OPEN_RPL: 4-tuple in use\n");
-		return EADDRINUSE;
+		return -EADDRINUSE;
 	default:
-		return EIO;
+		return -EIO;
 	}
 }
 
@@ -817,7 +817,7 @@ static void act_open_retry_timer(unsigned long data)
 	spin_lock_bh(&c3cn->lock);
 	skb = alloc_skb(sizeof(struct cpl_act_open_req), GFP_ATOMIC);
 	if (!skb)
-		fail_act_open(c3cn, ENOMEM);
+		fail_act_open(c3cn, -ENOMEM);
 	else {
 		skb->sk = (struct sock *)c3cn;
 		set_arp_failure_handler(skb, act_open_req_arp_failure);
@@ -966,14 +966,14 @@ static int abort_status_to_errno(struct s3_conn *c3cn, int abort_reason,
 	case CPL_ERR_BAD_SYN: /* fall through */
 	case CPL_ERR_CONN_RESET:
 		return c3cn->state > C3CN_STATE_ESTABLISHED ?
-			EPIPE : ECONNRESET;
+			-EPIPE : -ECONNRESET;
 	case CPL_ERR_XMIT_TIMEDOUT:
 	case CPL_ERR_PERSIST_TIMEDOUT:
 	case CPL_ERR_FINWAIT2_TIMEDOUT:
 	case CPL_ERR_KEEPALIVE_TIMEDOUT:
-		return ETIMEDOUT;
+		return -ETIMEDOUT;
 	default:
-		return EIO;
+		return -EIO;
 	}
 }
 
@@ -1563,7 +1563,7 @@ static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev)
 	s3_free_atid(cdev, c3cn->tid);
 	c3cn->tid = 0;
 out_err:
-	return -1;
+	return -EINVAL;
 }
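
Aside: these hunks align the driver with the kernel convention that internal helpers return a negative errno on failure (and 0, or a positive count, on success), so every caller can test ret < 0 uniformly. A small sketch of the mapping pattern (the hardware status codes are illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define HW_ERR_CONN_RESET	1
#define HW_ERR_ARP_MISS		2

static int status_to_errno(int status)
{
	switch (status) {
	case HW_ERR_CONN_RESET:
		return -ECONNREFUSED;
	case HW_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	default:
		return -EIO;
	}
}

int main(void)
{
	int ret = status_to_errno(HW_ERR_ARP_MISS);

	if (ret < 0)
		printf("failed: %s\n", strerror(-ret));
	return 0;
}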
 
 

+ 2 - 2
drivers/scsi/cxgb3i/cxgb3i_pdu.c

@@ -388,8 +388,8 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
 	if (err > 0) {
 		int pdulen = err;
 
-	cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
-			task, skb, skb->len, skb->data_len, err);
+		cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
+				task, skb, skb->len, skb->data_len, err);
 
 		if (task->conn->hdrdgst_en)
 			pdulen += ISCSI_DIGEST_SIZE;

+ 2 - 0
drivers/scsi/device_handler/scsi_dh_rdac.c

@@ -748,6 +748,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"IBM", "1724"},
 	{"IBM", "1726"},
 	{"IBM", "1742"},
+	{"IBM", "1745"},
+	{"IBM", "1746"},
 	{"IBM", "1814"},
 	{"IBM", "1815"},
 	{"IBM", "1818"},

+ 131 - 4
drivers/scsi/fcoe/fcoe.c

@@ -101,6 +101,8 @@ static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
 
 static int fcoe_create(const char *, struct kernel_param *);
 static int fcoe_destroy(const char *, struct kernel_param *);
+static int fcoe_enable(const char *, struct kernel_param *);
+static int fcoe_disable(const char *, struct kernel_param *);
 
 static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
 				      u32 did, struct fc_frame *,
@@ -115,10 +117,16 @@ static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
 
 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, "Create fcoe fcoe using net device passed in.");
+MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, "Destroy fcoe fcoe");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
+module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
+module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
 
 /* notification function for packets from net device */
 static struct notifier_block fcoe_notifier = {
@@ -544,6 +552,23 @@ static void fcoe_queue_timer(ulong lport)
 	fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
 }
 
+/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+	const struct net_device_ops *ops = netdev->netdev_ops;
+
+	if (ops->ndo_fcoe_get_wwn)
+		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+	return -EINVAL;
+}
+
 /**
  * fcoe_netdev_config() - Set up net devive for SW FCoE
  * @lport:  The local port that is associated with the net device
@@ -611,9 +636,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
 		 */
 		if (netdev->priv_flags & IFF_802_1Q_VLAN)
 			vid = vlan_dev_vlan_id(netdev);
-		wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+
+		if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
+			wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
 		fc_set_wwnn(lport, wwnn);
-		wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 2, vid);
+		if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
+			wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+						 2, vid);
 		fc_set_wwpn(lport, wwpn);
 	}
 
@@ -1837,6 +1866,104 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer)
 	return NULL;
 }
 
+/**
+ * fcoe_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp:	    The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+	int rc = 0;
+
+	mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module paramter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+
+	rtnl_lock();
+	fcoe = fcoe_hostlist_lookup_port(netdev);
+	rtnl_unlock();
+
+	if (fcoe)
+		fc_fabric_logoff(fcoe->ctlr.lp);
+	else
+		rc = -ENODEV;
+
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&fcoe_config_mutex);
+	return rc;
+}
+
+/**
+ * fcoe_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp:     The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+	int rc = 0;
+
+	mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module paramter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+#endif
+
+	netdev = fcoe_if_to_netdev(buffer);
+	if (!netdev) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+
+	rtnl_lock();
+	fcoe = fcoe_hostlist_lookup_port(netdev);
+	rtnl_unlock();
+
+	if (fcoe)
+		rc = fc_fabric_login(fcoe->ctlr.lp);
+	else
+		rc = -ENODEV;
+
+	dev_put(netdev);
+out_nodev:
+	mutex_unlock(&fcoe_config_mutex);
+	return rc;
+}
+
 /**
  * fcoe_destroy() - Destroy a FCoE interface
  * @buffer: The name of the Ethernet interface to be destroyed
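
Aside: fcoe_netdev_config() above now prefers a firmware-programmed WWN from the LLD and only falls back to deriving one from the MAC address. A sketch of that try-then-derive pattern (the struct and the MAC-to-WWN packing are illustrative; the real fcoe_wwn_from_mac() builds a proper NAA-format name):

#include <stdio.h>
#include <stdint.h>

struct netdev {
	int has_fw_wwn;			/* LLD can report a WWN? */
	uint64_t fw_wwn;
	uint8_t mac[6];
};

static int get_wwn(const struct netdev *dev, uint64_t *wwn)
{
	if (!dev->has_fw_wwn)
		return -1;		/* caller must fall back */
	*wwn = dev->fw_wwn;
	return 0;
}

/* Crude MAC-derived name for illustration only. */
static uint64_t wwn_from_mac(const uint8_t mac[6], unsigned int scheme)
{
	uint64_t wwn = 0;

	for (int i = 0; i < 6; i++)
		wwn = (wwn << 8) | mac[i];
	/* Put the addressing scheme in the top nibble. */
	return ((uint64_t)scheme << 60) | wwn;
}

int main(void)
{
	struct netdev dev = {
		0, 0, { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc }
	};
	uint64_t wwnn;

	/* Prefer the firmware-supplied name; derive one otherwise. */
	if (get_wwn(&dev, &wwnn))
		wwnn = wwn_from_mac(dev.mac, 1);
	printf("wwnn = 0x%016llx\n", (unsigned long long)wwnn);
	return 0;
}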

+ 3531 - 0
drivers/scsi/hpsa.c

@@ -0,0 +1,3531 @@
+/*
+ *    Disk Array driver for HP Smart Array SAS controllers
+ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/timer.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/compat.h>
+#include <linux/blktrace_api.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <linux/cciss_ioctl.h>
+#include <linux/string.h>
+#include <linux/bitmap.h>
+#include <asm/atomic.h>
+#include <linux/kthread.h>
+#include "hpsa_cmd.h"
+#include "hpsa.h"
+
+/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
+#define HPSA_DRIVER_VERSION "1.0.0"
+#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
+
+/* How long to wait (in milliseconds) for board to go into simple mode */
+#define MAX_CONFIG_WAIT 30000
+#define MAX_IOCTL_CONFIG_WAIT 1000
+
+/*define how many times we will try a command because of bus resets */
+#define MAX_CMD_RETRIES 3
+
+/* Embedded module documentation macros - see modules.h */
+MODULE_AUTHOR("Hewlett-Packard Company");
+MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
+	HPSA_DRIVER_VERSION);
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION(HPSA_DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static int hpsa_allow_any;
+module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_allow_any,
+		"Allow hpsa driver to access unknown HP Smart Array hardware");
+
+/* define the PCI info for the cards we can control */
+static const struct pci_device_id hpsa_pci_device_id[] = {
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3223},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x3234},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSC,     0x103C, 0x323D},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324a},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324b},
+	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,             PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
+
+/*  board_id = Subsystem Device ID & Vendor ID
+ *  product = Marketing Name for the board
+ *  access = Address of the struct of function pointers
+ */
+static struct board_type products[] = {
+	{0x3223103C, "Smart Array P800", &SA5_access},
+	{0x3234103C, "Smart Array P400", &SA5_access},
+	{0x323d103c, "Smart Array P700M", &SA5_access},
+	{0x3241103C, "Smart Array P212", &SA5_access},
+	{0x3243103C, "Smart Array P410", &SA5_access},
+	{0x3245103C, "Smart Array P410i", &SA5_access},
+	{0x3247103C, "Smart Array P411", &SA5_access},
+	{0x3249103C, "Smart Array P812", &SA5_access},
+	{0x324a103C, "Smart Array P712m", &SA5_access},
+	{0x324b103C, "Smart Array P711m", &SA5_access},
+	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
+};
+
+static int number_of_controllers;
+
+static irqreturn_t do_hpsa_intr(int irq, void *dev_id);
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
+static void start_io(struct ctlr_info *h);
+
+#ifdef CONFIG_COMPAT
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
+#endif
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c);
+static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
+static struct CommandList *cmd_alloc(struct ctlr_info *h);
+static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
+static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+	int cmd_type);
+
+static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+		void (*done)(struct scsi_cmnd *));
+
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
+static int hpsa_slave_alloc(struct scsi_device *sdev);
+static void hpsa_slave_destroy(struct scsi_device *sdev);
+
+static ssize_t raid_level_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static ssize_t lunid_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static ssize_t unique_id_show(struct device *dev,
+	struct device_attribute *attr, char *buf);
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
+static ssize_t host_store_rescan(struct device *dev,
+	 struct device_attribute *attr, const char *buf, size_t count);
+static int check_for_unit_attention(struct ctlr_info *h,
+	struct CommandList *c);
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+	struct CommandList *c);
+
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+	&dev_attr_raid_level,
+	&dev_attr_lunid,
+	&dev_attr_unique_id,
+	NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+	&dev_attr_rescan,
+	NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "hpsa",
+	.proc_name		= "hpsa",
+	.queuecommand		= hpsa_scsi_queue_command,
+	.can_queue		= 512,
+	.this_id		= -1,
+	.sg_tablesize		= MAXSGENTRIES,
+	.cmd_per_lun		= 512,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
+	.ioctl			= hpsa_ioctl,
+	.slave_alloc		= hpsa_slave_alloc,
+	.slave_destroy		= hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= hpsa_compat_ioctl,
+#endif
+	.sdev_attrs = hpsa_sdev_attrs,
+	.shost_attrs = hpsa_shost_attrs,
+};
+
+static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
+{
+	unsigned long *priv = shost_priv(sdev->host);
+	return (struct ctlr_info *) *priv;
+}
+
+static struct task_struct *hpsa_scan_thread;
+static DEFINE_MUTEX(hpsa_scan_mutex);
+static LIST_HEAD(hpsa_scan_q);
+static int hpsa_scan_func(void *data);
+
+/**
+ * add_to_scan_list() - add controller to rescan queue
+ * @h:		      Pointer to the controller.
+ *
+ * Adds the controller to the rescan queue if not already on the queue.
+ *
+ * returns 1 if added to the queue, 0 if skipped (could be on the
+ * queue already, or the controller could be initializing or shutting
+ * down).
+ **/
+static int add_to_scan_list(struct ctlr_info *h)
+{
+	struct ctlr_info *test_h;
+	int found = 0;
+	int ret = 0;
+
+	if (h->busy_initializing)
+		return 0;
+
+	/*
+	 * If we don't get the lock, it means the driver is unloading
+	 * and there's no point in scheduling a new scan.
+	 */
+	if (!mutex_trylock(&h->busy_shutting_down))
+		return 0;
+
+	mutex_lock(&hpsa_scan_mutex);
+	list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
+		if (test_h == h) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found && !h->busy_scanning) {
+		INIT_COMPLETION(h->scan_wait);
+		list_add_tail(&h->scan_list, &hpsa_scan_q);
+		ret = 1;
+	}
+	mutex_unlock(&hpsa_scan_mutex);
+	mutex_unlock(&h->busy_shutting_down);
+
+	return ret;
+}
+
+/**
+ * remove_from_scan_list() - remove controller from rescan queue
+ * @h:			   Pointer to the controller.
+ *
+ * Removes the controller from the rescan queue if present. Blocks if
+ * the controller is currently conducting a rescan.  The controller
+ * can be in one of three states:
+ * 1. Doesn't need a scan
+ * 2. On the scan list, but not scanning yet (we remove it)
+ * 3. Busy scanning (and not on the list). In this case we want to wait for
+ *    the scan to complete to make sure the scanning thread for this
+ *    controller is completely idle.
+ **/
+static void remove_from_scan_list(struct ctlr_info *h)
+{
+	struct ctlr_info *test_h, *tmp_h;
+
+	mutex_lock(&hpsa_scan_mutex);
+	list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
+		if (test_h == h) { /* state 2. */
+			list_del(&h->scan_list);
+			complete_all(&h->scan_wait);
+			mutex_unlock(&hpsa_scan_mutex);
+			return;
+		}
+	}
+	if (h->busy_scanning) { /* state 3. */
+		mutex_unlock(&hpsa_scan_mutex);
+		wait_for_completion(&h->scan_wait);
+	} else { /* state 1, nothing to do. */
+		mutex_unlock(&hpsa_scan_mutex);
+	}
+}
+
+/* hpsa_scan_func() - kernel thread used to rescan controllers
+ * @data:	 Ignored.
+ *
+ * A kernel thread used scan for drive topology changes on
+ * controllers. The thread processes only one controller at a time
+ * using a queue.  Controllers are added to the queue using
+ * add_to_scan_list() and removed from the queue either after done
+ * processing or using remove_from_scan_list().
+ *
+ * returns 0.
+ **/
+static int hpsa_scan_func(__attribute__((unused)) void *data)
+{
+	struct ctlr_info *h;
+	int host_no;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+		if (kthread_should_stop())
+			break;
+
+		while (1) {
+			mutex_lock(&hpsa_scan_mutex);
+			if (list_empty(&hpsa_scan_q)) {
+				mutex_unlock(&hpsa_scan_mutex);
+				break;
+			}
+			h = list_entry(hpsa_scan_q.next, struct ctlr_info,
+					scan_list);
+			list_del(&h->scan_list);
+			h->busy_scanning = 1;
+			mutex_unlock(&hpsa_scan_mutex);
+			host_no = h->scsi_host ?  h->scsi_host->host_no : -1;
+			hpsa_update_scsi_devices(h, host_no);
+			complete_all(&h->scan_wait);
+			mutex_lock(&hpsa_scan_mutex);
+			h->busy_scanning = 0;
+			mutex_unlock(&hpsa_scan_mutex);
+		}
+	}
+	return 0;
+}
+
+static int check_for_unit_attention(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+		return 0;
+
+	switch (c->err_info->SenseInfo[12]) {
+	case STATE_CHANGED:
+		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
+			"detected, command retried\n", h->ctlr);
+		break;
+	case LUN_FAILED:
+		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
+			"detected, action required\n", h->ctlr);
+		break;
+	case REPORT_LUNS_CHANGED:
+		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
+			"changed\n", h->ctlr);
+	/*
+	 * Here, we could call add_to_scan_list and wake up the scan thread,
+	 * except that it's quite likely that we will get more than one
+	 * REPORT_LUNS_CHANGED condition in quick succession, which means
+	 * that those which occur after the first one will likely happen
+	 * *during* the hpsa_scan_thread's rescan.  And the rescan code is not
+	 * robust enough to restart in the middle, undoing what it has already
+	 * done, and it's not clear that it's even possible to do this, since
+	 * part of what it does is notify the SCSI mid layer, which starts
+	 * doing it's own i/o to read partition tables and so on, and the
+	 * driver doesn't have visibility to know what might need undoing.
+	 * In any event, if possible, it is horribly complicated to get right
+	 * so we just don't do it for now.
+	 *
+	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
+	 */
+		break;
+	case POWER_OR_RESET:
+		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
+			"or device reset detected\n", h->ctlr);
+		break;
+	case UNIT_ATTENTION_CLEARED:
+		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
+		    "cleared by another initiator\n", h->ctlr);
+		break;
+	default:
+		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
+			"unit attention detected\n", h->ctlr);
+		break;
+	}
+	return 1;
+}
+
+static ssize_t host_store_rescan(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+	unsigned long *priv = shost_priv(shost);
+	h = (struct ctlr_info *) *priv;
+	if (add_to_scan_list(h)) {
+		wake_up_process(hpsa_scan_thread);
+		wait_for_completion_interruptible(&h->scan_wait);
+	}
+	return count;
+}
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct hlist_head *list, struct CommandList *c)
+{
+	hlist_add_head(&c->list, list);
+}
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&h->lock, flags);
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	start_io(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+	if (WARN_ON(hlist_unhashed(&c->list)))
+		return;
+	hlist_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
+{
+	return (scsi3addr[3] & 0xC0) == 0x40;
+}
+
+static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
+	"UNKNOWN"
+};
+#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
+
+static ssize_t raid_level_show(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	ssize_t l = 0;
+	int rlevel;
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->lock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		return -ENODEV;
+	}
+
+	/* Is this even a logical drive? */
+	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		l = snprintf(buf, PAGE_SIZE, "N/A\n");
+		return l;
+	}
+
+	rlevel = hdev->raid_level;
+	spin_unlock_irqrestore(&h->lock, flags);
+	if (rlevel < 0 || rlevel > RAID_UNKNOWN)
+		rlevel = RAID_UNKNOWN;
+	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
+	return l;
+}
+
+static ssize_t lunid_show(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+	unsigned char lunid[8];
+
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->lock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		return -ENODEV;
+	}
+	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
+	spin_unlock_irqrestore(&h->lock, flags);
+	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		lunid[0], lunid[1], lunid[2], lunid[3],
+		lunid[4], lunid[5], lunid[6], lunid[7]);
+}
+
+static ssize_t unique_id_show(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct scsi_device *sdev;
+	struct hpsa_scsi_dev_t *hdev;
+	unsigned long flags;
+	unsigned char sn[16];
+
+	sdev = to_scsi_device(dev);
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->lock, flags);
+	hdev = sdev->hostdata;
+	if (!hdev) {
+		spin_unlock_irqrestore(&h->lock, flags);
+		return -ENODEV;
+	}
+	memcpy(sn, hdev->device_id, sizeof(sn));
+	spin_unlock_irqrestore(&h->lock, flags);
+	return snprintf(buf, 16 * 2 + 2,
+			"%02X%02X%02X%02X%02X%02X%02X%02X"
+			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
+			sn[0], sn[1], sn[2], sn[3],
+			sn[4], sn[5], sn[6], sn[7],
+			sn[8], sn[9], sn[10], sn[11],
+			sn[12], sn[13], sn[14], sn[15]);
+}
+
+static int hpsa_find_target_lun(struct ctlr_info *h,
+	unsigned char scsi3addr[], int bus, int *target, int *lun)
+{
+	/* finds an unused bus, target, lun for a new physical device
+	 * assumes h->devlock is held
+	 */
+	int i, found = 0;
+	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);
+
+	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);
+
+	for (i = 0; i < h->ndevices; i++) {
+		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
+			set_bit(h->dev[i]->target, lun_taken);
+	}
+
+	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
+		if (!test_bit(i, lun_taken)) {
+			/* *bus = 1; */
+			*target = i;
+			*lun = 0;
+			found = 1;
+			break;
+		}
+	}
+	return !found;
+}
+
+/* Add an entry into h->dev[] array. */
+static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
+		struct hpsa_scsi_dev_t *device,
+		struct hpsa_scsi_dev_t *added[], int *nadded)
+{
+	/* assumes h->devlock is held */
+	int n = h->ndevices;
+	int i;
+	unsigned char addr1[8], addr2[8];
+	struct hpsa_scsi_dev_t *sd;
+
+	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
+		dev_err(&h->pdev->dev, "too many devices, some will be "
+			"inaccessible.\n");
+		return -1;
+	}
+
+	/* physical devices do not have lun or target assigned until now. */
+	if (device->lun != -1)
+		/* Logical device, lun is already assigned. */
+		goto lun_assigned;
+
+	/* If this device a non-zero lun of a multi-lun device
+	 * byte 4 of the 8-byte LUN addr will contain the logical
+	 * unit no, zero otherise.
+	 */
+	if (device->scsi3addr[4] == 0) {
+		/* This is not a non-zero lun of a multi-lun device */
+		if (hpsa_find_target_lun(h, device->scsi3addr,
+			device->bus, &device->target, &device->lun) != 0)
+			return -1;
+		goto lun_assigned;
+	}
+
+	/* This is a non-zero lun of a multi-lun device.
+	 * Search through our list and find the device which
+	 * has the same 8 byte LUN address, excepting byte 4.
+	 * Assign the same bus and target for this new LUN.
+	 * Use the logical unit number from the firmware.
+	 */
+	memcpy(addr1, device->scsi3addr, 8);
+	addr1[4] = 0;
+	for (i = 0; i < n; i++) {
+		sd = h->dev[i];
+		memcpy(addr2, sd->scsi3addr, 8);
+		addr2[4] = 0;
+		/* differ only in byte 4? */
+		if (memcmp(addr1, addr2, 8) == 0) {
+			device->bus = sd->bus;
+			device->target = sd->target;
+			device->lun = device->scsi3addr[4];
+			break;
+		}
+	}
+	if (device->lun == -1) {
+		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
+			" suspect firmware bug or unsupported hardware "
+			"configuration.\n");
+			return -1;
+	}
+
+lun_assigned:
+
+	h->dev[n] = device;
+	h->ndevices++;
+	added[*nadded] = device;
+	(*nadded)++;
+
+	/* initially, (before registering with scsi layer) we don't
+	 * know our hostno and we don't want to print anything first
+	 * time anyway (the scsi layer's inquiries will show that info)
+	 */
+	/* if (hostno != -1) */
+		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
+			scsi_device_type(device->devtype), hostno,
+			device->bus, device->target, device->lun);
+	return 0;
+}
+
+/* Remove an entry from h->dev[] array. */
+static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
+	struct hpsa_scsi_dev_t *removed[], int *nremoved)
+{
+	/* assumes h->devlock is held */
+	int i;
+	struct hpsa_scsi_dev_t *sd;
+
+	if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+		BUG();
+
+	sd = h->dev[entry];
+	removed[*nremoved] = h->dev[entry];
+	(*nremoved)++;
+
+	for (i = entry; i < h->ndevices-1; i++)
+		h->dev[i] = h->dev[i+1];
+	h->ndevices--;
+	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
+		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
+		sd->lun);
+}
+
+#define SCSI3ADDR_EQ(a, b) ( \
+	(a)[7] == (b)[7] && \
+	(a)[6] == (b)[6] && \
+	(a)[5] == (b)[5] && \
+	(a)[4] == (b)[4] && \
+	(a)[3] == (b)[3] && \
+	(a)[2] == (b)[2] && \
+	(a)[1] == (b)[1] && \
+	(a)[0] == (b)[0])
+
+static void fixup_botched_add(struct ctlr_info *h,
+	struct hpsa_scsi_dev_t *added)
+{
+	/* called when scsi_add_device fails in order to re-adjust
+	 * h->dev[] to match the mid layer's view.
+	 */
+	unsigned long flags;
+	int i, j;
+
+	spin_lock_irqsave(&h->lock, flags);
+	for (i = 0; i < h->ndevices; i++) {
+		if (h->dev[i] == added) {
+			for (j = i; j < h->ndevices-1; j++)
+				h->dev[j] = h->dev[j+1];
+			h->ndevices--;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	kfree(added);
+}
+
+static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
+	struct hpsa_scsi_dev_t *dev2)
+{
+	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
+		(dev1->lun != -1 && dev2->lun != -1)) &&
+		dev1->devtype != 0x0C)
+		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
+
+	/* we compare everything except lun and target as these
+	 * are not yet assigned.  Compare parts likely
+	 * to differ first
+	 */
+	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
+		sizeof(dev1->scsi3addr)) != 0)
+		return 0;
+	if (memcmp(dev1->device_id, dev2->device_id,
+		sizeof(dev1->device_id)) != 0)
+		return 0;
+	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
+		return 0;
+	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
+		return 0;
+	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
+		return 0;
+	if (dev1->devtype != dev2->devtype)
+		return 0;
+	if (dev1->raid_level != dev2->raid_level)
+		return 0;
+	if (dev1->bus != dev2->bus)
+		return 0;
+	return 1;
+}
+
+/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
+ * and return needle location in *index.  If scsi3addr matches, but not
+ * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
+ * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
+ */
+static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
+	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
+	int *index)
+{
+	int i;
+#define DEVICE_NOT_FOUND 0
+#define DEVICE_CHANGED 1
+#define DEVICE_SAME 2
+	for (i = 0; i < haystack_size; i++) {
+		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
+			*index = i;
+			if (device_is_the_same(needle, haystack[i]))
+				return DEVICE_SAME;
+			else
+				return DEVICE_CHANGED;
+		}
+	}
+	*index = -1;
+	return DEVICE_NOT_FOUND;
+}
+
+static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
+	struct hpsa_scsi_dev_t *sd[], int nsds)
+{
+	/* sd contains scsi3 addresses and devtypes, and inquiry
+	 * data.  This function takes what's in sd to be the current
+	 * reality and updates h->dev[] to reflect that reality.
+	 */
+	int i, entry, device_change, changes = 0;
+	struct hpsa_scsi_dev_t *csd;
+	unsigned long flags;
+	struct hpsa_scsi_dev_t **added, **removed;
+	int nadded, nremoved;
+	struct Scsi_Host *sh = NULL;
+
+	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+		GFP_KERNEL);
+	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+		GFP_KERNEL);
+
+	if (!added || !removed) {
+		dev_warn(&h->pdev->dev, "out of memory in "
+			"adjust_hpsa_scsi_table\n");
+		goto free_and_out;
+	}
+
+	spin_lock_irqsave(&h->devlock, flags);
+
+	/* find any devices in h->dev[] that are not in
+	 * sd[] and remove them from h->dev[], and for any
+	 * devices which have changed, remove the old device
+	 * info and add the new device info.
+	 */
+	i = 0;
+	nremoved = 0;
+	nadded = 0;
+	while (i < h->ndevices) {
+		csd = h->dev[i];
+		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
+		if (device_change == DEVICE_NOT_FOUND) {
+			changes++;
+			hpsa_scsi_remove_entry(h, hostno, i,
+				removed, &nremoved);
+			continue; /* remove ^^^, hence i not incremented */
+		} else if (device_change == DEVICE_CHANGED) {
+			changes++;
+			hpsa_scsi_remove_entry(h, hostno, i,
+				removed, &nremoved);
+			(void) hpsa_scsi_add_entry(h, hostno, sd[entry],
+				added, &nadded);
+			/* add can't fail, we just removed one. */
+			sd[entry] = NULL; /* prevent it from being freed */
+		}
+		i++;
+	}
+
+	/* Now, make sure every device listed in sd[] is also
+	 * listed in h->dev[], adding them if they aren't found
+	 */
+
+	for (i = 0; i < nsds; i++) {
+		if (!sd[i]) /* if already added above. */
+			continue;
+		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
+					h->ndevices, &entry);
+		if (device_change == DEVICE_NOT_FOUND) {
+			changes++;
+			if (hpsa_scsi_add_entry(h, hostno, sd[i],
+				added, &nadded) != 0)
+				break;
+			sd[i] = NULL; /* prevent from being freed later. */
+		} else if (device_change == DEVICE_CHANGED) {
+			/* should never happen... */
+			changes++;
+			dev_warn(&h->pdev->dev,
+				"device unexpectedly changed.\n");
+			/* but if it does happen, we just ignore that device */
+		}
+	}
+	spin_unlock_irqrestore(&h->devlock, flags);
+
+	/* Don't notify scsi mid layer of any changes the first time through
+	 * (or if there are no changes) scsi_scan_host will do it later the
+	 * first time through.
+	 */
+	if (hostno == -1 || !changes)
+		goto free_and_out;
+
+	sh = h->scsi_host;
+	/* Notify scsi mid layer of any removed devices */
+	for (i = 0; i < nremoved; i++) {
+		struct scsi_device *sdev =
+			scsi_device_lookup(sh, removed[i]->bus,
+				removed[i]->target, removed[i]->lun);
+		if (sdev != NULL) {
+			scsi_remove_device(sdev);
+			scsi_device_put(sdev);
+		} else {
+			/* We don't expect to get here.
+			 * future cmds to this device will get selection
+			 * timeout as if the device was gone.
+			 */
+			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
+				" for removal.", hostno, removed[i]->bus,
+				removed[i]->target, removed[i]->lun);
+		}
+		kfree(removed[i]);
+		removed[i] = NULL;
+	}
+
+	/* Notify scsi mid layer of any added devices */
+	for (i = 0; i < nadded; i++) {
+		if (scsi_add_device(sh, added[i]->bus,
+			added[i]->target, added[i]->lun) == 0)
+			continue;
+		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
+			"device not added.\n", hostno, added[i]->bus,
+			added[i]->target, added[i]->lun);
+		/* now we have to remove it from h->dev,
+		 * since it didn't get added to scsi mid layer
+		 */
+		fixup_botched_add(h, added[i]);
+	}
+
+free_and_out:
+	kfree(added);
+	kfree(removed);
+	return 0;
+}
+
+/*
+ * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t *
+ * Assume's h->devlock is held.
+ */
+static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
+	int bus, int target, int lun)
+{
+	int i;
+	struct hpsa_scsi_dev_t *sd;
+
+	for (i = 0; i < h->ndevices; i++) {
+		sd = h->dev[i];
+		if (sd->bus == bus && sd->target == target && sd->lun == lun)
+			return sd;
+	}
+	return NULL;
+}
+
+/* link sdev->hostdata to our per-device structure. */
+static int hpsa_slave_alloc(struct scsi_device *sdev)
+{
+	struct hpsa_scsi_dev_t *sd;
+	unsigned long flags;
+	struct ctlr_info *h;
+
+	h = sdev_to_hba(sdev);
+	spin_lock_irqsave(&h->devlock, flags);
+	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
+		sdev_id(sdev), sdev->lun);
+	if (sd != NULL)
+		sdev->hostdata = sd;
+	spin_unlock_irqrestore(&h->devlock, flags);
+	return 0;
+}
+
+static void hpsa_slave_destroy(struct scsi_device *sdev)
+{
+	return; /* nothing to do. */
+}
+
+static void hpsa_scsi_setup(struct ctlr_info *h)
+{
+	h->ndevices = 0;
+	h->scsi_host = NULL;
+	spin_lock_init(&h->devlock);
+	return;
+}
+
+static void complete_scsi_command(struct CommandList *cp,
+	int timeout, __u32 tag)
+{
+	struct scsi_cmnd *cmd;
+	struct ctlr_info *h;
+	struct ErrorInfo *ei;
+
+	unsigned char sense_key;
+	unsigned char asc;      /* additional sense code */
+	unsigned char ascq;     /* additional sense code qualifier */
+
+	ei = cp->err_info;
+	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
+	h = cp->h;
+
+	scsi_dma_unmap(cmd); /* undo the DMA mappings */
+
+	cmd->result = (DID_OK << 16); 		/* host byte */
+	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+	cmd->result |= (ei->ScsiStatus << 1);
+
+	/* copy the sense data whether we need to or not. */
+	memcpy(cmd->sense_buffer, ei->SenseInfo,
+		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
+			SCSI_SENSE_BUFFERSIZE :
+			ei->SenseLen);
+	scsi_set_resid(cmd, ei->ResidualCnt);
+
+	if (ei->CommandStatus == 0) {
+		cmd->scsi_done(cmd);
+		cmd_free(h, cp);
+		return;
+	}
+
+	/* an error has occurred */
+	switch (ei->CommandStatus) {
+
+	case CMD_TARGET_STATUS:
+		if (ei->ScsiStatus) {
+			/* Get sense key */
+			sense_key = 0xf & ei->SenseInfo[2];
+			/* Get additional sense code */
+			asc = ei->SenseInfo[12];
+			/* Get addition sense code qualifier */
+			ascq = ei->SenseInfo[13];
+		}
+
+		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
+			if (check_for_unit_attention(h, cp)) {
+				cmd->result = DID_SOFT_ERROR << 16;
+				break;
+			}
+			if (sense_key == ILLEGAL_REQUEST) {
+				/*
+				 * SCSI REPORT_LUNS is commonly unsupported on
+				 * Smart Array.  Suppress noisy complaint.
+				 */
+				if (cp->Request.CDB[0] == REPORT_LUNS)
+					break;
+
+				/* If ASC/ASCQ indicate Logical Unit
+				 * Not Supported condition,
+				 */
+				if ((asc == 0x25) && (ascq == 0x0)) {
+					dev_warn(&h->pdev->dev, "cp %p "
+						"has check condition\n", cp);
+					break;
+				}
+			}
+
+			if (sense_key == NOT_READY) {
+				/* If Sense is Not Ready, Logical Unit
+				 * Not ready, Manual Intervention
+				 * required
+				 */
+				if ((asc == 0x04) && (ascq == 0x03)) {
+					cmd->result = DID_NO_CONNECT << 16;
+					dev_warn(&h->pdev->dev, "cp %p "
+						"has check condition: unit "
+						"not ready, manual "
+						"intervention required\n", cp);
+					break;
+				}
+			}
+
+
+			/* Must be some other type of check condition */
+			dev_warn(&h->pdev->dev, "cp %p has check condition: "
+					"unknown type: "
+					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+					"Returning result: 0x%x, "
+					"cmd=[%02x %02x %02x %02x %02x "
+					"%02x %02x %02x %02x %02x]\n",
+					cp, sense_key, asc, ascq,
+					cmd->result,
+					cmd->cmnd[0], cmd->cmnd[1],
+					cmd->cmnd[2], cmd->cmnd[3],
+					cmd->cmnd[4], cmd->cmnd[5],
+					cmd->cmnd[6], cmd->cmnd[7],
+					cmd->cmnd[8], cmd->cmnd[9]);
+			break;
+		}
+
+
+		/* Problem was not a check condition
+		 * Pass it up to the upper layers...
+		 */
+		if (ei->ScsiStatus) {
+			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
+				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
+				"Returning result: 0x%x\n",
+				cp, ei->ScsiStatus,
+				sense_key, asc, ascq,
+				cmd->result);
+		} else {  /* scsi status is zero??? How??? */
+			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
+				"Returning no connection.\n", cp),
+
+			/* Ordinarily, this case should never happen,
+			 * but there is a bug in some released firmware
+			 * revisions that allows it to happen if, for
+			 * example, a 4100 backplane loses power and
+			 * the tape drive is in it.  We assume that
+			 * it's a fatal error of some kind because we
+			 * can't show that it wasn't. We will make it
+			 * look like selection timeout since that is
+			 * the most common reason for this to occur,
+			 * and it's severe enough.
+			 */
+
+			cmd->result = DID_NO_CONNECT << 16;
+		}
+		break;
+
+	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+		break;
+	case CMD_DATA_OVERRUN:
+		dev_warn(&h->pdev->dev, "cp %p has"
+			" completed with data overrun "
+			"reported\n", cp);
+		break;
+	case CMD_INVALID: {
+		/* print_bytes(cp, sizeof(*cp), 1, 0);
+		print_cmd(cp); */
+		/* We get CMD_INVALID if you address a non-existent device
+		 * instead of a selection timeout (no response).  You will
+		 * see this if you yank out a drive, then try to access it.
+		 * This is kind of a shame because it means that any other
+		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
+		 * missing target. */
+		cmd->result = DID_NO_CONNECT << 16;
+	}
+		break;
+	case CMD_PROTOCOL_ERR:
+		dev_warn(&h->pdev->dev, "cp %p has "
+			"protocol error \n", cp);
+		break;
+	case CMD_HARDWARE_ERR:
+		cmd->result = DID_ERROR << 16;
+		dev_warn(&h->pdev->dev, "cp %p had  hardware error\n", cp);
+		break;
+	case CMD_CONNECTION_LOST:
+		cmd->result = DID_ERROR << 16;
+		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
+		break;
+	case CMD_ABORTED:
+		cmd->result = DID_ABORT << 16;
+		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
+				cp, ei->ScsiStatus);
+		break;
+	case CMD_ABORT_FAILED:
+		cmd->result = DID_ERROR << 16;
+		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
+		break;
+	case CMD_UNSOLICITED_ABORT:
+		cmd->result = DID_ABORT << 16;
+		dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited "
+			"abort\n", cp);
+		break;
+	case CMD_TIMEOUT:
+		cmd->result = DID_TIME_OUT << 16;
+		dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
+		break;
+	default:
+		cmd->result = DID_ERROR << 16;
+		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
+				cp, ei->CommandStatus);
+	}
+	cmd->scsi_done(cmd);
+	cmd_free(h, cp);
+}
+
+static int hpsa_scsi_detect(struct ctlr_info *h)
+{
+	struct Scsi_Host *sh;
+	int error;
+
+	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
+	if (sh == NULL)
+		goto fail;
+
+	sh->io_port = 0;
+	sh->n_io_port = 0;
+	sh->this_id = -1;
+	sh->max_channel = 3;
+	sh->max_cmd_len = MAX_COMMAND_SIZE;
+	sh->max_lun = HPSA_MAX_LUN;
+	sh->max_id = HPSA_MAX_LUN;
+	h->scsi_host = sh;
+	sh->hostdata[0] = (unsigned long) h;
+	sh->irq = h->intr[SIMPLE_MODE_INT];
+	sh->unique_id = sh->irq;
+	error = scsi_add_host(sh, &h->pdev->dev);
+	if (error)
+		goto fail_host_put;
+	scsi_scan_host(sh);
+	return 0;
+
+ fail_host_put:
+	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
+		" failed for controller %d\n", h->ctlr);
+	scsi_host_put(sh);
+	return -1;
+ fail:
+	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
+		" failed for controller %d\n", h->ctlr);
+	return -1;
+}
+
+static void hpsa_pci_unmap(struct pci_dev *pdev,
+	struct CommandList *c, int sg_used, int data_direction)
+{
+	int i;
+	union u64bit addr64;
+
+	for (i = 0; i < sg_used; i++) {
+		addr64.val32.lower = c->SG[i].Addr.lower;
+		addr64.val32.upper = c->SG[i].Addr.upper;
+		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
+			data_direction);
+	}
+}
+
+static void hpsa_map_one(struct pci_dev *pdev,
+		struct CommandList *cp,
+		unsigned char *buf,
+		size_t buflen,
+		int data_direction)
+{
+	__u64 addr64;
+
+	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+		cp->Header.SGList = 0;
+		cp->Header.SGTotal = 0;
+		return;
+	}
+
+	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+	cp->SG[0].Addr.lower =
+	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+	cp->SG[0].Addr.upper =
+	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+	cp->SG[0].Len = buflen;
+	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
+	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+}
+
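+/* Send a command and sleep until the interrupt handler signals the
+ * on-stack completion from finish_cmd(); callers must therefore be in
+ * a context that can sleep.
+ */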
+static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	c->waiting = &wait;
+	enqueue_cmd_and_start_io(h, c);
+	wait_for_completion(&wait);
+}
+
+static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
+	struct CommandList *c, int data_direction)
+{
+	int retry_count = 0;
+
+	do {
+		memset(c->err_info, 0, sizeof(c->err_info));
+		hpsa_scsi_do_simple_cmd_core(h, c);
+		retry_count++;
+	} while (check_for_unit_attention(h, c) && retry_count <= 3);
+	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
+}
+
+static void hpsa_scsi_interpret_error(struct CommandList *cp)
+{
+	struct ErrorInfo *ei;
+	struct device *d = &cp->h->pdev->dev;
+
+	ei = cp->err_info;
+	switch (ei->CommandStatus) {
+	case CMD_TARGET_STATUS:
+		dev_warn(d, "cmd %p has completed with errors\n", cp);
+		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
+				ei->ScsiStatus);
+		if (ei->ScsiStatus == 0)
+			dev_warn(d, "SCSI status is abnormally zero.  "
+			"(probably indicates selection timeout "
+			"reported incorrectly due to a known "
+			"firmware bug, circa July, 2001.)\n");
+		break;
+	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+			dev_info(d, "UNDERRUN\n");
+		break;
+	case CMD_DATA_OVERRUN:
+		dev_warn(d, "cp %p has completed with data overrun\n", cp);
+		break;
+	case CMD_INVALID: {
+		/* controller unfortunately reports SCSI passthru's
+		 * to non-existent targets as invalid commands.
+		 */
+		dev_warn(d, "cp %p is reported invalid (probably means "
+			"target device no longer present)\n", cp);
+		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+		print_cmd(cp);  */
+		}
+		break;
+	case CMD_PROTOCOL_ERR:
+		dev_warn(d, "cp %p has protocol error \n", cp);
+		break;
+	case CMD_HARDWARE_ERR:
+		/* cmd->result = DID_ERROR << 16; */
+		dev_warn(d, "cp %p had hardware error\n", cp);
+		break;
+	case CMD_CONNECTION_LOST:
+		dev_warn(d, "cp %p had connection lost\n", cp);
+		break;
+	case CMD_ABORTED:
+		dev_warn(d, "cp %p was aborted\n", cp);
+		break;
+	case CMD_ABORT_FAILED:
+		dev_warn(d, "cp %p reports abort failed\n", cp);
+		break;
+	case CMD_UNSOLICITED_ABORT:
+		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+		break;
+	case CMD_TIMEOUT:
+		dev_warn(d, "cp %p timed out\n", cp);
+		break;
+	default:
+		dev_warn(d, "cp %p returned unknown status %x\n", cp,
+				ei->CommandStatus);
+	}
+}
+
+static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
+			unsigned char page, unsigned char *buf,
+			unsigned char bufsize)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+
+	if (c == NULL) {			/* trouble... */
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -1;
+	}
+
+	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(c);
+		rc = -1;
+	}
+	cmd_special_free(h, c);
+	return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+
+	if (c == NULL) {			/* trouble... */
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -1;
+	}
+
+	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+	hpsa_scsi_do_simple_cmd_core(h, c);
+	/* no unmap needed here because no data xfer. */
+
+	ei = c->err_info;
+	if (ei->CommandStatus != 0) {
+		hpsa_scsi_interpret_error(c);
+		rc = -1;
+	}
+	cmd_special_free(h, c);
+	return rc;
+}
+
+static void hpsa_get_raid_level(struct ctlr_info *h,
+	unsigned char *scsi3addr, unsigned char *raid_level)
+{
+	int rc;
+	unsigned char *buf;
+
+	*raid_level = RAID_UNKNOWN;
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return;
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+	if (rc == 0)
+		*raid_level = buf[8];
+	if (*raid_level > RAID_UNKNOWN)
+		*raid_level = RAID_UNKNOWN;
+	kfree(buf);
+	return;
+}
+
+/* Get the device id from inquiry page 0x83 */
+static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
+	unsigned char *device_id, int buflen)
+{
+	int rc;
+	unsigned char *buf;
+
+	if (buflen > 16)
+		buflen = 16;
+	buf = kzalloc(64, GFP_KERNEL);
+	if (!buf)
+		return -1;
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
+	if (rc == 0)
+		memcpy(device_id, &buf[8], buflen);
+	kfree(buf);
+	return rc != 0;
+}
+
+static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
+		struct ReportLUNdata *buf, int bufsize,
+		int extended_response)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	unsigned char scsi3addr[8];
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+	if (c == NULL) {			/* trouble... */
+		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -1;
+	}
+
+	memset(&scsi3addr[0], 0, 8); /* address the controller */
+
+	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+		buf, bufsize, 0, scsi3addr, TYPE_CMD);
+	if (extended_response)
+		c->Request.CDB[1] = extended_response;
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 &&
+	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(c);
+		rc = -1;
+	}
+	cmd_special_free(h, c);
+	return rc;
+}
+
+static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
+		struct ReportLUNdata *buf,
+		int bufsize, int extended_response)
+{
+	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
+}
+
+static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
+		struct ReportLUNdata *buf, int bufsize)
+{
+	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
+}
+
+static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
+	int bus, int target, int lun)
+{
+	device->bus = bus;
+	device->target = target;
+	device->lun = lun;
+}
+
+static int hpsa_update_device_info(struct ctlr_info *h,
+	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
+{
+#define OBDR_TAPE_INQ_SIZE 49
+	unsigned char *inq_buff = NULL;
+
+	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+	if (!inq_buff)
+		goto bail_out;
+
+	memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
+	/* Do an inquiry to the device to see what it is. */
+	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
+		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
+		/* Inquiry failed (msg printed already) */
+		dev_err(&h->pdev->dev,
+			"hpsa_update_device_info: inquiry failed\n");
+		goto bail_out;
+	}
+
+	/* As a side effect, record the firmware version number
+	 * if we happen to be talking to the RAID controller.
+	 */
+	if (is_hba_lunid(scsi3addr))
+		memcpy(h->firm_ver, &inq_buff[32], 4);
+
+	this_device->devtype = (inq_buff[0] & 0x1f);
+	memcpy(this_device->scsi3addr, scsi3addr, 8);
+	memcpy(this_device->vendor, &inq_buff[8],
+		sizeof(this_device->vendor));
+	memcpy(this_device->model, &inq_buff[16],
+		sizeof(this_device->model));
+	memcpy(this_device->revision, &inq_buff[32],
+		sizeof(this_device->revision));
+	memset(this_device->device_id, 0,
+		sizeof(this_device->device_id));
+	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
+		sizeof(this_device->device_id));
+
+	if (this_device->devtype == TYPE_DISK &&
+		is_logical_dev_addr_mode(scsi3addr))
+		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
+	else
+		this_device->raid_level = RAID_UNKNOWN;
+
+	kfree(inq_buff);
+	return 0;
+
+bail_out:
+	kfree(inq_buff);
+	return 1;
+}
+
+static unsigned char *msa2xxx_model[] = {
+	"MSA2012",
+	"MSA2024",
+	"MSA2312",
+	"MSA2324",
+	NULL,
+};
+
+static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
+{
+	int i;
+
+	for (i = 0; msa2xxx_model[i]; i++)
+		if (strncmp(device->model, msa2xxx_model[i],
+			strlen(msa2xxx_model[i])) == 0)
+			return 1;
+	return 0;
+}
+
+/* Helper function to assign bus, target, lun mapping of devices.
+ * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical
+ * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
+ * Logical drive target and lun are assigned at this time, but
+ * physical device lun and target assignment are deferred (assigned
+ * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
+ */
+static void figure_bus_target_lun(struct ctlr_info *h,
+	__u8 *lunaddrbytes, int *bus, int *target, int *lun,
+	struct hpsa_scsi_dev_t *device)
+{
+	__u32 lunid;
+
+	if (is_logical_dev_addr_mode(lunaddrbytes)) {
+		/* logical device */
+		memcpy(&lunid, lunaddrbytes, sizeof(lunid));
+		lunid = le32_to_cpu(lunid);
+
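+		/* For MSA2xxx boxes the target lives in bits 16-29 of the
+		 * LUN ID and the lun in the low byte; other logical volumes
+		 * put the target in the low bits and always use lun 0.
+		 */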
+		if (is_msa2xxx(h, device)) {
+			*bus = 1;
+			*target = (lunid >> 16) & 0x3fff;
+			*lun = lunid & 0x00ff;
+		} else {
+			*bus = 0;
+			*lun = 0;
+			*target = lunid & 0x3fff;
+		}
+	} else {
+		/* physical device */
+		if (is_hba_lunid(lunaddrbytes))
+			*bus = 3;
+		else
+			*bus = 2;
+		*target = -1;
+		*lun = -1; /* we will fill these in later. */
+	}
+}
+
+/*
+ * If there is no lun 0 on a target, linux won't find any devices.
+ * For the MSA2xxx boxes, we have to manually detect the enclosure
+ * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
+ * it for some reason.  *tmpdevice is the target we're adding,
+ * this_device is a pointer into the current element of currentsd[]
+ * that we're building up in update_scsi_devices(), below.
+ * lunzerobits is a bitmap that tracks which targets already have a
+ * lun 0 assigned.
+ * Returns 1 if an enclosure was added, 0 if not.
+ */
+static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
+	struct hpsa_scsi_dev_t *tmpdevice,
+	struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes,
+	int bus, int target, int lun, unsigned long lunzerobits[],
+	int *nmsa2xxx_enclosures)
+{
+	unsigned char scsi3addr[8];
+
+	if (test_bit(target, lunzerobits))
+		return 0; /* There is already a lun 0 on this target. */
+
+	if (!is_logical_dev_addr_mode(lunaddrbytes))
+		return 0; /* It's the logical targets that may lack lun 0. */
+
+	if (!is_msa2xxx(h, tmpdevice))
+		return 0; /* It's only the MSA2xxx that have this problem. */
+
+	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
+		return 0;
+
+	if (is_hba_lunid(scsi3addr))
+		return 0; /* Don't add the RAID controller here. */
+
+#define MAX_MSA2XXX_ENCLOSURES 32
+	if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) {
+		dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX "
+			"enclosures exceeded.  Check your hardware "
+			"configuration.");
+		return 0;
+	}
+
+	memset(scsi3addr, 0, 8);
+	scsi3addr[3] = target;
+	if (hpsa_update_device_info(h, scsi3addr, this_device))
+		return 0;
+	(*nmsa2xxx_enclosures)++;
+	hpsa_set_bus_target_lun(this_device, bus, target, 0);
+	set_bit(target, lunzerobits);
+	return 1;
+}
+
+/*
+ * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
+ * logdev.  The number of luns in physdev and logdev are returned in
+ * *nphysicals and *nlogicals, respectively.
+ * Returns 0 on success, -1 otherwise.
+ */
+static int hpsa_gather_lun_info(struct ctlr_info *h,
+	int reportlunsize,
+	struct ReportLUNdata *physdev, __u32 *nphysicals,
+	struct ReportLUNdata *logdev, __u32 *nlogicals)
+{
+	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
+		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
+		return -1;
+	}
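+	/* The returned LUNListLength is a big-endian byte count; each list
+	 * entry is an 8-byte LUN address, hence the divide by 8.
+	 */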
+	memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals));
+	*nphysicals = be32_to_cpu(*nphysicals) / 8;
+#ifdef DEBUG
+	dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals);
+#endif
+	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
+		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
+			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+			*nphysicals - HPSA_MAX_PHYS_LUN);
+		*nphysicals = HPSA_MAX_PHYS_LUN;
+	}
+	if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
+		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
+		return -1;
+	}
+	memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals));
+	*nlogicals = be32_to_cpu(*nlogicals) / 8;
+#ifdef DEBUG
+	dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals);
+#endif
+	/* Reject Logicals in excess of our max capability. */
+	if (*nlogicals > HPSA_MAX_LUN) {
+		dev_warn(&h->pdev->dev,
+			"maximum logical LUNs (%d) exceeded.  "
+			"%d LUNs ignored.\n", HPSA_MAX_LUN,
+			*nlogicals - HPSA_MAX_LUN);
+		*nlogicals = HPSA_MAX_LUN;
+	}
+	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
+		dev_warn(&h->pdev->dev,
+			"maximum logical + physical LUNs (%d) exceeded. "
+			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
+			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
+		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
+	}
+	return 0;
+}
+
+static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
+{
+	/* The idea here is that we could get notified
+	 * that some devices have changed, so we do a report
+	 * physical luns and report logical luns cmd, and adjust
+	 * our list of devices accordingly.
+	 *
+	 * The scsi3addr's of devices won't change so long as the
+	 * adapter is not reset.  That means we can rescan and
+	 * tell which devices we already know about, vs. new
+	 * devices, vs. disappearing devices.
+	 */
+	struct ReportLUNdata *physdev_list = NULL;
+	struct ReportLUNdata *logdev_list = NULL;
+	unsigned char *inq_buff = NULL;
+	__u32 nphysicals = 0;
+	__u32 nlogicals = 0;
+	__u32 ndev_allocated = 0;
+	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
+	int ncurrent = 0;
+	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
+	int i, nmsa2xxx_enclosures, ndevs_to_allocate;
+	int bus, target, lun;
+	DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR);
+
+	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA,
+		GFP_KERNEL);
+	physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
+	logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
+	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
+	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
+
+	if (!currentsd || !physdev_list || !logdev_list ||
+		!inq_buff || !tmpdevice) {
+		dev_err(&h->pdev->dev, "out of memory\n");
+		goto out;
+	}
+	memset(lunzerobits, 0, sizeof(lunzerobits));
+
+	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
+			logdev_list, &nlogicals))
+		goto out;
+
+	/* We might see up to 32 MSA2xxx enclosures; in reality at most
+	 * 8 of them, but each visible 4 times through different paths.
+	 * The plus 1 is for the RAID controller.
+	 */
+	ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1;
+
+	/* Allocate the per device structures */
+	for (i = 0; i < ndevs_to_allocate; i++) {
+		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
+		if (!currentsd[i]) {
+			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
+				__FILE__, __LINE__);
+			goto out;
+		}
+		ndev_allocated++;
+	}
+
+	/* adjust our table of devices */
+	nmsa2xxx_enclosures = 0;
+	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
+		__u8 *lunaddrbytes;
+
+		/* Figure out where the LUN ID info is coming from */
+		if (i < nphysicals)
+			lunaddrbytes = &physdev_list->LUN[i][0];
+		else
+			if (i < nphysicals + nlogicals)
+				lunaddrbytes =
+					&logdev_list->LUN[i-nphysicals][0];
+			else /* jam in the RAID controller at the end */
+				lunaddrbytes = RAID_CTLR_LUNID;
+
+		/* skip masked physical devices. */
+		if ((lunaddrbytes[3] & 0xC0) && i < nphysicals)
+			continue;
+
+		/* Get device type, vendor, model, device id */
+		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice))
+			continue; /* skip it if we can't talk to it. */
+		figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun,
+			tmpdevice);
+		this_device = currentsd[ncurrent];
+
+		/*
+		 * For the msa2xxx boxes, we have to insert a LUN 0 which
+		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
+		 * is nonetheless an enclosure device there.  We have to
+		 * present that otherwise linux won't find anything if
+		 * there is no lun 0.
+		 */
+		if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device,
+				lunaddrbytes, bus, target, lun, lunzerobits,
+				&nmsa2xxx_enclosures)) {
+			ncurrent++;
+			this_device = currentsd[ncurrent];
+		}
+
+		*this_device = *tmpdevice;
+		hpsa_set_bus_target_lun(this_device, bus, target, lun);
+
+		switch (this_device->devtype) {
+		case TYPE_ROM: {
+			/* We don't *really* support actual CD-ROM devices,
+			 * just "One Button Disaster Recovery" tape drive
+			 * which temporarily pretends to be a CD-ROM drive.
+			 * So we check that the device is really an OBDR tape
+			 * device by checking for "$DR-10" in bytes 43-48 of
+			 * the inquiry data.
+			 */
+				char obdr_sig[7];
+#define OBDR_TAPE_SIG "$DR-10"
+				strncpy(obdr_sig, &inq_buff[43], 6);
+				obdr_sig[6] = '\0';
+				if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
+					/* Not OBDR device, ignore it. */
+					break;
+			}
+			ncurrent++;
+			break;
+		case TYPE_DISK:
+			if (i < nphysicals)
+				break;
+			ncurrent++;
+			break;
+		case TYPE_TAPE:
+		case TYPE_MEDIUM_CHANGER:
+			ncurrent++;
+			break;
+		case TYPE_RAID:
+			/* Only present the Smartarray HBA as a RAID controller.
+			 * If it's a RAID controller other than the HBA itself
+			 * (an external RAID controller, MSA500 or similar)
+			 * don't present it.
+			 */
+			if (!is_hba_lunid(lunaddrbytes))
+				break;
+			ncurrent++;
+			break;
+		default:
+			break;
+		}
+		if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA)
+			break;
+	}
+	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
+out:
+	kfree(tmpdevice);
+	for (i = 0; i < ndev_allocated; i++)
+		kfree(currentsd[i]);
+	kfree(currentsd);
+	kfree(inq_buff);
+	kfree(physdev_list);
+	kfree(logdev_list);
+	return;
+}
+
+/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
+ * dma mapping  and fills in the scatter gather entries of the
+ * hpsa command, cp.
+ */
+static int hpsa_scatter_gather(struct pci_dev *pdev,
+		struct CommandList *cp,
+		struct scsi_cmnd *cmd)
+{
+	unsigned int len;
+	struct scatterlist *sg;
+	__u64 addr64;
+	int use_sg, i;
+
+	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+
+	use_sg = scsi_dma_map(cmd);
+	if (use_sg < 0)
+		return use_sg;
+
+	if (!use_sg)
+		goto sglist_finished;
+
+	scsi_for_each_sg(cmd, sg, use_sg, i) {
+		addr64 = (__u64) sg_dma_address(sg);
+		len  = sg_dma_len(sg);
+		cp->SG[i].Addr.lower =
+			(__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+		cp->SG[i].Addr.upper =
+			(__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+		cp->SG[i].Len = len;
+		cp->SG[i].Ext = 0;  /* we are not chaining */
+	}
+
+sglist_finished:
+
+	cp->Header.SGList = (__u8) use_sg;   /* no. SGs contig in this cmd */
+	cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
+	return 0;
+}
+
+static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+	void (*done)(struct scsi_cmnd *))
+{
+	struct ctlr_info *h;
+	struct hpsa_scsi_dev_t *dev;
+	unsigned char scsi3addr[8];
+	struct CommandList *c;
+	unsigned long flags;
+
+	/* Get the ptr to our adapter structure out of cmd->host. */
+	h = sdev_to_hba(cmd->device);
+	dev = cmd->device->hostdata;
+	if (!dev) {
+		cmd->result = DID_NO_CONNECT << 16;
+		done(cmd);
+		return 0;
+	}
+	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
+
+	/* Need a lock as this is being allocated from the pool */
+	spin_lock_irqsave(&h->lock, flags);
+	c = cmd_alloc(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+	if (c == NULL) {			/* trouble... */
+		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	/* Fill in the command list header */
+
+	cmd->scsi_done = done;    /* save this for use by completion code */
+
+	/* save c in case we have to abort it  */
+	cmd->host_scribble = (unsigned char *) c;
+
+	c->cmd_type = CMD_SCSI;
+	c->scsi_cmd = cmd;
+	c->Header.ReplyQueue = 0;  /* unused in simple mode */
+	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
+
+	/* Fill in the request block... */
+
+	c->Request.Timeout = 0;
+	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
+	c->Request.CDBLen = cmd->cmd_len;
+	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
+	c->Request.Type.Type = TYPE_CMD;
+	c->Request.Type.Attribute = ATTR_SIMPLE;
+	switch (cmd->sc_data_direction) {
+	case DMA_TO_DEVICE:
+		c->Request.Type.Direction = XFER_WRITE;
+		break;
+	case DMA_FROM_DEVICE:
+		c->Request.Type.Direction = XFER_READ;
+		break;
+	case DMA_NONE:
+		c->Request.Type.Direction = XFER_NONE;
+		break;
+	case DMA_BIDIRECTIONAL:
+		/* This can happen if a buggy application does a scsi passthru
+		 * and sets both inlen and outlen to non-zero.  (See
+		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
+		 */
+
+		c->Request.Type.Direction = XFER_RSVD;
+		/* This is technically wrong, and hpsa controllers should
+		 * reject it with CMD_INVALID, which is the most correct
+		 * response, but non-fibre backends appear to let it
+		 * slide by, and give the same results as if this field
+		 * were set correctly.  Either way is acceptable for
+		 * our purposes here.
+		 */
+
+		break;
+
+	default:
+		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
+			cmd->sc_data_direction);
+		BUG();
+		break;
+	}
+
+	if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
+		cmd_free(h, c);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+	enqueue_cmd_and_start_io(h, c);
+	/* the cmd'll come back via intr handler in complete_scsi_command()  */
+	return 0;
+}
+
+static void hpsa_unregister_scsi(struct ctlr_info *h)
+{
+	/* we are being forcibly unloaded, and may not refuse. */
+	scsi_remove_host(h->scsi_host);
+	scsi_host_put(h->scsi_host);
+	h->scsi_host = NULL;
+}
+
+static int hpsa_register_scsi(struct ctlr_info *h)
+{
+	int rc;
+
+	hpsa_update_scsi_devices(h, -1);
+	rc = hpsa_scsi_detect(h);
+	if (rc != 0)
+		dev_err(&h->pdev->dev, "hpsa_register_scsi: failed"
+			" hpsa_scsi_detect(), rc is %d\n", rc);
+	return rc;
+}
+
+static int wait_for_device_to_become_ready(struct ctlr_info *h,
+	unsigned char lunaddr[])
+{
+	int rc = 0;
+	int count = 0;
+	int waittime = 1; /* seconds */
+	struct CommandList *c;
+
+	c = cmd_special_alloc(h);
+	if (!c) {
+		dev_warn(&h->pdev->dev, "out of memory in "
+			"wait_for_device_to_become_ready.\n");
+		return IO_ERROR;
+	}
+
+	/* Send test unit ready until device ready, or give up. */
+	while (count < HPSA_TUR_RETRY_LIMIT) {
+
+		/* Wait for a bit.  do this first, because if we send
+		 * the TUR right away, the reset will just abort it.
+		 */
+		msleep(1000 * waittime);
+		count++;
+
+		/* Increase wait time with each try, up to a point. */
+		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
+			waittime = waittime * 2;
+
+		/* Send the Test Unit Ready */
+		fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+		hpsa_scsi_do_simple_cmd_core(h, c);
+		/* no unmap needed here because no data xfer. */
+
+		if (c->err_info->CommandStatus == CMD_SUCCESS) {
+			rc = 0;
+			break;
+		}
+
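+		/* A check condition with NO SENSE or UNIT ATTENTION sense
+		 * still means the device answered; a unit attention is
+		 * expected right after a reset.
+		 */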
+		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
+			(c->err_info->SenseInfo[2] == NO_SENSE ||
+			c->err_info->SenseInfo[2] == UNIT_ATTENTION)) {
+			rc = 0;
+			break;
+		}
+
+		dev_warn(&h->pdev->dev, "waiting %d secs "
+			"for device to become ready.\n", waittime);
+		rc = 1; /* device not ready. */
+	}
+
+	if (rc)
+		dev_warn(&h->pdev->dev, "giving up on device.\n");
+	else
+		dev_warn(&h->pdev->dev, "device is ready.\n");
+
+	cmd_special_free(h, c);
+	return rc;
+}
+
+/* Need at least one of these error handlers to keep ../scsi/hosts.c from
+ * complaining.  Doing a host- or bus-reset can't do anything good here.
+ */
+static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
+{
+	int rc;
+	struct ctlr_info *h;
+	struct hpsa_scsi_dev_t *dev;
+
+	/* find the controller to which the command to be aborted was sent */
+	h = sdev_to_hba(scsicmd->device);
+	if (h == NULL) /* paranoia */
+		return FAILED;
+	dev_warn(&h->pdev->dev, "resetting drive\n");
+
+	dev = scsicmd->device->hostdata;
+	if (!dev) {
+		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
+			"device lookup failed.\n");
+		return FAILED;
+	}
+	/* send a reset to the SCSI LUN which the command was sent to */
+	rc = hpsa_send_reset(h, dev->scsi3addr);
+	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
+		return SUCCESS;
+
+	dev_warn(&h->pdev->dev, "resetting device failed.\n");
+	return FAILED;
+}
+
+/*
+ * For operations that cannot sleep, a command block is allocated at init,
+ * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
+ * which ones are free or in use.  Lock must be held when calling this.
+ * cmd_free() is the complement.
+ */
+static struct CommandList *cmd_alloc(struct ctlr_info *h)
+{
+	struct CommandList *c;
+	int i;
+	union u64bit temp64;
+	dma_addr_t cmd_dma_handle, err_dma_handle;
+
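+	/* Scan the command-pool bitmap for a free slot; test_and_set_bit
+	 * re-checks the bit atomically in case it was claimed between the
+	 * scan and the set.
+	 */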
+	do {
+		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+		if (i == h->nr_cmds)
+			return NULL;
+	} while (test_and_set_bit
+		 (i & (BITS_PER_LONG - 1),
+		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+	c = h->cmd_pool + i;
+	memset(c, 0, sizeof(*c));
+	cmd_dma_handle = h->cmd_pool_dhandle
+	    + i * sizeof(*c);
+	c->err_info = h->errinfo_pool + i;
+	memset(c->err_info, 0, sizeof(*c->err_info));
+	err_dma_handle = h->errinfo_pool_dhandle
+	    + i * sizeof(*c->err_info);
+	h->nr_allocs++;
+
+	c->cmdindex = i;
+
+	INIT_HLIST_NODE(&c->list);
+	c->busaddr = (__u32) cmd_dma_handle;
+	temp64.val = (__u64) err_dma_handle;
+	c->ErrDesc.Addr.lower = temp64.val32.lower;
+	c->ErrDesc.Addr.upper = temp64.val32.upper;
+	c->ErrDesc.Len = sizeof(*c->err_info);
+
+	c->h = h;
+	return c;
+}
+
+/* For operations that can sleep (the allocation may block), this
+ * routine can be called.  The lock need not be held to call
+ * cmd_special_alloc. cmd_special_free() is the complement.
+ */
+static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
+{
+	struct CommandList *c;
+	union u64bit temp64;
+	dma_addr_t cmd_dma_handle, err_dma_handle;
+
+	c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
+	if (c == NULL)
+		return NULL;
+	memset(c, 0, sizeof(*c));
+
+	c->cmdindex = -1;
+
+	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
+		    &err_dma_handle);
+
+	if (c->err_info == NULL) {
+		pci_free_consistent(h->pdev,
+			sizeof(*c), c, cmd_dma_handle);
+		return NULL;
+	}
+	memset(c->err_info, 0, sizeof(*c->err_info));
+
+	INIT_HLIST_NODE(&c->list);
+	c->busaddr = (__u32) cmd_dma_handle;
+	temp64.val = (__u64) err_dma_handle;
+	c->ErrDesc.Addr.lower = temp64.val32.lower;
+	c->ErrDesc.Addr.upper = temp64.val32.upper;
+	c->ErrDesc.Len = sizeof(*c->err_info);
+
+	c->h = h;
+	return c;
+}
+
+static void cmd_free(struct ctlr_info *h, struct CommandList *c)
+{
+	int i;
+
+	i = c - h->cmd_pool;
+	clear_bit(i & (BITS_PER_LONG - 1),
+		  h->cmd_pool_bits + (i / BITS_PER_LONG));
+	h->nr_frees++;
+}
+
+static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
+{
+	union u64bit temp64;
+
+	temp64.val32.lower = c->ErrDesc.Addr.lower;
+	temp64.val32.upper = c->ErrDesc.Addr.upper;
+	pci_free_consistent(h->pdev, sizeof(*c->err_info),
+			    c->err_info, (dma_addr_t) temp64.val);
+	pci_free_consistent(h->pdev, sizeof(*c),
+			    c, (dma_addr_t) c->busaddr);
+}
+
+#ifdef CONFIG_COMPAT
+
+static int do_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = hpsa_ioctl(dev, cmd, arg);
+	unlock_kernel();
+	return ret;
+}
+
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg);
+static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+	int cmd, void *arg);
+
+static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+	switch (cmd) {
+	case CCISS_GETPCIINFO:
+	case CCISS_GETINTINFO:
+	case CCISS_SETINTINFO:
+	case CCISS_GETNODENAME:
+	case CCISS_SETNODENAME:
+	case CCISS_GETHEARTBEAT:
+	case CCISS_GETBUSTYPES:
+	case CCISS_GETFIRMVER:
+	case CCISS_GETDRIVVER:
+	case CCISS_REVALIDVOLS:
+	case CCISS_DEREGDISK:
+	case CCISS_REGNEWDISK:
+	case CCISS_REGNEWD:
+	case CCISS_RESCANDISK:
+	case CCISS_GETLUNINFO:
+		return do_ioctl(dev, cmd, arg);
+
+	case CCISS_PASSTHRU32:
+		return hpsa_ioctl32_passthru(dev, cmd, arg);
+	case CCISS_BIG_PASSTHRU32:
+		return hpsa_ioctl32_big_passthru(dev, cmd, arg);
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
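+	/* Map the buffer for DMA and split the 64-bit bus address into the
+	 * 32-bit lower/upper halves that the controller's SG entry expects.
+	 */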
+
+static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
+{
+	IOCTL32_Command_struct __user *arg32 =
+	    (IOCTL32_Command_struct __user *) arg;
+	IOCTL_Command_struct arg64;
+	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+	int err;
+	u32 cp;
+
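+	/* Build a native IOCTL_Command_struct in user space from the 32-bit
+	 * layout and hand it to the regular ioctl path; only the buffer
+	 * pointer needs translating, via compat_ptr().
+	 */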
+	err = 0;
+	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+			   sizeof(arg64.LUN_info));
+	err |= copy_from_user(&arg64.Request, &arg32->Request,
+			   sizeof(arg64.Request));
+	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+			   sizeof(arg64.error_info));
+	err |= get_user(arg64.buf_size, &arg32->buf_size);
+	err |= get_user(cp, &arg32->buf);
+	arg64.buf = compat_ptr(cp);
+	err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+	if (err)
+		return -EFAULT;
+
+	err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p);
+	if (err)
+		return err;
+	err |= copy_in_user(&arg32->error_info, &p->error_info,
+			 sizeof(arg32->error_info));
+	if (err)
+		return -EFAULT;
+	return err;
+}
+
+static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
+	int cmd, void *arg)
+{
+	BIG_IOCTL32_Command_struct __user *arg32 =
+	    (BIG_IOCTL32_Command_struct __user *) arg;
+	BIG_IOCTL_Command_struct arg64;
+	BIG_IOCTL_Command_struct __user *p =
+	    compat_alloc_user_space(sizeof(arg64));
+	int err;
+	u32 cp;
+
+	err = 0;
+	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
+			   sizeof(arg64.LUN_info));
+	err |= copy_from_user(&arg64.Request, &arg32->Request,
+			   sizeof(arg64.Request));
+	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
+			   sizeof(arg64.error_info));
+	err |= get_user(arg64.buf_size, &arg32->buf_size);
+	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
+	err |= get_user(cp, &arg32->buf);
+	arg64.buf = compat_ptr(cp);
+	err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+	if (err)
+		return -EFAULT;
+
+	err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
+	if (err)
+		return err;
+	err |= copy_in_user(&arg32->error_info, &p->error_info,
+			 sizeof(arg32->error_info));
+	if (err)
+		return -EFAULT;
+	return err;
+}
+#endif
+
+static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
+{
+	struct hpsa_pci_info pciinfo;
+
+	if (!argp)
+		return -EINVAL;
+	pciinfo.domain = pci_domain_nr(h->pdev->bus);
+	pciinfo.bus = h->pdev->bus->number;
+	pciinfo.dev_fn = h->pdev->devfn;
+	pciinfo.board_id = h->board_id;
+	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
+		return -EFAULT;
+	return 0;
+}
+
+static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
+{
+	DriverVer_type DriverVer;
+	unsigned char vmaj, vmin, vsubmin;
+	int rc;
+
+	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
+		&vmaj, &vmin, &vsubmin);
+	if (rc != 3) {
+		dev_info(&h->pdev->dev, "driver version string '%s' "
+			"unrecognized.", HPSA_DRIVER_VERSION);
+		vmaj = 0;
+		vmin = 0;
+		vsubmin = 0;
+	}
+	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
+	if (!argp)
+		return -EINVAL;
+	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
+		return -EFAULT;
+	return 0;
+}
+
+static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+	IOCTL_Command_struct iocommand;
+	struct CommandList *c;
+	char *buff = NULL;
+	union u64bit temp64;
+
+	if (!argp)
+		return -EINVAL;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
+		return -EFAULT;
+	if ((iocommand.buf_size < 1) &&
+	    (iocommand.Request.Type.Direction != XFER_NONE)) {
+		return -EINVAL;
+	}
+	if (iocommand.buf_size > 0) {
+		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+		if (buff == NULL)
+			return -ENOMEM;
+	}
+	if (iocommand.Request.Type.Direction == XFER_WRITE) {
+		/* Copy the data into the buffer we created */
+		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
+			kfree(buff);
+			return -EFAULT;
+		}
+	} else
+		memset(buff, 0, iocommand.buf_size);
+	c = cmd_special_alloc(h);
+	if (c == NULL) {
+		kfree(buff);
+		return -ENOMEM;
+	}
+	/* Fill in the command type */
+	c->cmd_type = CMD_IOCTL_PEND;
+	/* Fill in Command Header */
+	c->Header.ReplyQueue = 0; /* unused in simple mode */
+	if (iocommand.buf_size > 0) {	/* buffer to fill */
+		c->Header.SGList = 1;
+		c->Header.SGTotal = 1;
+	} else	{ /* no buffers to fill */
+		c->Header.SGList = 0;
+		c->Header.SGTotal = 0;
+	}
+	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
+	/* use the kernel address of the cmd block for the tag */
+	c->Header.Tag.lower = c->busaddr;
+
+	/* Fill in Request block */
+	memcpy(&c->Request, &iocommand.Request,
+		sizeof(c->Request));
+
+	/* Fill in the scatter gather information */
+	if (iocommand.buf_size > 0) {
+		temp64.val = pci_map_single(h->pdev, buff,
+			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+		c->SG[0].Addr.lower = temp64.val32.lower;
+		c->SG[0].Addr.upper = temp64.val32.upper;
+		c->SG[0].Len = iocommand.buf_size;
+		c->SG[0].Ext = 0; /* we are not chaining*/
+	}
+	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+	check_ioctl_unit_attention(h, c);
+
+	/* Copy the error information out */
+	memcpy(&iocommand.error_info, c->err_info,
+		sizeof(iocommand.error_info));
+	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
+		kfree(buff);
+		cmd_special_free(h, c);
+		return -EFAULT;
+	}
+
+	if (iocommand.Request.Type.Direction == XFER_READ) {
+		/* Copy the data out of the buffer we created */
+		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
+			kfree(buff);
+			cmd_special_free(h, c);
+			return -EFAULT;
+		}
+	}
+	kfree(buff);
+	cmd_special_free(h, c);
+	return 0;
+}
+
+static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
+{
+	BIG_IOCTL_Command_struct *ioc;
+	struct CommandList *c;
+	unsigned char **buff = NULL;
+	int *buff_size = NULL;
+	union u64bit temp64;
+	BYTE sg_used = 0;
+	int status = 0;
+	int i;
+	__u32 left;
+	__u32 sz;
+	BYTE __user *data_ptr;
+
+	if (!argp)
+		return -EINVAL;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+	if (!ioc) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+		status = -EFAULT;
+		goto cleanup1;
+	}
+	if ((ioc->buf_size < 1) &&
+	    (ioc->Request.Type.Direction != XFER_NONE)) {
+		status = -EINVAL;
+		goto cleanup1;
+	}
+	/* Check kmalloc limits  using all SGs */
+	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+		status = -EINVAL;
+		goto cleanup1;
+	}
+	if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+		status = -EINVAL;
+		goto cleanup1;
+	}
+	buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+	if (!buff) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
+	if (!buff_size) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	left = ioc->buf_size;
+	data_ptr = ioc->buf;
+	while (left) {
+		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+		buff_size[sg_used] = sz;
+		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+		if (buff[sg_used] == NULL) {
+			status = -ENOMEM;
+			goto cleanup1;
+		}
+		if (ioc->Request.Type.Direction == XFER_WRITE) {
+			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+				status = -EFAULT;
+				goto cleanup1;
+			}
+		} else
+			memset(buff[sg_used], 0, sz);
+		left -= sz;
+		data_ptr += sz;
+		sg_used++;
+	}
+	c = cmd_special_alloc(h);
+	if (c == NULL) {
+		status = -ENOMEM;
+		goto cleanup1;
+	}
+	c->cmd_type = CMD_IOCTL_PEND;
+	c->Header.ReplyQueue = 0;
+
+	if (ioc->buf_size > 0) {
+		c->Header.SGList = sg_used;
+		c->Header.SGTotal = sg_used;
+	} else {
+		c->Header.SGList = 0;
+		c->Header.SGTotal = 0;
+	}
+	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
+	c->Header.Tag.lower = c->busaddr;
+	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
+	if (ioc->buf_size > 0) {
+		int i;
+		for (i = 0; i < sg_used; i++) {
+			temp64.val = pci_map_single(h->pdev, buff[i],
+				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
+			c->SG[i].Addr.lower = temp64.val32.lower;
+			c->SG[i].Addr.upper = temp64.val32.upper;
+			c->SG[i].Len = buff_size[i];
+			/* we are not chaining */
+			c->SG[i].Ext = 0;
+		}
+	}
+	hpsa_scsi_do_simple_cmd_core(h, c);
+	hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+	check_ioctl_unit_attention(h, c);
+	/* Copy the error information out */
+	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
+	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+		cmd_special_free(h, c);
+		status = -EFAULT;
+		goto cleanup1;
+	}
+	if (ioc->Request.Type.Direction == XFER_READ) {
+		/* Copy the data out of the buffer we created */
+		BYTE __user *ptr = ioc->buf;
+		for (i = 0; i < sg_used; i++) {
+			if (copy_to_user(ptr, buff[i], buff_size[i])) {
+				cmd_special_free(h, c);
+				status = -EFAULT;
+				goto cleanup1;
+			}
+			ptr += buff_size[i];
+		}
+	}
+	cmd_special_free(h, c);
+	status = 0;
+cleanup1:
+	if (buff) {
+		for (i = 0; i < sg_used; i++)
+			kfree(buff[i]);
+		kfree(buff);
+	}
+	kfree(buff_size);
+	kfree(ioc);
+	return status;
+}
+
+static void check_ioctl_unit_attention(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
+		(void) check_for_unit_attention(h, c);
+}
+/*
+ * ioctl
+ */
+static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
+{
+	struct ctlr_info *h;
+	void __user *argp = (void __user *)arg;
+
+	h = sdev_to_hba(dev);
+
+	switch (cmd) {
+	case CCISS_DEREGDISK:
+	case CCISS_REGNEWDISK:
+	case CCISS_REGNEWD:
+		hpsa_update_scsi_devices(h, dev->host->host_no);
+		return 0;
+	case CCISS_GETPCIINFO:
+		return hpsa_getpciinfo_ioctl(h, argp);
+	case CCISS_GETDRIVVER:
+		return hpsa_getdrivver_ioctl(h, argp);
+	case CCISS_PASSTHRU:
+		return hpsa_passthru_ioctl(h, argp);
+	case CCISS_BIG_PASSTHRU:
+		return hpsa_big_passthru_ioctl(h, argp);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h,
+	void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr,
+	int cmd_type)
+{
+	int pci_dir = XFER_NONE;
+
+	c->cmd_type = CMD_IOCTL_PEND;
+	c->Header.ReplyQueue = 0;
+	if (buff != NULL && size > 0) {
+		c->Header.SGList = 1;
+		c->Header.SGTotal = 1;
+	} else {
+		c->Header.SGList = 0;
+		c->Header.SGTotal = 0;
+	}
+	c->Header.Tag.lower = c->busaddr;
+	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
+
+	c->Request.Type.Type = cmd_type;
+	if (cmd_type == TYPE_CMD) {
+		switch (cmd) {
+		case HPSA_INQUIRY:
+			/* are we trying to read a vital product page */
+			if (page_code != 0) {
+				c->Request.CDB[1] = 0x01;
+				c->Request.CDB[2] = page_code;
+			}
+			c->Request.CDBLen = 6;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = HPSA_INQUIRY;
+			c->Request.CDB[4] = size & 0xFF;
+			break;
+		case HPSA_REPORT_LOG:
+		case HPSA_REPORT_PHYS:
+			/* Talking to the controller, so it's a physical
+			 * command: mode = 00, target = 0.  Nothing to write.
+			 */
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
+			c->Request.CDB[7] = (size >> 16) & 0xFF;
+			c->Request.CDB[8] = (size >> 8) & 0xFF;
+			c->Request.CDB[9] = size & 0xFF;
+			break;
+
+		case HPSA_READ_CAPACITY:
+			c->Request.CDBLen = 10;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			break;
+		case HPSA_CACHE_FLUSH:
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = BMIC_WRITE;
+			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+			break;
+		case TEST_UNIT_READY:
+			c->Request.CDBLen = 6;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_NONE;
+			c->Request.Timeout = 0;
+			break;
+		default:
+			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
+			BUG();
+			return;
+		}
+	} else if (cmd_type == TYPE_MSG) {
+		switch (cmd) {
+
+		case HPSA_DEVICE_RESET_MSG:
+			c->Request.CDBLen = 16;
+			c->Request.Type.Type = 1; /* It is a MSG not a CMD */
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_NONE;
+			c->Request.Timeout = 0; /* Don't time out */
+			c->Request.CDB[0] =  0x01; /* RESET_MSG is 0x01 */
+			c->Request.CDB[1] = 0x03;  /* Reset target above */
+			/* If bytes 4-7 are zero, it means reset the */
+			/* LunID device */
+			c->Request.CDB[4] = 0x00;
+			c->Request.CDB[5] = 0x00;
+			c->Request.CDB[6] = 0x00;
+			c->Request.CDB[7] = 0x00;
+			break;
+
+		default:
+			dev_warn(&h->pdev->dev, "unknown message type %d\n",
+				cmd);
+			BUG();
+		}
+	} else {
+		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
+		BUG();
+	}
+
+	switch (c->Request.Type.Direction) {
+	case XFER_READ:
+		pci_dir = PCI_DMA_FROMDEVICE;
+		break;
+	case XFER_WRITE:
+		pci_dir = PCI_DMA_TODEVICE;
+		break;
+	case XFER_NONE:
+		pci_dir = PCI_DMA_NONE;
+		break;
+	default:
+		pci_dir = PCI_DMA_BIDIRECTIONAL;
+	}
+
+	hpsa_map_one(h->pdev, c, buff, size, pci_dir);
+
+	return;
+}
+
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
+static void __iomem *remap_pci_mem(ulong base, ulong size)
+{
+	ulong page_base = ((ulong) base) & PAGE_MASK;
+	ulong page_offs = ((ulong) base) - page_base;
+	void __iomem *page_remapped = ioremap(page_base, page_offs + size);
+
+	return page_remapped ? (page_remapped + page_offs) : NULL;
+}
+
+/* Takes cmds off the submission queue and sends them to the hardware,
+ * then puts them on the queue of cmds waiting for completion.
+ */
+static void start_io(struct ctlr_info *h)
+{
+	struct CommandList *c;
+
+	while (!hlist_empty(&h->reqQ)) {
+		c = hlist_entry(h->reqQ.first, struct CommandList, list);
+		/* can't do anything if fifo is full */
+		if ((h->access.fifo_full(h))) {
+			dev_warn(&h->pdev->dev, "fifo full\n");
+			break;
+		}
+
+		/* Get the first entry from the Request Q */
+		removeQ(c);
+		h->Qdepth--;
+
+		/* Tell the controller execute command */
+		h->access.submit_command(h, c);
+
+		/* Put job onto the completed Q */
+		addQ(&h->cmpQ, c);
+	}
+}
+
+static inline unsigned long get_next_completion(struct ctlr_info *h)
+{
+	return h->access.command_completed(h);
+}
+
+static inline int interrupt_pending(struct ctlr_info *h)
+{
+	return h->access.intr_pending(h);
+}
+
+static inline long interrupt_not_for_us(struct ctlr_info *h)
+{
+	return ((h->access.intr_pending(h) == 0) ||
+		 (h->interrupts_enabled == 0));
+}
+
+static inline int bad_tag(struct ctlr_info *h, __u32 tag_index,
+	__u32 raw_tag)
+{
+	if (unlikely(tag_index >= h->nr_cmds)) {
+		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+		return 1;
+	}
+	return 0;
+}
+
+static inline void finish_cmd(struct CommandList *c, __u32 raw_tag)
+{
+	removeQ(c);
+	if (likely(c->cmd_type == CMD_SCSI))
+		complete_scsi_command(c, 0, raw_tag);
+	else if (c->cmd_type == CMD_IOCTL_PEND)
+		complete(c->waiting);
+}
+
+static irqreturn_t do_hpsa_intr(int irq, void *dev_id)
+{
+	struct ctlr_info *h = dev_id;
+	struct CommandList *c;
+	unsigned long flags;
+	__u32 raw_tag, tag, tag_index;
+	struct hlist_node *tmp;
+
+	if (interrupt_not_for_us(h))
+		return IRQ_NONE;
+	spin_lock_irqsave(&h->lock, flags);
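+	/* Drain the completion FIFO.  A tag that encodes a command pool
+	 * index is looked up directly; any other tag is matched against
+	 * the bus addresses of commands on the completion queue.
+	 */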
+	while (interrupt_pending(h)) {
+		while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) {
+			if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) {
+				tag_index = HPSA_TAG_TO_INDEX(raw_tag);
+				if (bad_tag(h, tag_index, raw_tag)) {
+					spin_unlock_irqrestore(&h->lock,
+								flags);
+					return IRQ_HANDLED;
+				}
+				c = h->cmd_pool + tag_index;
+				finish_cmd(c, raw_tag);
+				continue;
+			}
+			tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag);
+			c = NULL;
+			hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+				if (c->busaddr == tag) {
+					finish_cmd(c, raw_tag);
+					break;
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return IRQ_HANDLED;
+}
+
+/* Send a message CDB to the firmware. */
+static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+						unsigned char type)
+{
+	struct Command {
+		struct CommandListHeader CommandHeader;
+		struct RequestBlock Request;
+		struct ErrDescriptor ErrorDescriptor;
+	};
+	struct Command *cmd;
+	static const size_t cmd_sz = sizeof(*cmd) +
+					sizeof(cmd->ErrorDescriptor);
+	dma_addr_t paddr64;
+	uint32_t paddr32, tag;
+	void __iomem *vaddr;
+	int i, err;
+
+	vaddr = pci_ioremap_bar(pdev, 0);
+	if (vaddr == NULL)
+		return -ENOMEM;
+
+	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
+	 * CCISS commands, so they must be allocated from the lower 4GiB of
+	 * memory.
+	 */
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err) {
+		iounmap(vaddr);
+		return -ENOMEM;
+	}
+
+	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+	if (cmd == NULL) {
+		iounmap(vaddr);
+		return -ENOMEM;
+	}
+
+	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
+	 * although there's no guarantee, we assume that the address is at
+	 * least 4-byte aligned (most likely, it's page-aligned).
+	 */
+	paddr32 = paddr64;
+
+	cmd->CommandHeader.ReplyQueue = 0;
+	cmd->CommandHeader.SGList = 0;
+	cmd->CommandHeader.SGTotal = 0;
+	cmd->CommandHeader.Tag.lower = paddr32;
+	cmd->CommandHeader.Tag.upper = 0;
+	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
+
+	cmd->Request.CDBLen = 16;
+	cmd->Request.Type.Type = TYPE_MSG;
+	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
+	cmd->Request.Type.Direction = XFER_NONE;
+	cmd->Request.Timeout = 0; /* Don't time out */
+	cmd->Request.CDB[0] = opcode;
+	cmd->Request.CDB[1] = type;
+	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
+	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
+	cmd->ErrorDescriptor.Addr.upper = 0;
+	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
+
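+	/* Post the command's physical address to the inbound queue, then
+	 * poll the reply port until the same address (our tag) comes back
+	 * or the retry limit is reached.
+	 */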
+	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
+
+	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
+		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+		if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32)
+			break;
+		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
+	}
+
+	iounmap(vaddr);
+
+	/* we leak the DMA buffer here ... no choice since the controller could
+	 *  still complete the command.
+	 */
+	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
+		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
+			opcode, type);
+		return -ETIMEDOUT;
+	}
+
+	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+
+	if (tag & HPSA_ERROR_BIT) {
+		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
+			opcode, type);
+		return -EIO;
+	}
+
+	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
+		opcode, type);
+	return 0;
+}
+
+#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
+#define hpsa_noop(p) hpsa_message(p, 3, 0)
+
+static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
+{
+/* the #defines are stolen from drivers/pci/msi.h. */
+#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
+
+	int pos;
+	u16 control = 0;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	if (pos) {
+		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+		if (control & PCI_MSI_FLAGS_ENABLE) {
+			dev_info(&pdev->dev, "resetting MSI\n");
+			pci_write_config_word(pdev, msi_control_reg(pos),
+					control & ~PCI_MSI_FLAGS_ENABLE);
+		}
+	}
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+	if (pos) {
+		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+		if (control & PCI_MSIX_FLAGS_ENABLE) {
+			dev_info(&pdev->dev, "resetting MSI-X\n");
+			pci_write_config_word(pdev, msi_control_reg(pos),
+					control & ~PCI_MSIX_FLAGS_ENABLE);
+		}
+	}
+
+	return 0;
+}
+
+/* This does a hard reset of the controller using PCI power management
+ * states.
+ */
+static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev)
+{
+	u16 pmcsr, saved_config_space[32];
+	int i, pos;
+
+	dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+
+	/* This is very nearly the same thing as
+	 *
+	 * pci_save_state(pci_dev);
+	 * pci_set_power_state(pci_dev, PCI_D3hot);
+	 * pci_set_power_state(pci_dev, PCI_D0);
+	 * pci_restore_state(pci_dev);
+	 *
+	 * but we can't use these nice canned kernel routines on
+	 * kexec, because they also check the MSI/MSI-X state in PCI
+	 * configuration space and do the wrong thing when it is
+	 * set/cleared.  Also, the pci_save/restore_state functions
+	 * violate the ordering requirements for restoring the
+	 * configuration space from the CCISS document (see the
+	 * comment below).  So we roll our own ....
+	 */
+
+	for (i = 0; i < 32; i++)
+		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pos == 0) {
+		dev_err(&pdev->dev,
+			"hpsa_reset_controller: PCI PM not supported\n");
+		return -ENODEV;
+	}
+
+	/* Quoting from the Open CISS Specification: "The Power
+	 * Management Control/Status Register (CSR) controls the power
+	 * state of the device.  The normal operating state is D0,
+	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
+	 * the controller, place the interface device in D3 then to
+	 * D0, this causes a secondary PCI reset which will reset the
+	 * controller."
+	 */
+
+	/* enter the D3hot power management state */
+	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+	pmcsr |= PCI_D3hot;
+	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+	msleep(500);
+
+	/* enter the D0 power management state */
+	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+	pmcsr |= PCI_D0;
+	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+	msleep(500);
+
+	/* Restore the PCI configuration space.  The Open CISS
+	 * Specification says, "Restore the PCI Configuration
+	 * Registers, offsets 00h through 60h. It is important to
+	 * restore the command register, 16-bits at offset 04h,
+	 * last. Do not restore the configuration status register,
+	 * 16-bits at offset 06h."  Note that the offset is 2*i.
+	 */
+	for (i = 0; i < 32; i++) {
+		if (i == 2 || i == 3)
+			continue;
+		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+	}
+	wmb();
+	pci_write_config_word(pdev, 4, saved_config_space[2]);
+
+	return 0;
+}
+
+/*
+ * We cannot read the structure directly; for portability we must use
+ * the I/O accessor functions.
+ * This is for debug only.
+ */
+#ifdef HPSA_DEBUG
+static void print_cfg_table(struct device *dev, struct CfgTable *tb)
+{
+	int i;
+	char temp_name[17];
+
+	dev_info(dev, "Controller Configuration information\n");
+	dev_info(dev, "------------------------------------\n");
+	for (i = 0; i < 4; i++)
+		temp_name[i] = readb(&(tb->Signature[i]));
+	temp_name[4] = '\0';
+	dev_info(dev, "   Signature = %s\n", temp_name);
+	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
+	dev_info(dev, "   Transport methods supported = 0x%x\n",
+	       readl(&(tb->TransportSupport)));
+	dev_info(dev, "   Transport methods active = 0x%x\n",
+	       readl(&(tb->TransportActive)));
+	dev_info(dev, "   Requested transport Method = 0x%x\n",
+	       readl(&(tb->HostWrite.TransportRequest)));
+	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
+	       readl(&(tb->HostWrite.CoalIntDelay)));
+	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
+	       readl(&(tb->HostWrite.CoalIntCount)));
+	dev_info(dev, "   Max outstanding commands = 0x%d\n",
+	       readl(&(tb->CmdsOutMax)));
+	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
+	for (i = 0; i < 16; i++)
+		temp_name[i] = readb(&(tb->ServerName[i]));
+	temp_name[16] = '\0';
+	dev_info(dev, "   Server Name = %s\n", temp_name);
+	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
+		readl(&(tb->HeartBeat)));
+}
+#endif				/* HPSA_DEBUG */
+
+static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
+{
+	int i, offset, mem_type, bar_type;
+
+	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
+		return 0;
+	offset = 0;
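+	/* Walk the BARs, accumulating a config-space offset: I/O and
+	 * 32-bit memory BARs take 4 bytes each, 64-bit memory BARs take 8.
+	 */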
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
+		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
+			offset += 4;
+		else {
+			mem_type = pci_resource_flags(pdev, i) &
+			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+			switch (mem_type) {
+			case PCI_BASE_ADDRESS_MEM_TYPE_32:
+			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+				offset += 4;	/* 32 bit */
+				break;
+			case PCI_BASE_ADDRESS_MEM_TYPE_64:
+				offset += 8;
+				break;
+			default:	/* reserved in PCI 2.2 */
+				dev_warn(&pdev->dev,
+				       "base address is invalid\n");
+				return -1;
+				break;
+			}
+		}
+		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
+			return i + 1;
+	}
+	return -1;
+}
+
+/* If MSI/MSI-X is supported by the kernel we will try to enable it on
+ * controllers that are capable. If not, we use IO-APIC mode.
+ */
+
+static void __devinit hpsa_interrupt_mode(struct ctlr_info *h,
+					   struct pci_dev *pdev, __u32 board_id)
+{
+#ifdef CONFIG_PCI_MSI
+	int err;
+	struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
+	{0, 2}, {0, 3}
+	};
+
+	/* Some boards advertise MSI but don't really support it */
+	if ((board_id == 0x40700E11) ||
+	    (board_id == 0x40800E11) ||
+	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
+		goto default_int_mode;
+	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
+		dev_info(&pdev->dev, "MSIX\n");
+		err = pci_enable_msix(pdev, hpsa_msix_entries, 4);
+		if (!err) {
+			h->intr[0] = hpsa_msix_entries[0].vector;
+			h->intr[1] = hpsa_msix_entries[1].vector;
+			h->intr[2] = hpsa_msix_entries[2].vector;
+			h->intr[3] = hpsa_msix_entries[3].vector;
+			h->msix_vector = 1;
+			return;
+		}
+		if (err > 0) {
+			dev_warn(&pdev->dev, "only %d MSI-X vectors "
+			       "available\n", err);
+			goto default_int_mode;
+		} else {
+			dev_warn(&pdev->dev, "MSI-X init failed %d\n",
+			       err);
+			goto default_int_mode;
+		}
+	}
+	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
+		dev_info(&pdev->dev, "MSI\n");
+		if (!pci_enable_msi(pdev))
+			h->msi_vector = 1;
+		else
+			dev_warn(&pdev->dev, "MSI init failed\n");
+	}
+default_int_mode:
+#endif				/* CONFIG_PCI_MSI */
+	/* if we get here we're going to use the default interrupt mode */
+	h->intr[SIMPLE_MODE_INT] = pdev->irq;
+	return;
+}
+
+static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+{
+	ushort subsystem_vendor_id, subsystem_device_id, command;
+	__u32 board_id, scratchpad = 0;
+	__u64 cfg_offset;
+	__u32 cfg_base_addr;
+	__u64 cfg_base_addr_index;
+	int i, prod_index, err;
+
+	subsystem_vendor_id = pdev->subsystem_vendor;
+	subsystem_device_id = pdev->subsystem_device;
+	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+		    subsystem_vendor_id);
+
+	for (i = 0; i < ARRAY_SIZE(products); i++)
+		if (board_id == products[i].board_id)
+			break;
+
+	prod_index = i;
+
+	if (prod_index == ARRAY_SIZE(products)) {
+		prod_index--;
+		if (subsystem_vendor_id != PCI_VENDOR_ID_HP ||
+				!hpsa_allow_any) {
+			dev_warn(&pdev->dev, "unrecognized board ID:"
+				" 0x%08lx, ignoring.\n",
+				(unsigned long) board_id);
+			return -ENODEV;
+		}
+	}
+	/* check to see if controller has been disabled
+	 * BEFORE trying to enable it
+	 */
+	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
+	if (!(command & 0x02)) {
+		dev_warn(&pdev->dev, "controller appears to be disabled\n");
+		return -ENODEV;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_warn(&pdev->dev, "unable to enable PCI device\n");
+		return err;
+	}
+
+	err = pci_request_regions(pdev, "hpsa");
+	if (err) {
+		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
+		return err;
+	}
+
+	/* If the kernel supports MSI/MSI-X we will try to enable that,
+	 * else we use the IO-APIC interrupt assigned to us by system ROM.
+	 */
+	hpsa_interrupt_mode(h, pdev, board_id);
+
+	/* find the memory BAR */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
+			break;
+	}
+	if (i == DEVICE_COUNT_RESOURCE) {
+		dev_warn(&pdev->dev, "no memory BAR found\n");
+		err = -ENODEV;
+		goto err_out_free_res;
+	}
+
+	h->paddr = pci_resource_start(pdev, i); /* addressing mode bits
+						 * already removed
+						 */
+
+	h->vaddr = remap_pci_mem(h->paddr, 0x250);
+
+	/* Wait for the board to become ready.  */
+	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
+		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (scratchpad == HPSA_FIRMWARE_READY)
+			break;
+		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
+	}
+	if (scratchpad != HPSA_FIRMWARE_READY) {
+		dev_warn(&pdev->dev, "board not ready, timed out.\n");
+		err = -ENODEV;
+		goto err_out_free_res;
+	}
+
+	/* get the address index number */
+	cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET);
+	cfg_base_addr &= (__u32) 0x0000ffff;
+	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
+	if (cfg_base_addr_index == -1) {
+		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
+		err = -ENODEV;
+		goto err_out_free_res;
+	}
+
+	cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET);
+	h->cfgtable = remap_pci_mem(pci_resource_start(pdev,
+			       cfg_base_addr_index) + cfg_offset,
+				sizeof(h->cfgtable));
+	h->board_id = board_id;
+
+	/* Query controller for max supported commands: */
+	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+
+	h->product_name = products[prod_index].product_name;
+	h->access = *(products[prod_index].access);
+	/* Allow room for some ioctls */
+	h->nr_cmds = h->max_commands - 4;
+
+	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
+	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
+	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
+		dev_warn(&pdev->dev, "not a valid CISS config table\n");
+		err = -ENODEV;
+		goto err_out_free_res;
+	}
+#ifdef CONFIG_X86
+	{
+		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+		__u32 prefetch;
+		prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+		prefetch |= 0x100;
+		writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
+	}
+#endif
+
+	/* Disabling DMA prefetch for the P600
+	 * An ASIC bug may result in a prefetch beyond
+	 * physical memory.
+	 */
+	if (board_id == 0x3225103C) {
+		__u32 dma_prefetch;
+		dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+		dma_prefetch |= 0x8000;
+		writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+	}
+
+	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
+	/* Update the field, and then ring the doorbell */
+	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
+	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+
+	/* under certain very rare conditions, this can take a while.
+	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+	 * as we enter this code.)
+	 */
+	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+			break;
+		/* delay and try again */
+		msleep(10);
+	}
+
+#ifdef HPSA_DEBUG
+	print_cfg_table(&pdev->dev, h->cfgtable);
+#endif				/* HPSA_DEBUG */
+
+	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
+		dev_warn(&pdev->dev, "unable to get board into simple mode\n");
+		err = -ENODEV;
+		goto err_out_free_res;
+	}
+	return 0;
+
+err_out_free_res:
+	/*
+	 * Deliberately omit pci_disable_device(): it does something nasty to
+	 * Smart Array controllers that pci_enable_device does not undo
+	 */
+	pci_release_regions(pdev);
+	return err;
+}
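
The board_id computed at the top of hpsa_pci_init() packs the PCI subsystem device ID into the high 16 bits and the subsystem vendor ID into the low 16 bits. As a worked example, the P600 value tested later in this function (0x3225103C) comes from subsystem device 0x3225 and HP's vendor ID 0x103C:

	board_id = ((0x3225 << 16) & 0xffff0000) | 0x103C
	         =  0x32250000                  | 0x103C
	         =  0x3225103C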
+
+static int __devinit hpsa_init_one(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	int i;
+	int dac;
+	struct ctlr_info *h;
+
+	if (number_of_controllers == 0)
+		printk(KERN_INFO DRIVER_NAME "\n");
+	if (reset_devices) {
+		/* Reset the controller with a PCI power-cycle */
+		if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev))
+			return -ENODEV;
+
+		/* Some devices (notably the HP Smart Array 5i Controller)
+		   need a little pause here */
+		msleep(HPSA_POST_RESET_PAUSE_MSECS);
+
+		/* Now try to get the controller to respond to a no-op */
+		for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
+			if (hpsa_noop(pdev) == 0)
+				break;
+			else
+				dev_warn(&pdev->dev, "no-op failed%s\n",
+					(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
+					 "; re-trying" : ""));
+		}
+	}
+
+	BUILD_BUG_ON(sizeof(struct CommandList) % 8);
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return -1;
+
+	h->busy_initializing = 1;
+	INIT_HLIST_HEAD(&h->cmpQ);
+	INIT_HLIST_HEAD(&h->reqQ);
+	mutex_init(&h->busy_shutting_down);
+	init_completion(&h->scan_wait);
+	if (hpsa_pci_init(h, pdev) != 0)
+		goto clean1;
+
+	sprintf(h->devname, "hpsa%d", number_of_controllers);
+	h->ctlr = number_of_controllers;
+	number_of_controllers++;
+	h->pdev = pdev;
+
+	/* configure PCI DMA stuff */
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+		dac = 1;
+	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+		dac = 0;
+	else {
+		dev_err(&pdev->dev, "no suitable DMA available\n");
+		goto clean1;
+	}
+
+	/* make sure the board interrupts are off */
+	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+	if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr,
+			IRQF_DISABLED | IRQF_SHARED, h->devname, h)) {
+		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
+		       h->intr[SIMPLE_MODE_INT], h->devname);
+		goto clean2;
+	}
+
+	dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+	       h->devname, pdev->device, pci_name(pdev),
+	       h->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+
+	h->cmd_pool_bits =
+	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
+		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
+	h->cmd_pool = pci_alloc_consistent(h->pdev,
+		    h->nr_cmds * sizeof(*h->cmd_pool),
+		    &(h->cmd_pool_dhandle));
+	h->errinfo_pool = pci_alloc_consistent(h->pdev,
+		    h->nr_cmds * sizeof(*h->errinfo_pool),
+		    &(h->errinfo_pool_dhandle));
+	if ((h->cmd_pool_bits == NULL)
+	    || (h->cmd_pool == NULL)
+	    || (h->errinfo_pool == NULL)) {
+		dev_err(&pdev->dev, "out of memory\n");
+		goto clean4;
+	}
+	spin_lock_init(&h->lock);
+
+	pci_set_drvdata(pdev, h);
+	memset(h->cmd_pool_bits, 0,
+	       ((h->nr_cmds + BITS_PER_LONG -
+		 1) / BITS_PER_LONG) * sizeof(unsigned long));
+
+	hpsa_scsi_setup(h);
+
+	/* Turn the interrupts on so we can service requests */
+	h->access.set_intr_mask(h, HPSA_INTR_ON);
+
+	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
+	h->busy_initializing = 0;
+	return 1;
+
+clean4:
+	kfree(h->cmd_pool_bits);
+	if (h->cmd_pool)
+		pci_free_consistent(h->pdev,
+			    h->nr_cmds * sizeof(struct CommandList),
+			    h->cmd_pool, h->cmd_pool_dhandle);
+	if (h->errinfo_pool)
+		pci_free_consistent(h->pdev,
+			    h->nr_cmds * sizeof(struct ErrorInfo),
+			    h->errinfo_pool,
+			    h->errinfo_pool_dhandle);
+	free_irq(h->intr[SIMPLE_MODE_INT], h);
+clean2:
+clean1:
+	h->busy_initializing = 0;
+	kfree(h);
+	return -1;
+}
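
The cmd_pool_bits allocation above sizes a one-bit-per-command bitmap with the usual round-up division: with nr_cmds = 252 and BITS_PER_LONG = 64, (252 + 63) / 64 = 4 longs, i.e. 32 bytes. A sketch of the same allocation using the kernel's DIV_ROUND_UP helper and kzalloc (which would also make the later memset redundant):

	h->cmd_pool_bits = kzalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
				   sizeof(unsigned long), GFP_KERNEL);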
+
+static void hpsa_flush_cache(struct ctlr_info *h)
+{
+	char *flush_buf;
+	struct CommandList *c;
+
+	flush_buf = kzalloc(4, GFP_KERNEL);
+	if (!flush_buf)
+		return;
+
+	c = cmd_special_alloc(h);
+	if (!c) {
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		goto out_of_memory;
+	}
+	fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+		RAID_CTLR_LUNID, TYPE_CMD);
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
+	if (c->err_info->CommandStatus != 0)
+		dev_warn(&h->pdev->dev,
+			"error flushing cache on controller\n");
+	cmd_special_free(h, c);
+out_of_memory:
+	kfree(flush_buf);
+}
+
+static void hpsa_shutdown(struct pci_dev *pdev)
+{
+	struct ctlr_info *h;
+
+	h = pci_get_drvdata(pdev);
+	/* Flush the cache first, so that any data still sitting in the
+	 * battery-backed write cache is committed to disk, then turn
+	 * the board interrupts off.
+	 */
+	hpsa_flush_cache(h);
+	h->access.set_intr_mask(h, HPSA_INTR_OFF);
+	free_irq(h->intr[2], h);
+#ifdef CONFIG_PCI_MSI
+	if (h->msix_vector)
+		pci_disable_msix(h->pdev);
+	else if (h->msi_vector)
+		pci_disable_msi(h->pdev);
+#endif				/* CONFIG_PCI_MSI */
+}
+
+static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+{
+	struct ctlr_info *h;
+
+	if (pci_get_drvdata(pdev) == NULL) {
+		dev_err(&pdev->dev, "unable to remove device\n");
+		return;
+	}
+	h = pci_get_drvdata(pdev);
+	mutex_lock(&h->busy_shutting_down);
+	remove_from_scan_list(h);
+	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
+	hpsa_shutdown(pdev);
+	iounmap(h->vaddr);
+	pci_free_consistent(h->pdev,
+		h->nr_cmds * sizeof(struct CommandList),
+		h->cmd_pool, h->cmd_pool_dhandle);
+	pci_free_consistent(h->pdev,
+		h->nr_cmds * sizeof(struct ErrorInfo),
+		h->errinfo_pool, h->errinfo_pool_dhandle);
+	kfree(h->cmd_pool_bits);
+	/*
+	 * Deliberately omit pci_disable_device(): it does something nasty to
+	 * Smart Array controllers that pci_enable_device does not undo
+	 */
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+	mutex_unlock(&h->busy_shutting_down);
+	kfree(h);
+}
+
+static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
+	__attribute__((unused)) pm_message_t state)
+{
+	return -ENOSYS;
+}
+
+static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
+{
+	return -ENOSYS;
+}
+
+static struct pci_driver hpsa_pci_driver = {
+	.name = "hpsa",
+	.probe = hpsa_init_one,
+	.remove = __devexit_p(hpsa_remove_one),
+	.id_table = hpsa_pci_device_id,
+	.shutdown = hpsa_shutdown,
+	.suspend = hpsa_suspend,
+	.resume = hpsa_resume,
+};
+
+/*
+ *  This is it.  Register the PCI driver information for the cards we
+ *  control; the OS will call our registered routines when it finds
+ *  one of our cards.
+ */
+static int __init hpsa_init(void)
+{
+	int err;
+	/* Start the scan thread */
+	hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
+	if (IS_ERR(hpsa_scan_thread))
+		return PTR_ERR(hpsa_scan_thread);
+	err = pci_register_driver(&hpsa_pci_driver);
+	if (err)
+		kthread_stop(hpsa_scan_thread);
+	return err;
+}
+
+static void __exit hpsa_cleanup(void)
+{
+	pci_unregister_driver(&hpsa_pci_driver);
+	kthread_stop(hpsa_scan_thread);
+}
+
+module_init(hpsa_init);
+module_exit(hpsa_cleanup);

+ 273 - 0
drivers/scsi/hpsa.h

@@ -0,0 +1,273 @@
+/*
+ *    Disk Array driver for HP Smart Array SAS controllers
+ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_H
+#define HPSA_H
+
+#include <scsi/scsicam.h>
+
+#define IO_OK		0
+#define IO_ERROR	1
+
+struct ctlr_info;
+
+struct access_method {
+	void (*submit_command)(struct ctlr_info *h,
+		struct CommandList *c);
+	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
+	unsigned long (*fifo_full)(struct ctlr_info *h);
+	unsigned long (*intr_pending)(struct ctlr_info *h);
+	unsigned long (*command_completed)(struct ctlr_info *h);
+};
+
+struct hpsa_scsi_dev_t {
+	int devtype;
+	int bus, target, lun;		/* as presented to the OS */
+	unsigned char scsi3addr[8];	/* as presented to the HW */
+#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
+	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
+	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
+	unsigned char model[16];        /* bytes 16-31 of inquiry data */
+	unsigned char revision[4];      /* bytes 32-35 of inquiry data */
+	unsigned char raid_level;	/* from inquiry page 0xC1 */
+};
+
+struct ctlr_info {
+	int	ctlr;
+	char	devname[8];
+	char    *product_name;
+	char	firm_ver[4]; /* Firmware version */
+	struct pci_dev *pdev;
+	__u32	board_id;
+	void __iomem *vaddr;
+	unsigned long paddr;
+	int 	nr_cmds; /* Number of commands allowed on this controller */
+	struct CfgTable __iomem *cfgtable;
+	int	interrupts_enabled;
+	int	major;
+	int 	max_commands;
+	int	commands_outstanding;
+	int 	max_outstanding; /* Debug */
+	int	usage_count;  /* number of opens on all minor devices */
+#	define DOORBELL_INT	0
+#	define PERF_MODE_INT	1
+#	define SIMPLE_MODE_INT	2
+#	define MEMQ_MODE_INT	3
+	unsigned int intr[4];
+	unsigned int msix_vector;
+	unsigned int msi_vector;
+	struct access_method access;
+
+	/* queue and queue Info */
+	struct hlist_head reqQ;
+	struct hlist_head cmpQ;
+	unsigned int Qdepth;
+	unsigned int maxQsinceinit;
+	unsigned int maxSG;
+	spinlock_t lock;
+
+	/* pointers to command and error info pool */
+	struct CommandList 	*cmd_pool;
+	dma_addr_t		cmd_pool_dhandle;
+	struct ErrorInfo 	*errinfo_pool;
+	dma_addr_t		errinfo_pool_dhandle;
+	unsigned long  		*cmd_pool_bits;
+	int			nr_allocs;
+	int			nr_frees;
+	int			busy_initializing;
+	int			busy_scanning;
+	struct mutex		busy_shutting_down;
+	struct list_head	scan_list;
+	struct completion	scan_wait;
+
+	struct Scsi_Host *scsi_host;
+	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
+	int ndevices; /* number of used elements in .dev[] array. */
+#define HPSA_MAX_SCSI_DEVS_PER_HBA 256
+	struct hpsa_scsi_dev_t *dev[HPSA_MAX_SCSI_DEVS_PER_HBA];
+};
+#define HPSA_ABORT_MSG 0
+#define HPSA_DEVICE_RESET_MSG 1
+#define HPSA_BUS_RESET_MSG 2
+#define HPSA_HOST_RESET_MSG 3
+#define HPSA_MSG_SEND_RETRY_LIMIT 10
+#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS 1000
+
+/* Maximum time in seconds driver will wait for command completions
+ * when polling before giving up.
+ */
+#define HPSA_MAX_POLL_TIME_SECS (20)
+
+/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
+ * how many times to retry TEST UNIT READY on a device
+ * while waiting for it to become ready before giving up.
+ * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
+ * between sending TURs while waiting for a device
+ * to become ready.
+ */
+#define HPSA_TUR_RETRY_LIMIT (20)
+#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
+
+/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
+ * to become ready, in seconds, before giving up on it.
+ * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
+ * between polling the board to see if it is ready, in
+ * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
+ * HPSA_BOARD_READY_ITERATIONS are derived from those.
+ */
+#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
+#define HPSA_BOARD_READY_POLL_INTERVAL \
+	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
+#define HPSA_BOARD_READY_ITERATIONS \
+	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
+		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_POST_RESET_PAUSE_MSECS (3000)
+#define HPSA_POST_RESET_NOOP_RETRIES (12)
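
Plugging in the values above: the ready-wait loop in hpsa_pci_init() polls every 100 ms for up to 120 s, so

	HPSA_BOARD_READY_ITERATIONS    = (120 * 1000) / 100 = 1200 polls
	HPSA_BOARD_READY_POLL_INTERVAL = (100 * HZ) / 1000
	                               = 25 jiffies with HZ = 250, for example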
+
+/* Define the different access_methods */
+/*
+ * Memory mapped FIFO interface (SMART 53xx cards)
+ */
+#define SA5_DOORBELL	0x20
+#define SA5_REQUEST_PORT_OFFSET	0x40
+#define SA5_REPLY_INTR_MASK_OFFSET	0x34
+#define SA5_REPLY_PORT_OFFSET		0x44
+#define SA5_INTR_STATUS		0x30
+#define SA5_SCRATCHPAD_OFFSET	0xB0
+
+#define SA5_CTCFG_OFFSET	0xB4
+#define SA5_CTMEM_OFFSET	0xB8
+
+#define SA5_INTR_OFF		0x08
+#define SA5B_INTR_OFF		0x04
+#define SA5_INTR_PENDING	0x08
+#define SA5B_INTR_PENDING	0x04
+#define FIFO_EMPTY		0xffffffff
+#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
+
+#define HPSA_ERROR_BIT		0x02
+#define HPSA_TAG_CONTAINS_INDEX(tag) ((tag) & 0x04)
+#define HPSA_TAG_TO_INDEX(tag) ((tag) >> 3)
+#define HPSA_TAG_DISCARD_ERROR_BITS(tag) ((tag) & ~3)
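
A minimal sketch of decoding a completion tag with these macros, assuming (as the masks imply) that bits 0-1 carry error status, bit 2 flags a direct command index, and the index itself starts at bit 3:

	u32 raw = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (raw != FIFO_EMPTY && HPSA_TAG_CONTAINS_INDEX(raw)) {
		u32 index = HPSA_TAG_TO_INDEX(raw);
		struct CommandList *c = h->cmd_pool + index;

		if (raw & HPSA_ERROR_BIT)
			dev_warn(&h->pdev->dev, "command %u failed\n", index);
	}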
+
+#define HPSA_INTR_ON 	1
+#define HPSA_INTR_OFF	0
+/*
+ * Send the command to the hardware
+ */
+static void SA5_submit_command(struct ctlr_info *h,
+	struct CommandList *c)
+{
+#ifdef HPSA_DEBUG
+	printk(KERN_WARNING "hpsa: sending %x down to controller\n",
+		c->busaddr);
+#endif /* HPSA_DEBUG */
+	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	h->commands_outstanding++;
+	if (h->commands_outstanding > h->max_outstanding)
+		h->max_outstanding = h->commands_outstanding;
+}
+
+/*
+ *  This card is the opposite of the other cards.
+ *   0 turns interrupts on...
+ *   0x08 turns them off...
+ */
+static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
+{
+	if (val) { /* Turn interrupts on */
+		h->interrupts_enabled = 1;
+		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	} else { /* Turn them off */
+		h->interrupts_enabled = 0;
+		writel(SA5_INTR_OFF,
+			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	}
+}
+/*
+ * Returns true if the FIFO is full.
+ */
+static unsigned long SA5_fifo_full(struct ctlr_info *h)
+{
+	return h->commands_outstanding >= h->max_commands;
+}
+/*
+ *   returns value read from hardware.
+ *     returns FIFO_EMPTY if there is nothing to read
+ */
+static unsigned long SA5_completed(struct ctlr_info *h)
+{
+	unsigned long register_value
+		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+
+	if (register_value != FIFO_EMPTY)
+		h->commands_outstanding--;
+
+#ifdef HPSA_DEBUG
+	if (register_value != FIFO_EMPTY)
+		printk(KERN_INFO "hpsa:  Read %lx back from board\n",
+			register_value);
+	else
+		printk(KERN_INFO "hpsa:  FIFO Empty read\n");
+#endif
+
+	return register_value;
+}
+/*
+ * Returns true if an interrupt is pending.
+ */
+static unsigned long SA5_intr_pending(struct ctlr_info *h)
+{
+	unsigned long register_value  =
+		readl(h->vaddr + SA5_INTR_STATUS);
+#ifdef HPSA_DEBUG
+	printk(KERN_INFO "hpsa: intr_pending %lx\n", register_value);
+#endif  /* HPSA_DEBUG */
+	if (register_value & SA5_INTR_PENDING)
+		return 1;
+	return 0;
+}
+
+
+static struct access_method SA5_access = {
+	SA5_submit_command,
+	SA5_intr_mask,
+	SA5_fifo_full,
+	SA5_intr_pending,
+	SA5_completed,
+};
+
+struct board_type {
+	__u32	board_id;
+	char	*product_name;
+	struct access_method *access;
+};
+
+
+/* end of old hpsa_scsi.h file */
+
+#endif /* HPSA_H */
+
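
The access_method table near the top of this header works as a small vtable: the rest of the driver never pokes SA5 registers directly but dispatches through the per-controller copy in h->access (hpsa_pci_init() fills it in from the products[] entry). A sketch of the call sites, as hpsa.c uses them:

	h->access.set_intr_mask(h, HPSA_INTR_ON);	/* unmask interrupts */
	h->access.submit_command(h, c);			/* ring a command in */
	while (h->access.intr_pending(h))
		tag = h->access.command_completed(h);	/* drain reply FIFO */

This indirection is what lets later board families plug in different register layouts without touching the common code.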

+ 326 - 0
drivers/scsi/hpsa_cmd.h

@@ -0,0 +1,326 @@
+/*
+ *    Disk Array driver for HP Smart Array SAS controllers
+ *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef HPSA_CMD_H
+#define HPSA_CMD_H
+
+/* general boundary definitions */
+#define SENSEINFOBYTES          32 /* may vary between hbas */
+#define MAXSGENTRIES            31
+#define MAXREPLYQS              256
+
+/* Command Status value */
+#define CMD_SUCCESS             0x0000
+#define CMD_TARGET_STATUS       0x0001
+#define CMD_DATA_UNDERRUN       0x0002
+#define CMD_DATA_OVERRUN        0x0003
+#define CMD_INVALID             0x0004
+#define CMD_PROTOCOL_ERR        0x0005
+#define CMD_HARDWARE_ERR        0x0006
+#define CMD_CONNECTION_LOST     0x0007
+#define CMD_ABORTED             0x0008
+#define CMD_ABORT_FAILED        0x0009
+#define CMD_UNSOLICITED_ABORT   0x000A
+#define CMD_TIMEOUT             0x000B
+#define CMD_UNABORTABLE		0x000C
+
+/* Unit Attention ASCs as defined for the MSA2012sa */
+#define POWER_OR_RESET			0x29
+#define STATE_CHANGED			0x2a
+#define UNIT_ATTENTION_CLEARED		0x2f
+#define LUN_FAILED			0x3e
+#define REPORT_LUNS_CHANGED		0x3f
+
+/* Unit Attention ASCQs as defined for the MSA2012sa */
+
+	/* These ASCQs are defined for ASC = POWER_OR_RESET */
+#define POWER_ON_RESET			0x00
+#define POWER_ON_REBOOT			0x01
+#define SCSI_BUS_RESET			0x02
+#define MSA_TARGET_RESET		0x03
+#define CONTROLLER_FAILOVER		0x04
+#define TRANSCEIVER_SE			0x05
+#define TRANSCEIVER_LVD			0x06
+
+	/* These ASCQs are defined for ASC = STATE_CHANGED */
+#define RESERVATION_PREEMPTED		0x03
+#define ASYM_ACCESS_CHANGED		0x06
+#define LUN_CAPACITY_CHANGED		0x09
+
+/* transfer direction */
+#define XFER_NONE               0x00
+#define XFER_WRITE              0x01
+#define XFER_READ               0x02
+#define XFER_RSVD               0x03
+
+/* task attribute */
+#define ATTR_UNTAGGED           0x00
+#define ATTR_SIMPLE             0x04
+#define ATTR_HEADOFQUEUE        0x05
+#define ATTR_ORDERED            0x06
+#define ATTR_ACA                0x07
+
+/* cdb type */
+#define TYPE_CMD				0x00
+#define TYPE_MSG				0x01
+
+/* config space register offsets */
+#define CFG_VENDORID            0x00
+#define CFG_DEVICEID            0x02
+#define CFG_I2OBAR              0x10
+#define CFG_MEM1BAR             0x14
+
+/* i2o space register offsets */
+#define I2O_IBDB_SET            0x20
+#define I2O_IBDB_CLEAR          0x70
+#define I2O_INT_STATUS          0x30
+#define I2O_INT_MASK            0x34
+#define I2O_IBPOST_Q            0x40
+#define I2O_OBPOST_Q            0x44
+#define I2O_DMA1_CFG		0x214
+
+/* Configuration Table */
+#define CFGTBL_ChangeReq        0x00000001l
+#define CFGTBL_AccCmds          0x00000001l
+
+#define CFGTBL_Trans_Simple     0x00000002l
+
+#define CFGTBL_BusType_Ultra2   0x00000001l
+#define CFGTBL_BusType_Ultra3   0x00000002l
+#define CFGTBL_BusType_Fibre1G  0x00000100l
+#define CFGTBL_BusType_Fibre2G  0x00000200l
+struct vals32 {
+	__u32   lower;
+	__u32   upper;
+};
+
+union u64bit {
+	struct vals32 val32;
+	__u64 val;
+};
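
The vals32/u64bit pair exists because the controller takes 64-bit bus addresses as explicit lower/upper 32-bit halves. A minimal sketch of splitting a DMA handle on a little-endian host (hypothetical names; the driver does the equivalent when it fills SG and error descriptors):

	union u64bit temp;

	temp.val = (__u64) dma_handle;	/* e.g. from pci_map_single() */
	desc_addr.lower = temp.val32.lower;
	desc_addr.upper = temp.val32.upper;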
+
+/* FIXME this is a per controller value (barf!) */
+#define HPSA_MAX_TARGETS_PER_CTLR 16
+#define HPSA_MAX_LUN 256
+#define HPSA_MAX_PHYS_LUN 1024
+
+/* SCSI-3 Commands */
+#pragma pack(1)
+
+#define HPSA_INQUIRY 0x12
+struct InquiryData {
+	__u8 data_byte[36];
+};
+
+#define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
+#define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+struct ReportLUNdata {
+	__u8 LUNListLength[4];
+	__u32 reserved;
+	__u8 LUN[HPSA_MAX_LUN][8];
+};
+
+struct ReportExtendedLUNdata {
+	__u8 LUNListLength[4];
+	__u8 extended_response_flag;
+	__u8 reserved[3];
+	__u8 LUN[HPSA_MAX_LUN][24];
+};
+
+struct SenseSubsystem_info {
+	__u8 reserved[36];
+	__u8 portname[8];
+	__u8 reserved1[1108];
+};
+
+#define HPSA_READ_CAPACITY 0x25 /* Read Capacity */
+struct ReadCapdata {
+	__u8 total_size[4];	/* Total size in blocks */
+	__u8 block_size[4];	/* Size of blocks in bytes */
+};
+
+#if 0
+/* 12 byte commands not implemented in firmware yet. */
+#define HPSA_READ 	0xa8
+#define HPSA_WRITE	0xaa
+#endif
+
+#define HPSA_READ   0x28    /* Read(10) */
+#define HPSA_WRITE  0x2a    /* Write(10) */
+
+/* BMIC commands */
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_CACHE_FLUSH 0xc2
+#define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */
+
+/* Command List Structure */
+union SCSI3Addr {
+	struct {
+		__u8 Dev;
+		__u8 Bus:6;
+		__u8 Mode:2;        /* b00 */
+	} PeripDev;
+	struct {
+		__u8 DevLSB;
+		__u8 DevMSB:6;
+		__u8 Mode:2;        /* b01 */
+	} LogDev;
+	struct {
+		__u8 Dev:5;
+		__u8 Bus:3;
+		__u8 Targ:6;
+		__u8 Mode:2;        /* b10 */
+	} LogUnit;
+};
+
+struct PhysDevAddr {
+	__u32             TargetId:24;
+	__u32             Bus:6;
+	__u32             Mode:2;
+	/* 2 level target device addr */
+	union SCSI3Addr  Target[2];
+};
+
+struct LogDevAddr {
+	__u32            VolId:30;
+	__u32            Mode:2;
+	__u8             reserved[4];
+};
+
+union LUNAddr {
+	__u8               LunAddrBytes[8];
+	union SCSI3Addr    SCSI3Lun[4];
+	struct PhysDevAddr PhysDev;
+	struct LogDevAddr  LogDev;
+};
+
+struct CommandListHeader {
+	__u8              ReplyQueue;
+	__u8              SGList;
+	__u16             SGTotal;
+	struct vals32     Tag;
+	union LUNAddr     LUN;
+};
+
+struct RequestBlock {
+	__u8   CDBLen;
+	struct {
+		__u8 Type:3;
+		__u8 Attribute:3;
+		__u8 Direction:2;
+	} Type;
+	__u16  Timeout;
+	__u8   CDB[16];
+};
+
+struct ErrDescriptor {
+	struct vals32 Addr;
+	__u32  Len;
+};
+
+struct SGDescriptor {
+	struct vals32 Addr;
+	__u32  Len;
+	__u32  Ext;
+};
+
+union MoreErrInfo {
+	struct {
+		__u8  Reserved[3];
+		__u8  Type;
+		__u32 ErrorInfo;
+	} Common_Info;
+	struct {
+		__u8  Reserved[2];
+		__u8  offense_size; /* size of offending entry */
+		__u8  offense_num;  /* byte # of offense 0-base */
+		__u32 offense_value;
+	} Invalid_Cmd;
+};
+struct ErrorInfo {
+	__u8               ScsiStatus;
+	__u8               SenseLen;
+	__u16              CommandStatus;
+	__u32              ResidualCnt;
+	union MoreErrInfo  MoreErrInfo;
+	__u8               SenseInfo[SENSEINFOBYTES];
+};
+/* Command types */
+#define CMD_IOCTL_PEND  0x01
+#define CMD_SCSI	0x03
+
+struct ctlr_info; /* defined in hpsa.h */
+/* The size of this structure needs to be divisible by 8
+ * on all architectures, because the controller uses the 2
+ * lower bits of the address, and the driver uses 1 more
+ * lower bit (3 bits total).
+ */
+struct CommandList {
+	struct CommandListHeader Header;
+	struct RequestBlock      Request;
+	struct ErrDescriptor     ErrDesc;
+	struct SGDescriptor      SG[MAXSGENTRIES];
+	/* information associated with the command */
+	__u32			   busaddr; /* physical addr of this record */
+	struct ErrorInfo *err_info; /* pointer to the allocated mem */
+	struct ctlr_info	   *h;
+	int			   cmd_type;
+	long			   cmdindex;
+	struct hlist_node list;
+	struct CommandList *prev;
+	struct CommandList *next;
+	struct request *rq;
+	struct completion *waiting;
+	int	 retry_count;
+	void   *scsi_cmd;
+};
+
+/* Configuration Table Structure */
+struct HostWrite {
+	__u32 TransportRequest;
+	__u32 Reserved;
+	__u32 CoalIntDelay;
+	__u32 CoalIntCount;
+};
+
+struct CfgTable {
+	__u8             Signature[4];
+	__u32            SpecValence;
+	__u32            TransportSupport;
+	__u32            TransportActive;
+	struct HostWrite HostWrite;
+	__u32            CmdsOutMax;
+	__u32            BusTypes;
+	__u32            Reserved;
+	__u8             ServerName[16];
+	__u32            HeartBeat;
+	__u32            SCSI_Prefetch;
+};
+
+struct hpsa_pci_info {
+	unsigned char	bus;
+	unsigned char	dev_fn;
+	unsigned short	domain;
+	__u32		board_id;
+};
+
+#pragma pack()
+#endif /* HPSA_CMD_H */

+ 1 - 0
drivers/scsi/ipr.c

@@ -6521,6 +6521,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	int rc;
 
 	ENTER;
+	ioa_cfg->pdev->state_saved = true;
 	rc = pci_restore_state(ioa_cfg->pdev);
 
 	if (rc != PCIBIOS_SUCCESSFUL) {

+ 36 - 29
drivers/scsi/libfc/fc_fcp.c

@@ -68,18 +68,20 @@ struct kmem_cache *scsi_pkt_cachep;
 
 /**
  * struct fc_fcp_internal - FCP layer internal data
- * @scsi_pkt_pool:  Memory pool to draw FCP packets from
+ * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
  * @scsi_pkt_queue: Current FCP packets
  * @last_can_queue_ramp_down_time: ramp down time
  * @last_can_queue_ramp_up_time: ramp up time
  * @max_can_queue: max can_queue size
  */
 struct fc_fcp_internal {
-	mempool_t	 *scsi_pkt_pool;
-	struct list_head scsi_pkt_queue;
-	unsigned long last_can_queue_ramp_down_time;
-	unsigned long last_can_queue_ramp_up_time;
-	int max_can_queue;
+	mempool_t		*scsi_pkt_pool;
+	spinlock_t		scsi_queue_lock;
+	struct list_head	scsi_pkt_queue;
+	unsigned long		last_can_queue_ramp_down_time;
+	unsigned long		last_can_queue_ramp_up_time;
+	int			max_can_queue;
 };
 
 #define fc_get_scsi_internal(x)	((struct fc_fcp_internal *)(x)->scsi_priv)
@@ -410,12 +412,14 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
 	unsigned long flags;
 
 	fp = fc_frame_alloc(lport, len);
-	if (!fp) {
-		spin_lock_irqsave(lport->host->host_lock, flags);
-		fc_fcp_can_queue_ramp_down(lport);
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
-	}
-	return fp;
+	if (likely(fp))
+		return fp;
+
+	/* error case */
+	spin_lock_irqsave(lport->host->host_lock, flags);
+	fc_fcp_can_queue_ramp_down(lport);
+	spin_unlock_irqrestore(lport->host->host_lock, flags);
+	return NULL;
 }
 
 /**
@@ -990,7 +994,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
 	struct scsi_cmnd *sc_cmd;
 	unsigned long flags;
 
-	spin_lock_irqsave(lport->host->host_lock, flags);
+	spin_lock_irqsave(&si->scsi_queue_lock, flags);
 restart:
 	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
 		sc_cmd = fsp->cmd;
@@ -1001,7 +1005,7 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
 			continue;
 
 		fc_fcp_pkt_hold(fsp);
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
 		if (!fc_fcp_lock_pkt(fsp)) {
 			fc_fcp_cleanup_cmd(fsp, error);
@@ -1010,14 +1014,14 @@ static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
 		}
 
 		fc_fcp_pkt_release(fsp);
-		spin_lock_irqsave(lport->host->host_lock, flags);
+		spin_lock_irqsave(&si->scsi_queue_lock, flags);
 		/*
 		 * while we dropped the lock multiple pkts could
 		 * have been released, so we have to start over.
 		 */
 		goto restart;
 	}
-	spin_unlock_irqrestore(lport->host->host_lock, flags);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 }
 
 /**
@@ -1035,11 +1039,12 @@ static void fc_fcp_abort_io(struct fc_lport *lport)
  * @fsp:   The FCP packet to send
  *
  * Return:  Zero for success and -1 for failure
- * Locks:   Called with the host lock and irqs disabled.
+ * Locks:   Called without locks held
  */
 static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 {
 	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+	unsigned long flags;
 	int rc;
 
 	fsp->cmd->SCp.ptr = (char *)fsp;
@@ -1049,13 +1054,16 @@ static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
 	int_to_scsilun(fsp->cmd->device->lun,
 		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
 	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
-	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
 
-	spin_unlock_irq(lport->host->host_lock);
+	spin_lock_irqsave(&si->scsi_queue_lock, flags);
+	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 	rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
-	spin_lock_irq(lport->host->host_lock);
-	if (rc)
+	if (unlikely(rc)) {
+		spin_lock_irqsave(&si->scsi_queue_lock, flags);
 		list_del(&fsp->list);
+		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+	}
 
 	return rc;
 }
@@ -1752,6 +1760,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 	struct fcoe_dev_stats *stats;
 
 	lport = shost_priv(sc_cmd->device->host);
+	spin_unlock_irq(lport->host->host_lock);
 
 	rval = fc_remote_port_chkready(rport);
 	if (rval) {
@@ -1834,6 +1843,7 @@ int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 	}
 out:
+	spin_lock_irq(lport->host->host_lock);
 	return rc;
 }
 EXPORT_SYMBOL(fc_queuecommand);
@@ -1864,11 +1874,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 
 	lport = fsp->lp;
 	si = fc_get_scsi_internal(lport);
-	spin_lock_irqsave(lport->host->host_lock, flags);
-	if (!fsp->cmd) {
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+	if (!fsp->cmd)
 		return;
-	}
 
 	/*
 	 * if can_queue ramp down is done then try can_queue ramp up
@@ -1880,10 +1887,8 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 	sc_cmd = fsp->cmd;
 	fsp->cmd = NULL;
 
-	if (!sc_cmd->SCp.ptr) {
-		spin_unlock_irqrestore(lport->host->host_lock, flags);
+	if (!sc_cmd->SCp.ptr)
 		return;
-	}
 
 	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
 	switch (fsp->status_code) {
@@ -1945,10 +1950,11 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
 		break;
 	}
 
+	spin_lock_irqsave(&si->scsi_queue_lock, flags);
 	list_del(&fsp->list);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 	sc_cmd->SCp.ptr = NULL;
 	sc_cmd->scsi_done(sc_cmd);
-	spin_unlock_irqrestore(lport->host->host_lock, flags);
 
 	/* release ref from initial allocation in queue command */
 	fc_fcp_pkt_release(fsp);
@@ -2216,6 +2222,7 @@ int fc_fcp_init(struct fc_lport *lport)
 	lport->scsi_priv = si;
 	si->max_can_queue = lport->host->can_queue;
 	INIT_LIST_HEAD(&si->scsi_pkt_queue);
+	spin_lock_init(&si->scsi_queue_lock);
 
 	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
 	if (!si->scsi_pkt_pool) {
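
The thread running through this fc_fcp.c diff is lock narrowing: scsi_pkt_queue was the only state these paths needed the Scsi_Host lock for, so it gets its own spinlock and the host lock's hold time drops (the "reduce hold time on SCSI host lock" commit in this merge). The cleanup loop also shows the standard shape for calling out of a locked list walk: pin the entry, drop the lock, do the work, retake the lock and restart. A distilled sketch with a hypothetical do_cleanup():

	spin_lock_irqsave(&si->scsi_queue_lock, flags);
restart:
	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
		fc_fcp_pkt_hold(fsp);		/* pin while unlocked */
		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
		do_cleanup(fsp);		/* may sleep or take locks */
		fc_fcp_pkt_release(fsp);
		spin_lock_irqsave(&si->scsi_queue_lock, flags);
		goto restart;			/* list may have changed */
	}
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);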

+ 6 - 1
drivers/scsi/libfc/fc_lport.c

@@ -537,7 +537,9 @@ int fc_fabric_login(struct fc_lport *lport)
 	int rc = -1;
 
 	mutex_lock(&lport->lp_mutex);
-	if (lport->state == LPORT_ST_DISABLED) {
+	if (lport->state == LPORT_ST_DISABLED ||
+	    lport->state == LPORT_ST_LOGO) {
+		fc_lport_state_enter(lport, LPORT_ST_RESET);
 		fc_lport_enter_reset(lport);
 		rc = 0;
 	}
@@ -967,6 +969,9 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
 	FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
 		     fc_lport_state(lport));
 
+	if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
+		return;
+
 	if (lport->vport) {
 		if (lport->link_up)
 			fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);

+ 1 - 0
drivers/scsi/libfc/fc_rport.c

@@ -310,6 +310,7 @@ static void fc_rport_work(struct work_struct *work)
 				restart = 1;
 			else
 				list_del(&rdata->peers);
+			rdata->event = RPORT_EV_NONE;
 			mutex_unlock(&rdata->rp_mutex);
 			mutex_unlock(&lport->disc.disc_mutex);
 		}

+ 12 - 4
drivers/scsi/lpfc/lpfc_init.c

@@ -4506,9 +4506,13 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
 		pdev = phba->pcidev;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
 			return error;
+		}
+	}
 
 	/* Get the bus address of Bar0 and Bar2 and the number of bytes
 	 * required by each mapping.
@@ -6021,9 +6025,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 		pdev = phba->pcidev;
 
 	/* Set the device DMA mask size */
-	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
+	 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
+		 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
 			return error;
+		}
+	}
 
 	/* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
 	 * number of bytes required by each mapping. They are actually
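
The lpfc hunks above tighten the usual 64-then-32-bit DMA mask fallback so the coherent (consistent) mask is kept in step with the streaming mask. The generic shape of the idiom, as a sketch:

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		/* fall back to 32-bit addressing */
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return -EIO;	/* no usable DMA configuration */
	}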

+ 9 - 5
drivers/scsi/megaraid/megaraid_sas.c

@@ -2501,7 +2501,9 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 		instance->base_addr = pci_resource_start(instance->pdev, 0);
 	}
 
-	if (pci_request_regions(instance->pdev, "megasas: LSI")) {
+	if (pci_request_selected_regions(instance->pdev,
+		pci_select_bars(instance->pdev, IORESOURCE_MEM),
+		"megasas: LSI")) {
 		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
 		return -EBUSY;
 	}
@@ -2642,7 +2644,8 @@ static int megasas_init_mfi(struct megasas_instance *instance)
 	iounmap(instance->reg_set);
 
       fail_ioremap:
-	pci_release_regions(instance->pdev);
+	pci_release_selected_regions(instance->pdev,
+		pci_select_bars(instance->pdev, IORESOURCE_MEM));
 
 	return -EINVAL;
 }
@@ -2662,7 +2665,8 @@ static void megasas_release_mfi(struct megasas_instance *instance)
 
 	iounmap(instance->reg_set);
 
-	pci_release_regions(instance->pdev);
+	pci_release_selected_regions(instance->pdev,
+		pci_select_bars(instance->pdev, IORESOURCE_MEM));
 }
 
 /**
@@ -2971,7 +2975,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	/*
 	 * PCI prepping: enable device set bus mastering and dma mask
 	 */
-	rval = pci_enable_device(pdev);
+	rval = pci_enable_device_mem(pdev);
 
 	if (rval) {
 		return rval;
@@ -3276,7 +3280,7 @@ megasas_resume(struct pci_dev *pdev)
 	/*
 	 * PCI prepping: enable device set bus mastering and dma mask
 	 */
-	rval = pci_enable_device(pdev);
+	rval = pci_enable_device_mem(pdev);
 
 	if (rval) {
 		printk(KERN_ERR "megasas: Enable device failed\n");

+ 5 - 0
drivers/scsi/mpt2sas/mpt2sas_base.c

@@ -3583,6 +3583,11 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
 	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
 	mutex_init(&ioc->transport_cmds.mutex);
 
+	/* scsih internal command bits */
+	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+	ioc->scsih_cmds.status = MPT2_CMD_NOT_USED;
+	mutex_init(&ioc->scsih_cmds.mutex);
+
 	/* task management internal command bits */
 	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
 	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;

+ 1 - 0
drivers/scsi/mvsas/mv_init.c

@@ -657,6 +657,7 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
 	{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
 	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
+	{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
 
 	{ }	/* terminate list */
 };

+ 47 - 41
drivers/scsi/osd/osd_initiator.c

@@ -432,30 +432,23 @@ static void _osd_free_seg(struct osd_request *or __unused,
 	seg->alloc_size = 0;
 }
 
-static void _put_request(struct request *rq , bool is_async)
+static void _put_request(struct request *rq)
 {
-	if (is_async) {
-		WARN_ON(rq->bio);
-		__blk_put_request(rq->q, rq);
-	} else {
-		/*
-		 * If osd_finalize_request() was called but the request was not
-		 * executed through the block layer, then we must release BIOs.
-		 * TODO: Keep error code in or->async_error. Need to audit all
-		 *       code paths.
-		 */
-		if (unlikely(rq->bio))
-			blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
-		else
-			blk_put_request(rq);
-	}
+	/*
+	 * If osd_finalize_request() was called but the request was not
+	 * executed through the block layer, then we must release BIOs.
+	 * TODO: Keep error code in or->async_error. Need to audit all
+	 *       code paths.
+	 */
+	if (unlikely(rq->bio))
+		blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+	else
+		blk_put_request(rq);
 }
 
 void osd_end_request(struct osd_request *or)
 {
 	struct request *rq = or->request;
-	/* IMPORTANT: make sure this agrees with osd_execute_request_async */
-	bool is_async = (or->request->end_io_data == or);
 
 	_osd_free_seg(or, &or->set_attr);
 	_osd_free_seg(or, &or->enc_get_attr);
@@ -463,20 +456,34 @@ void osd_end_request(struct osd_request *or)
 
 	if (rq) {
 		if (rq->next_rq) {
-			_put_request(rq->next_rq, is_async);
+			_put_request(rq->next_rq);
 			rq->next_rq = NULL;
 		}
 
-		_put_request(rq, is_async);
+		_put_request(rq);
 	}
 	_osd_request_free(or);
 }
 EXPORT_SYMBOL(osd_end_request);
 
+static void _set_error_resid(struct osd_request *or, struct request *req,
+			     int error)
+{
+	or->async_error = error;
+	or->req_errors = req->errors ? : error;
+	or->sense_len = req->sense_len;
+	if (or->out.req)
+		or->out.residual = or->out.req->resid_len;
+	if (or->in.req)
+		or->in.residual = or->in.req->resid_len;
+}
+
 int osd_execute_request(struct osd_request *or)
 {
-	return or->async_error =
-			blk_execute_rq(or->request->q, NULL, or->request, 0);
+	int error = blk_execute_rq(or->request->q, NULL, or->request, 0);
+
+	_set_error_resid(or, or->request, error);
+	return error;
 }
 EXPORT_SYMBOL(osd_execute_request);
 
@@ -484,15 +491,17 @@ static void osd_request_async_done(struct request *req, int error)
 {
 	struct osd_request *or = req->end_io_data;
 
-	or->async_error = error;
-
-	if (unlikely(error)) {
-		OSD_DEBUG("osd_request_async_done error recieved %d "
-			  "errors 0x%x\n", error, req->errors);
-		if (!req->errors) /* don't miss out on this one */
-			req->errors = error;
+	_set_error_resid(or, req, error);
+	if (req->next_rq) {
+		__blk_put_request(req->q, req->next_rq);
+		req->next_rq = NULL;
 	}
 
+	__blk_put_request(req->q, req);
+	or->request = NULL;
+	or->in.req = NULL;
+	or->out.req = NULL;
+
 	if (or->async_done)
 		or->async_done(or, or->async_private);
 	else
@@ -1489,21 +1498,18 @@ int osd_req_decode_sense_full(struct osd_request *or,
 #endif
 	int ret;
 
-	if (likely(!or->request->errors)) {
-		osi->out_resid = 0;
-		osi->in_resid = 0;
+	if (likely(!or->req_errors))
 		return 0;
-	}
 
 	osi = osi ? : &local_osi;
 	memset(osi, 0, sizeof(*osi));
 
-	ssdb = or->request->sense;
-	sense_len = or->request->sense_len;
+	ssdb = (typeof(ssdb))or->sense;
+	sense_len = or->sense_len;
 	if ((sense_len < (int)sizeof(*ssdb) || !ssdb->sense_key)) {
 		OSD_ERR("Block-layer returned error(0x%x) but "
 			"sense_len(%u) || key(%d) is empty\n",
-			or->request->errors, sense_len, ssdb->sense_key);
+			or->req_errors, sense_len, ssdb->sense_key);
 		goto analyze;
 	}
 
@@ -1525,7 +1531,7 @@ int osd_req_decode_sense_full(struct osd_request *or,
 			"additional_code=0x%x async_error=%d errors=0x%x\n",
 			osi->key, original_sense_len, sense_len,
 			osi->additional_code, or->async_error,
-			or->request->errors);
+			or->req_errors);
 
 	if (original_sense_len < sense_len)
 		sense_len = original_sense_len;
@@ -1695,10 +1701,10 @@ int osd_req_decode_sense_full(struct osd_request *or,
 		ret = -EIO;
 	}
 
-	if (or->out.req)
-		osi->out_resid = or->out.req->resid_len ?: or->out.total_bytes;
-	if (or->in.req)
-		osi->in_resid = or->in.req->resid_len ?: or->in.total_bytes;
+	if (!or->out.residual)
+		or->out.residual = or->out.total_bytes;
+	if (!or->in.residual)
+		or->in.residual = or->in.total_bytes;
 
 	return ret;
 }
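
One detail in _set_error_resid() deserves a gloss: req->errors ? : error uses the GNU "a ?: b" extension, which yields a when a is non-zero and b otherwise, so an error code reported by the block layer takes precedence over the locally computed one. Spelled out:

	or->req_errors = req->errors ? req->errors : error;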

+ 0 - 10
drivers/scsi/pm8001/pm8001_ctl.h

@@ -45,16 +45,6 @@
 #define HEADER_LEN			28
 #define SIZE_OFFSET			16
 
-struct pm8001_ioctl_payload {
-	u32	signature;
-	u16	major_function;
-	u16	minor_function;
-	u16	length;
-	u16	status;
-	u16	offset;
-	u16	id;
-	u8	func_specific[1];
-};
 
 #define FLASH_OK                        0x000000
 #define FAIL_OPEN_BIOS_FILE             0x000100

+ 92 - 57
drivers/scsi/pm8001/pm8001_hwi.c

@@ -373,10 +373,7 @@ static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
 static void __devinit
 mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
 {
-	u32 offset;
-	u32 value;
-	u32 i, j;
-	u32 bit_cnt;
+	u32 value, offset, i;
 
 #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
 #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -392,55 +389,35 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
     */
 	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
 		return;
-	/* set SSC bit of PHY 0 - 3 */
+
 	for (i = 0; i < 4; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
-		value = pm8001_cr32(pm8001_ha, 2, offset);
-		if (SSCbit) {
-			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
-		} else {
-			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
-		}
-		bit_cnt = 0;
-		for (j = 0; j < 31; j++)
-			if ((value >> j) & 0x00000001)
-				bit_cnt++;
-		if (bit_cnt % 2)
-			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
-		else
-			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
-		pm8001_cw32(pm8001_ha, 2, offset, value);
+		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
 	}
-
 	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
 	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
 		return;
-
-	/* set SSC bit of PHY 4 - 7 */
 	for (i = 4; i < 8; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
-		value = pm8001_cr32(pm8001_ha, 2, offset);
-		if (SSCbit) {
-			value |= 0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT);
-		} else {
-			value |= 0x00000001 << PHY_G3_WITHOUT_SSC_BIT_SHIFT;
-			value &= ~(0x00000001 << PHY_G3_WITH_SSC_BIT_SHIFT);
-		}
-		bit_cnt = 0;
-		for (j = 0; j < 31; j++)
-			if ((value >> j) & 0x00000001)
-				bit_cnt++;
-		if (bit_cnt % 2)
-			value &= ~(0x00000001 << SNW3_PHY_CAPABILITIES_PARITY);
-		else
-			value |= 0x00000001 << SNW3_PHY_CAPABILITIES_PARITY;
-
-		pm8001_cw32(pm8001_ha, 2, offset, value);
+		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
 	}
+	/*************************************************************
+	 * Change the SSC upspreading value to 0x0 so that upspreading
+	 * is disabled.
+	 * Device MABC SMOD0 Controls
+	 * Address (via MEMBASE-III): shifted destination address
+	 * 0x0_0000, offset 0xD8
+	 *
+	 * 31:28 R/W Reserved  Do not change
+	 * 27:24 R/W SAS_SMOD_SPRDUP 0000
+	 * 23:20 R/W SAS_SMOD_SPRDDN 0000
+	 * 19:0  R/W Reserved  Do not change
+	 *
+	 * Upon power-up this register reads as 0x8990c016; changing
+	 * the SAS_SMOD_SPRDUP bits to 0b0000 gives a written value of
+	 * 0x8090c016, which ensures that only down-spreading SSC is
+	 * enabled on the SPC.
+	 *************************************************************/
+	value = pm8001_cr32(pm8001_ha, 2, 0xd8);
+	pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
 
 	/*set the shifted destination address to 0x0 to avoid error operation */
 	bar4_shift(pm8001_ha, 0x0);
@@ -1901,7 +1878,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct sas_task *t;
 	struct pm8001_ccb_info *ccb;
-	unsigned long flags;
+	unsigned long flags = 0;
 	u32 param;
 	u32 status;
 	u32 tag;
@@ -2040,7 +2017,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*in order to force CPU ordering*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2058,7 +2037,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2084,7 +2065,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/* ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2149,7 +2132,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2171,7 +2156,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2200,11 +2187,20 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else {
+	} else if (t->uldd_task) {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+	} else if (!t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/*ditto*/
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
 	}
 }
 
@@ -2212,7 +2208,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 {
 	struct sas_task *t;
-	unsigned long flags;
+	unsigned long flags = 0;
 	struct task_status_struct *ts;
 	struct pm8001_ccb_info *ccb;
 	struct pm8001_device *pm8001_dev;
@@ -2292,7 +2288,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			t->task_done(t);
+			spin_lock_irqsave(&pm8001_ha->lock, flags);
 			return;
 		}
 		break;
@@ -2401,11 +2399,20 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-	} else {
+	} else if (t->uldd_task) {
 		spin_unlock_irqrestore(&t->task_state_lock, flags);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
-		mb();/* in order to force CPU ordering */
+		mb();/* ditto */
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+	} else if (!t->uldd_task) {
+		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+		mb();/*ditto*/
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		t->task_done(t);
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
 	}
 }
 
@@ -2876,15 +2883,20 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
 	u8 link_rate =
 		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
 	u8 phy_id =
 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	unsigned long flags;
 	u8 deviceType = pPayload->sas_identify.dev_type;
-
+	port->port_state =  portstate;
 	PM8001_MSG_DBG(pm8001_ha,
-		pm8001_printk("HW_EVENT_SAS_PHY_UP \n"));
+		pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
+		port_id, phy_id));
 
 	switch (deviceType) {
 	case SAS_PHY_UNUSED:
@@ -2895,16 +2907,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
 		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
 			PHY_NOTIFY_ENABLE_SPINUP);
+		port->port_attached = 1;
 		get_lrate_mode(phy, link_rate);
 		break;
 	case SAS_EDGE_EXPANDER_DEVICE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("expander device.\n"));
+		port->port_attached = 1;
 		get_lrate_mode(phy, link_rate);
 		break;
 	case SAS_FANOUT_EXPANDER_DEVICE:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("fanout expander device.\n"));
+		port->port_attached = 1;
 		get_lrate_mode(phy, link_rate);
 		break;
 	default:
@@ -2946,11 +2961,20 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
 	u8 link_rate =
 		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
+	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
 	u8 phy_id =
 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
+	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
+	u8 portstate = (u8)(npip_portstate & 0x0000000F);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	unsigned long flags;
+	PM8001_MSG_DBG(pm8001_ha,
+		pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
+		" phy id = %d\n", port_id, phy_id));
+	port->port_state =  portstate;
+	port->port_attached = 1;
 	get_lrate_mode(phy, link_rate);
 	phy->phy_type |= PORT_TYPE_SATA;
 	phy->phy_attached = 1;
@@ -2984,7 +3008,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
 	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
 	u8 portstate = (u8)(npip_portstate & 0x0000000F);
-
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	port->port_state =  portstate;
+	phy->phy_type = 0;
+	phy->identify.device_type = 0;
+	phy->phy_attached = 0;
+	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
 	switch (portstate) {
 	case PORT_VALID:
 		break;
@@ -2993,26 +3023,30 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_printk(" PortInvalid portID %d \n", port_id));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
+		port->port_attached = 0;
 		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
 			port_id, phy_id, 0, 0);
 		break;
 	case PORT_IN_RESET:
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" PortInReset portID %d \n", port_id));
+			pm8001_printk(" Port In Reset portID %d \n", port_id));
 		break;
 	case PORT_NOT_ESTABLISHED:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+		port->port_attached = 0;
 		break;
 	case PORT_LOSTCOMM:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
+		port->port_attached = 0;
 		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
 			port_id, phy_id, 0, 0);
 		break;
 	default:
+		port->port_attached = 0;
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" phy Down and(default) = %x\n",
 			portstate));
@@ -3770,7 +3804,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 	u32 opc = OPC_INB_SSPINIIOSTART;
 	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
 	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8);
-	ssp_cmd.dir_m_tlr = data_dir_flags[task->data_dir] << 8 | 0x0;/*0 for
+	ssp_cmd.dir_m_tlr =
+		cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for
 	SAS 1.1 compatible TLR*/
 	ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
 	ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
@@ -3841,7 +3876,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
 		}
 	}
 	if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
-		ncg_tag = cpu_to_le32(hdr_tag);
+		ncg_tag = hdr_tag;
 	dir = data_dir_flags[task->data_dir] << 8;
 	sata_cmd.tag = cpu_to_le32(tag);
 	sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
@@ -3986,7 +4021,7 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
 		((stp_sspsmp_sata & 0x03) * 0x10000000));
 	payload.firstburstsize_ITNexustimeout =
 		cpu_to_le32(ITNT | (firstBurstSize * 0x10000));
-	memcpy(&payload.sas_addr_hi, pm8001_dev->sas_device->sas_addr,
+	memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr,
 		SAS_ADDR_SIZE);
 	rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
 	return rc;
@@ -4027,7 +4062,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 	struct inbound_queue_table *circularQ;
 	int ret;
 	u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
-	memset((u8 *)&payload, 0, sizeof(payload));
+	memset(&payload, 0, sizeof(payload));
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	payload.tag = 1;
 	payload.phyop_phyid =

+ 1 - 2
drivers/scsi/pm8001/pm8001_hwi.h

@@ -242,8 +242,7 @@ struct reg_dev_req {
 	__le32	phyid_portid;
 	__le32	dtype_dlr_retry;
 	__le32	firstburstsize_ITNexustimeout;
-	u32	sas_addr_hi;
-	u32	sas_addr_low;
+	u8	sas_addr[SAS_ADDR_SIZE];
 	__le32	upper_device_id;
 	u32	reserved[8];
 } __attribute__((packed, aligned(4)));

+ 14 - 5
drivers/scsi/pm8001/pm8001_init.c

@@ -200,8 +200,13 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
 {
 	int i;
 	spin_lock_init(&pm8001_ha->lock);
-	for (i = 0; i < pm8001_ha->chip->n_phy; i++)
+	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
 		pm8001_phy_init(pm8001_ha, i);
+		pm8001_ha->port[i].wide_port_phymap = 0;
+		pm8001_ha->port[i].port_attached = 0;
+		pm8001_ha->port[i].port_state = 0;
+		INIT_LIST_HEAD(&pm8001_ha->port[i].list);
+	}
 
 	pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
 	if (!pm8001_ha->tags)
@@ -511,19 +516,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 	u8 i;
 #ifdef PM8001_READ_VPD
 	DECLARE_COMPLETION_ONSTACK(completion);
+	struct pm8001_ioctl_payload payload;
 	pm8001_ha->nvmd_completion = &completion;
-	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, 0, 0);
+	payload.minor_function = 0;
+	payload.length = 128;
+	payload.func_specific = kzalloc(128, GFP_KERNEL);
+	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
 	wait_for_completion(&completion);
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
 		memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
 			SAS_ADDR_SIZE);
 		PM8001_INIT_DBG(pm8001_ha,
-			pm8001_printk("phy %d sas_addr = %x \n", i,
-			(u64)pm8001_ha->phy[i].dev_sas_addr));
+			pm8001_printk("phy %d sas_addr = %016llx \n", i,
+			pm8001_ha->phy[i].dev_sas_addr));
 	}
 #else
 	for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
-		pm8001_ha->phy[i].dev_sas_addr = 0x500e004010000004ULL;
+		pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
 		pm8001_ha->phy[i].dev_sas_addr =
 			cpu_to_be64((u64)
 				(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));

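The widened init loop gives every `pm8001_port` a known starting state, including `INIT_LIST_HEAD()` on its embedded list head; an uninitialized `list_head` holds garbage pointers and crashes on the first list operation. A stand-alone sketch of the self-pointing initialization the kernel macro performs (simplified, not the kernel implementation):

    #include <stdio.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    /* Equivalent of the kernel's INIT_LIST_HEAD(): an empty list is a
     * node whose next and prev both point at itself. */
    static void init_list_head(struct list_head *h)
    {
        h->next = h;
        h->prev = h;
    }

    struct port {
        int attached;
        struct list_head list;
    };

    int main(void)
    {
        struct port ports[4];

        for (int i = 0; i < 4; i++) {
            ports[i].attached = 0;
            init_list_head(&ports[i].list);
        }
        printf("empty: %d\n", ports[0].list.next == &ports[0].list);
        return 0;
    }
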
+ 53 - 4
drivers/scsi/pm8001/pm8001_sas.c

@@ -329,6 +329,23 @@ int pm8001_slave_configure(struct scsi_device *sdev)
 	}
 	return 0;
 }
+ /* Find the local port id that's attached to this device */
+static int sas_find_local_port_id(struct domain_device *dev)
+{
+	struct domain_device *pdev = dev->parent;
+
+	/* Directly attached device */
+	if (!pdev)
+		return dev->port->id;
+	while (pdev) {
+		struct domain_device *pdev_p = pdev->parent;
+		if (!pdev_p)
+			return pdev->port->id;
+		pdev = pdev->parent;
+	}
+	return 0;
+}
+
 /**
   * pm8001_task_exec - queue the task(ssp, smp && ata) to the hardware.
   * @task: the task to be execute.
@@ -346,11 +363,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
 	struct domain_device *dev = task->dev;
 	struct pm8001_hba_info *pm8001_ha;
 	struct pm8001_device *pm8001_dev;
+	struct pm8001_port *port = NULL;
 	struct sas_task *t = task;
 	struct pm8001_ccb_info *ccb;
 	u32 tag = 0xdeadbeef, rc, n_elem = 0;
 	u32 n = num;
-	unsigned long flags = 0;
+	unsigned long flags = 0, flags_libsas = 0;
 
 	if (!dev->port) {
 		struct task_status_struct *tsm = &t->task_status;
@@ -379,6 +397,35 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
 			rc = SAS_PHY_DOWN;
 			goto out_done;
 		}
+		port = &pm8001_ha->port[sas_find_local_port_id(dev)];
+		if (!port->port_attached) {
+			if (sas_protocol_ata(t->task_proto)) {
+				struct task_status_struct *ts = &t->task_status;
+				ts->resp = SAS_TASK_UNDELIVERED;
+				ts->stat = SAS_PHY_DOWN;
+
+				spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+				spin_unlock_irqrestore(dev->sata_dev.ap->lock,
+						flags_libsas);
+				t->task_done(t);
+				spin_lock_irqsave(dev->sata_dev.ap->lock,
+					flags_libsas);
+				spin_lock_irqsave(&pm8001_ha->lock, flags);
+				if (n > 1)
+					t = list_entry(t->list.next,
+							struct sas_task, list);
+				continue;
+			} else {
+				struct task_status_struct *ts = &t->task_status;
+				ts->resp = SAS_TASK_UNDELIVERED;
+				ts->stat = SAS_PHY_DOWN;
+				t->task_done(t);
+				if (n > 1)
+					t = list_entry(t->list.next,
+							struct sas_task, list);
+				continue;
+			}
+		}
 		rc = pm8001_tag_alloc(pm8001_ha, &tag);
 		if (rc)
 			goto err_out;
@@ -569,11 +616,11 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
 	spin_lock_irqsave(&pm8001_ha->lock, flags);
 
 	pm8001_device = pm8001_alloc_dev(pm8001_ha);
-	pm8001_device->sas_device = dev;
 	if (!pm8001_device) {
 		res = -1;
 		goto found_out;
 	}
+	pm8001_device->sas_device = dev;
 	dev->lldd_dev = pm8001_device;
 	pm8001_device->dev_type = dev->dev_type;
 	pm8001_device->dcompletion = &completion;
@@ -609,7 +656,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
 	wait_for_completion(&completion);
 	if (dev->dev_type == SAS_END_DEV)
 		msleep(50);
-	pm8001_ha->flags = PM8001F_RUN_TIME ;
+	pm8001_ha->flags |= PM8001F_RUN_TIME ;
 	return 0;
 found_out:
 	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -772,7 +819,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
 		task->task_done = pm8001_task_done;
 		task->timer.data = (unsigned long)task;
 		task->timer.function = pm8001_tmf_timedout;
-		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+		task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
 		add_timer(&task->timer);
 
 		res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
@@ -897,6 +944,8 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
 
 	if (dev_is_sata(dev)) {
 		DECLARE_COMPLETION_ONSTACK(completion_setstate);
+		if (scsi_is_sas_phy_local(phy))
+			return 0;
 		rc = sas_phy_reset(phy, 1);
 		msleep(2000);
 		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,

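Two of the smaller pm8001_sas.c fixes are worth spelling out: `pm8001_device->sas_device = dev` used to run before the allocation was NULL-checked, dereferencing a potentially NULL pointer, and `pm8001_ha->flags = PM8001F_RUN_TIME` overwrote every other flag bit where `|=` sets just the one. A sketch of both idioms (names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    #define F_RUN_TIME  0x01
    #define F_INIT_TIME 0x02

    struct dev_info { void *backptr; };

    int main(void)
    {
        unsigned int flags = F_INIT_TIME;
        struct dev_info *d = malloc(sizeof(*d));

        /* Check the allocation first; only then touch the object. */
        if (!d)
            return 1;
        d->backptr = &flags;

        /* OR in the new bit; plain '=' would have erased F_INIT_TIME. */
        flags |= F_RUN_TIME;
        printf("flags = 0x%x\n", flags);   /* 0x3: both bits kept */

        free(d);
        return 0;
    }
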
+ 24 - 8
drivers/scsi/pm8001/pm8001_sas.h

@@ -59,11 +59,11 @@
 
 #define DRV_NAME		"pm8001"
 #define DRV_VERSION		"0.1.36"
-#define PM8001_FAIL_LOGGING	0x01 /* libsas EH function logging */
+#define PM8001_FAIL_LOGGING	0x01 /* Error message logging */
 #define PM8001_INIT_LOGGING	0x02 /* driver init logging */
 #define PM8001_DISC_LOGGING	0x04 /* discovery layer logging */
 #define PM8001_IO_LOGGING	0x08 /* I/O path logging */
-#define PM8001_EH_LOGGING	0x10 /* Error message logging */
+#define PM8001_EH_LOGGING	0x10 /* libsas EH function logging*/
 #define PM8001_IOCTL_LOGGING	0x20 /* IOCTL message logging */
 #define PM8001_MSG_LOGGING	0x40 /* misc message logging */
 #define pm8001_printk(format, arg...)	printk(KERN_INFO "%s %d:" format,\
@@ -100,6 +100,7 @@ do {						\
 
 #define PM8001_USE_TASKLET
 #define PM8001_USE_MSIX
+#define PM8001_READ_VPD
 
 
 #define DEV_IS_EXPANDER(type)	((type == EDGE_DEV) || (type == FANOUT_DEV))
@@ -111,7 +112,22 @@ extern const struct pm8001_dispatch pm8001_8001_dispatch;
 struct pm8001_hba_info;
 struct pm8001_ccb_info;
 struct pm8001_device;
-struct pm8001_tmf_task;
+/* define task management IU */
+struct pm8001_tmf_task {
+	u8	tmf;
+	u32	tag_of_task_to_be_managed;
+};
+struct pm8001_ioctl_payload {
+	u32	signature;
+	u16	major_function;
+	u16	minor_function;
+	u16	length;
+	u16	status;
+	u16	offset;
+	u16	id;
+	u8	*func_specific;
+};
+
 struct pm8001_dispatch {
 	char *name;
 	int (*chip_init)(struct pm8001_hba_info *pm8001_ha);
@@ -164,6 +180,10 @@ struct pm8001_chip_info {
 
 struct pm8001_port {
 	struct asd_sas_port	sas_port;
+	u8			port_attached;
+	u8			wide_port_phymap;
+	u8			port_state;
+	struct list_head	list;
 };
 
 struct pm8001_phy {
@@ -386,11 +406,7 @@ struct pm8001_fw_image_header {
 	__be32 startup_entry;
 } __attribute__((packed, aligned(4)));
 
-/* define task management IU */
-struct pm8001_tmf_task {
-	u8	tmf;
-	u32	tag_of_task_to_be_managed;
-};
+
 /**
  * FW Flash Update status values
  */

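The header shuffle promotes `struct pm8001_tmf_task` from a forward declaration to a full definition ahead of the dispatch table, next to the new `pm8001_ioctl_payload`. The general C rule at work: a pointer to an incomplete type is fine in a prototype, but any code that reads members, takes `sizeof`, or declares an object by value needs the complete definition in scope first. A small illustration:

    #include <stdio.h>

    struct tmf_task;                /* incomplete: pointers only */

    static void show(struct tmf_task *t);   /* prototype is fine */

    struct tmf_task {               /* complete from here on */
        unsigned char tmf;
        unsigned int  tag;
    };

    static void show(struct tmf_task *t)
    {
        /* Member access needs the complete definition above. */
        printf("tmf=%u tag=%u\n", t->tmf, t->tag);
    }

    int main(void)
    {
        struct tmf_task t = { .tmf = 1, .tag = 42 };  /* sizeof known */
        show(&t);
        return 0;
    }
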
+ 25 - 9
drivers/scsi/pmcraid.c

@@ -1,7 +1,8 @@
 /*
  * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
  *
- * Written By: PMC Sierra Corporation
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ *             PMC-Sierra Inc
  *
  * Copyright (C) 2008, 2009 PMC Sierra Inc
  *
@@ -79,7 +80,7 @@ DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
 /*
  * Module parameters
  */
-MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
+MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
 MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PMCRAID_DRIVER_VERSION);
@@ -162,10 +163,10 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
 	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
 	list_for_each_entry(temp, &pinstance->used_res_q, queue) {
 
-		/* do not expose VSETs with order-ids >= 240 */
+		/* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
 		if (RES_IS_VSET(temp->cfg_entry)) {
 			target = temp->cfg_entry.unique_flags1;
-			if (target >= PMCRAID_MAX_VSET_TARGETS)
+			if (target > PMCRAID_MAX_VSET_TARGETS)
 				continue;
 			bus = PMCRAID_VSET_BUS_ID;
 			lun = 0;
@@ -1210,7 +1211,7 @@ static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
 	int retval = 0;
 
 	if (cfgte->resource_type == RES_TYPE_VSET)
-		retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
+		retval = ((cfgte->unique_flags1 & 0x80) == 0);
 	else if (cfgte->resource_type == RES_TYPE_GSCSI)
 		retval = (RES_BUS(cfgte->resource_address) !=
 				PMCRAID_VIRTUAL_ENCL_BUS_ID);
@@ -1361,6 +1362,7 @@ static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
  * Return value:
  *  none
  */
+
 static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 {
 	struct pmcraid_config_table_entry *cfg_entry;
@@ -1368,9 +1370,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 	struct pmcraid_cmd *cmd;
 	struct pmcraid_cmd *cfgcmd;
 	struct pmcraid_resource_entry *res = NULL;
-	u32 new_entry = 1;
 	unsigned long lock_flags;
 	unsigned long host_lock_flags;
+	u32 new_entry = 1;
+	u32 hidden_entry = 0;
 	int rc;
 
 	ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
@@ -1406,9 +1409,15 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 	}
 
 	/* If this resource is not going to be added to mid-layer, just notify
-	 * applications and return
+	 * applications and return. If this notification is about hiding a VSET
+	 * resource, check if it was exposed already.
 	 */
-	if (!pmcraid_expose_resource(cfg_entry))
+	if (pinstance->ccn.hcam->notification_type ==
+	    NOTIFICATION_TYPE_ENTRY_CHANGED &&
+	    cfg_entry->resource_type == RES_TYPE_VSET &&
+	    cfg_entry->unique_flags1 & 0x80) {
+		hidden_entry = 1;
+	} else if (!pmcraid_expose_resource(cfg_entry))
 		goto out_notify_apps;
 
 	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
@@ -1424,6 +1433,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 
 	if (new_entry) {
 
+		if (hidden_entry) {
+			spin_unlock_irqrestore(&pinstance->resource_lock,
+						lock_flags);
+			goto out_notify_apps;
+		}
+
 		/* If there are more number of resources than what driver can
 		 * manage, do not notify the applications about the CCN. Just
 		 * ignore this notifications and re-register the same HCAM
@@ -1454,8 +1469,9 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
 		sizeof(struct pmcraid_config_table_entry));
 
 	if (pinstance->ccn.hcam->notification_type ==
-	    NOTIFICATION_TYPE_ENTRY_DELETED) {
+	    NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
 		if (res->scsi_dev) {
+			res->cfg_entry.unique_flags1 &= 0x7F;
 			res->change_detected = RES_CHANGE_DEL;
 			res->cfg_entry.resource_handle =
 				PMCRAID_INVALID_RES_HANDLE;

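The pmcraid changes repurpose bit 7 of `unique_flags1` as a "hidden VSET" marker: `pmcraid_expose_resource()` now exposes a VSET only when the bit is clear, a change notification arriving with the bit set is handled like a delete, and the bit is masked off (`&= 0x7F`) before the entry is reused, which is also why `PMCRAID_MAX_VSET_TARGETS` drops to 0x7F below. A sketch of the flag-in-the-high-bit pattern (names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define VSET_HIDDEN   0x80   /* bit 7: firmware hides this target */
    #define VSET_ID_MASK  0x7F   /* low 7 bits: target id, 0..127 */

    static int exposed(uint8_t flags)
    {
        return (flags & VSET_HIDDEN) == 0;
    }

    int main(void)
    {
        uint8_t entry = 0x85;    /* hidden, target id 5 */

        printf("id=%u exposed=%d\n", entry & VSET_ID_MASK, exposed(entry));

        entry &= VSET_ID_MASK;   /* un-hide before the entry is reused */
        printf("id=%u exposed=%d\n", entry & VSET_ID_MASK, exposed(entry));
        return 0;
    }
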
+ 4 - 1
drivers/scsi/pmcraid.h

@@ -1,6 +1,9 @@
 /*
  * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
  *
+ * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
+ *             PMC-Sierra Inc
+ *
  * Copyright (C) 2008, 2009 PMC Sierra Inc.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -106,7 +109,7 @@
 #define PMCRAID_VSET_LUN_ID                      0x0
 #define PMCRAID_PHYS_BUS_ID                      0x0
 #define PMCRAID_VIRTUAL_ENCL_BUS_ID              0x8
-#define PMCRAID_MAX_VSET_TARGETS                 240
+#define PMCRAID_MAX_VSET_TARGETS                 0x7F
 #define PMCRAID_MAX_VSET_LUNS_PER_TARGET         8
 
 #define PMCRAID_IOA_MAX_SECTORS                  32767

+ 2 - 3
drivers/scsi/qla2xxx/qla_def.h

@@ -1570,9 +1570,6 @@ typedef struct fc_port {
 	struct fc_rport *rport, *drport;
 	u32 supported_classes;
 
-	unsigned long last_queue_full;
-	unsigned long last_ramp_up;
-
 	uint16_t vp_idx;
 } fc_port_t;
 
@@ -2265,6 +2262,7 @@ struct qla_hw_data {
 		uint32_t	port0			:1;
 		uint32_t	running_gold_fw		:1;
 		uint32_t	cpu_affinity_enabled	:1;
+		uint32_t	disable_msix_handshake	:1;
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -2387,6 +2385,7 @@ struct qla_hw_data {
 #define IS_QLA81XX(ha)		(IS_QLA8001(ha))
 #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
 				IS_QLA25XX(ha) || IS_QLA81XX(ha))
+#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
 #define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
 				(ha)->flags.msix_enabled)
 #define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha))

+ 0 - 2
drivers/scsi/qla2xxx/qla_gbl.h

@@ -72,8 +72,6 @@ extern int ql2xloginretrycount;
 extern int ql2xfdmienable;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
-extern int ql2xqfullrampup;
-extern int ql2xqfulltracking;
 extern int ql2xiidmaenable;
 extern int ql2xmaxqueues;
 extern int ql2xmultique_tag;

+ 11 - 1
drivers/scsi/qla2xxx/qla_init.c

@@ -1442,7 +1442,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
 			icb->firmware_options_2 |=
 				__constant_cpu_to_le32(BIT_18);
 
-		icb->firmware_options_2 &= __constant_cpu_to_le32(~BIT_22);
+		/* Use Disable MSIX Handshake mode for capable adapters */
+		if (IS_MSIX_NACK_CAPABLE(ha)) {
+			icb->firmware_options_2 &=
+				__constant_cpu_to_le32(~BIT_22);
+			ha->flags.disable_msix_handshake = 1;
+			qla_printk(KERN_INFO, ha,
+				"MSIX Handshake Disable Mode turned on\n");
+		} else {
+			icb->firmware_options_2 |=
+				__constant_cpu_to_le32(BIT_22);
+		}
 		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23);
 
 		WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);

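`BIT_22` of `firmware_options_2` selects the MSI-X handshake behaviour; the rewritten block clears it (handshake-disable mode) only on adapters where `IS_MSIX_NACK_CAPABLE()` holds and sets it everywhere else. The clear-or-set-one-bit idiom on a register image, sketched in host order for brevity (the driver itself works on `__le32` values via `__constant_cpu_to_le32`):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_22 (1u << 22)

    int main(void)
    {
        uint32_t fw_options = 0xffffffffu;
        int msix_nack_capable = 1;       /* e.g. an ISP81xx-class part */

        if (msix_nack_capable)
            fw_options &= ~BIT_22;       /* handshake disable mode */
        else
            fw_options |= BIT_22;        /* keep the handshake */

        printf("BIT_22 %s\n", (fw_options & BIT_22) ? "set" : "clear");
        return 0;
    }
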
+ 10 - 93
drivers/scsi/qla2xxx/qla_isr.c

@@ -811,78 +811,6 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 		qla2x00_alert_all_vps(rsp, mb);
 }
 
-static void
-qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
-{
-	fc_port_t *fcport = data;
-	struct scsi_qla_host *vha = fcport->vha;
-	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = NULL;
-
-	if (!ql2xqfulltracking)
-		return;
-
-	req = vha->req;
-	if (!req)
-		return;
-	if (req->max_q_depth <= sdev->queue_depth)
-		return;
-
-	if (sdev->ordered_tags)
-		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
-		    sdev->queue_depth + 1);
-	else
-		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
-		    sdev->queue_depth + 1);
-
-	fcport->last_ramp_up = jiffies;
-
-	DEBUG2(qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
-	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
-	    sdev->queue_depth));
-}
-
-static void
-qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
-{
-	fc_port_t *fcport = data;
-
-	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
-		return;
-
-	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
-	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
-	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
-	    sdev->queue_depth));
-}
-
-static inline void
-qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
-								srb_t *sp)
-{
-	fc_port_t *fcport;
-	struct scsi_device *sdev;
-
-	if (!ql2xqfulltracking)
-		return;
-
-	sdev = sp->cmd->device;
-	if (sdev->queue_depth >= req->max_q_depth)
-		return;
-
-	fcport = sp->fcport;
-	if (time_before(jiffies,
-	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
-		return;
-	if (time_before(jiffies,
-	    fcport->last_queue_full + ql2xqfullrampup * HZ))
-		return;
-
-	starget_for_each_device(sdev->sdev_target, fcport,
-	    qla2x00_adjust_sdev_qdepth_up);
-}
-
 /**
  * qla2x00_process_completed_request() - Process a Fast Post response.
  * @ha: SCSI driver HA context
@@ -913,8 +841,6 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
 
 		/* Save ISP completion status */
 		sp->cmd->result = DID_OK << 16;
-
-		qla2x00_ramp_up_queue_depth(vha, req, sp);
 		qla2x00_sp_compl(ha, sp);
 	} else {
 		DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
@@ -1435,13 +1361,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 			    "scsi(%ld): QUEUE FULL status detected "
 			    "0x%x-0x%x.\n", vha->host_no, comp_status,
 			    scsi_status));
-
-			/* Adjust queue depth for all luns on the port. */
-			if (!ql2xqfulltracking)
-				break;
-			fcport->last_queue_full = jiffies;
-			starget_for_each_device(cp->device->sdev_target,
-			    fcport, qla2x00_adjust_sdev_qdepth_down);
 			break;
 		}
 		if (lscsi_status != SS_CHECK_CONDITION)
@@ -1516,17 +1435,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 				    "scsi(%ld): QUEUE FULL status detected "
 				    "0x%x-0x%x.\n", vha->host_no, comp_status,
 				    scsi_status));
-
-				/*
-				 * Adjust queue depth for all luns on the
-				 * port.
-				 */
-				if (!ql2xqfulltracking)
-					break;
-				fcport->last_queue_full = jiffies;
-				starget_for_each_device(
-				    cp->device->sdev_target, fcport,
-				    qla2x00_adjust_sdev_qdepth_down);
 				break;
 			}
 			if (lscsi_status != SS_CHECK_CONDITION)
@@ -2020,7 +1928,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 
 	vha = qla25xx_get_host(rsp);
 	qla24xx_process_response_queue(vha, rsp);
-	if (!ha->mqenable) {
+	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
@@ -2034,6 +1942,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 {
 	struct qla_hw_data *ha;
 	struct rsp_que *rsp;
+	struct device_reg_24xx __iomem *reg;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2043,6 +1952,14 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 	}
 	ha = rsp->hw;
 
+	/* Clear the interrupt, if enabled, for this response queue */
+	if (rsp->options & ~BIT_6) {
+		reg = &ha->iobase->isp24;
+		spin_lock_irq(&ha->hardware_lock);
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+		RD_REG_DWORD_RELAXED(&reg->hccr);
+		spin_unlock_irq(&ha->hardware_lock);
+	}
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
 	return IRQ_HANDLED;

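The large qla_isr.c deletion moves queue-depth ramp-up/ramp-down out of the interrupt path (it reappears below in qla_os.c as a `change_queue_depth` callback); what remains adds a per-response-queue interrupt clear under the hardware lock, using the usual MMIO write followed by a read-back so the posted write reaches the device before the lock is dropped. A toy sketch of that write/read-back shape, with a plain variable standing in for the mapped register:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a memory-mapped HCCR register. */
    static volatile uint32_t fake_hccr;

    #define CLR_RISC_INT 0xA0000000u     /* illustrative value */

    static void clear_risc_int(volatile uint32_t *hccr)
    {
        *hccr = CLR_RISC_INT;    /* the WRT_REG_DWORD() step */
        (void)*hccr;             /* read back to flush the posted write */
    }

    int main(void)
    {
        clear_risc_int(&fake_hccr);
        printf("hccr = 0x%08x\n", (unsigned)fake_hccr);
        return 0;
    }
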
+ 4 - 0
drivers/scsi/qla2xxx/qla_mid.c

@@ -696,6 +696,10 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 	/* Use alternate PCI devfn */
 	if (LSB(rsp->rid))
 		options |= BIT_5;
+	/* Enable MSIX handshake mode on for uncapable adapters */
+	if (!IS_MSIX_NACK_CAPABLE(ha))
+		options |= BIT_6;
+
 	rsp->options = options;
 	rsp->id = que_id;
 	reg = ISP_QUE_REG(ha, que_id);

+ 54 - 21
drivers/scsi/qla2xxx/qla_os.c

@@ -78,21 +78,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
 		"Maximum queue depth to report for target devices.");
 
-int ql2xqfulltracking = 1;
-module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfulltracking,
-		"Controls whether the driver tracks queue full status "
-		"returns and dynamically adjusts a scsi device's queue "
-		"depth.  Default is 1, perform tracking.  Set to 0 to "
-		"disable dynamic tracking and adjustment of queue depth.");
-
-int ql2xqfullrampup = 120;
-module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(ql2xqfullrampup,
-		"Number of seconds to wait to begin to ramp-up the queue "
-		"depth for a device after a queue-full condition has been "
-		"detected.  Default is 120 seconds.");
-
 int ql2xiidmaenable=1;
 module_param(ql2xiidmaenable, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xiidmaenable,
@@ -1217,13 +1202,61 @@ qla2xxx_slave_destroy(struct scsi_device *sdev)
 	sdev->hostdata = NULL;
 }
 
+static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+
+	if (!scsi_track_queue_full(sdev, qdepth))
+		return;
+
+	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
+		"scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
+		fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+		sdev->queue_depth));
+}
+
+static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = sdev->hostdata;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = NULL;
+
+	req = vha->req;
+	if (!req)
+		return;
+
+	if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
+		return;
+
+	if (sdev->ordered_tags)
+		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
+	else
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
+
+	DEBUG2(qla_printk(KERN_INFO, ha,
+	       "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
+	       fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
+	       sdev->queue_depth));
+}
+
 static int
 qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
-	if (reason != SCSI_QDEPTH_DEFAULT)
-		return -EOPNOTSUPP;
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		qla2x00_handle_queue_full(sdev, qdepth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
+		break;
+	default:
+		return EOPNOTSUPP;
+	}
 
-	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
 	return sdev->queue_depth;
 }
 
@@ -2003,13 +2036,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
 	    base_vha->host_no, ha));
 
-	base_vha->flags.init_done = 1;
-	base_vha->flags.online = 1;
-
 	ret = scsi_add_host(host, &pdev->dev);
 	if (ret)
 		goto probe_failed;
 
+	base_vha->flags.init_done = 1;
+	base_vha->flags.online = 1;
+
 	ha->isp_ops->enable_intrs(ha);
 
 	scsi_scan_host(host);

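With the ISR-side tracking gone, queue-depth policy is driven by the `reason` code handed to `qla2x00_change_queue_depth()`: `SCSI_QDEPTH_DEFAULT` applies the requested depth directly, `SCSI_QDEPTH_QFULL` funnels through `scsi_track_queue_full()`, and `SCSI_QDEPTH_RAMP_UP` raises the depth subject to the request queue's maximum. A reason-dispatch sketch (enum values hypothetical, not the midlayer's):

    #include <stdio.h>

    enum qdepth_reason { QD_DEFAULT, QD_QFULL, QD_RAMP_UP };

    static int change_queue_depth(int *cur, int requested, int max,
                                  enum qdepth_reason reason)
    {
        switch (reason) {
        case QD_DEFAULT:
            *cur = requested;             /* explicit request: apply */
            break;
        case QD_QFULL:
            if (requested < *cur)
                *cur = requested;         /* back off after QUEUE FULL */
            break;
        case QD_RAMP_UP:
            if (requested > *cur && requested <= max)
                *cur = requested;         /* grow, bounded by the ring */
            break;
        default:
            return -1;                    /* unsupported reason */
        }
        return *cur;
    }

    int main(void)
    {
        int depth = 32;
        printf("%d\n", change_queue_depth(&depth, 16, 64, QD_QFULL));
        printf("%d\n", change_queue_depth(&depth, 24, 64, QD_RAMP_UP));
        return 0;
    }
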
+ 1 - 1
drivers/scsi/qla2xxx/qla_version.h

@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k7"
+#define QLA2XXX_VERSION      "8.03.01-k8"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3

+ 1 - 0
drivers/scsi/scsi_lib.c

@@ -859,6 +859,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				case 0x07: /* operation in progress */
 				case 0x08: /* Long write in progress */
 				case 0x09: /* self test in progress */
+				case 0x14: /* space allocation in progress */
 					action = ACTION_DELAYED_RETRY;
 					break;
 				default:

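The scsi_lib.c one-liner adds ASC 0x04/ASCQ 0x14, "logical unit not ready, space allocation in progress", to the transient NOT READY qualifiers that earn a delayed retry instead of an immediate failure; it pairs with the thin-provisioning support added to sd.c below, since a device busy allocating space will become ready shortly. A sketch of that classification switch:

    #include <stdio.h>

    enum action { ACT_FAIL, ACT_DELAYED_RETRY };

    /* Map "logical unit not ready" (ASC 0x04) qualifiers to an action. */
    static enum action classify_not_ready(unsigned char ascq)
    {
        switch (ascq) {
        case 0x07: /* operation in progress */
        case 0x08: /* long write in progress */
        case 0x09: /* self test in progress */
        case 0x14: /* space allocation in progress */
            return ACT_DELAYED_RETRY;
        default:
            return ACT_FAIL;
        }
    }

    int main(void)
    {
        printf("%d\n", classify_not_ready(0x14)); /* 1: retry later */
        printf("%d\n", classify_not_ready(0x00)); /* 0: fail */
        return 0;
    }
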
+ 14 - 3
drivers/scsi/scsi_transport_fc.c

@@ -649,11 +649,22 @@ static __init int fc_transport_init(void)
 		return error;
 	error = transport_class_register(&fc_vport_class);
 	if (error)
-		return error;
+		goto unreg_host_class;
 	error = transport_class_register(&fc_rport_class);
 	if (error)
-		return error;
-	return transport_class_register(&fc_transport_class);
+		goto unreg_vport_class;
+	error = transport_class_register(&fc_transport_class);
+	if (error)
+		goto unreg_rport_class;
+	return 0;
+
+unreg_rport_class:
+	transport_class_unregister(&fc_rport_class);
+unreg_vport_class:
+	transport_class_unregister(&fc_vport_class);
+unreg_host_class:
+	transport_class_unregister(&fc_host_class);
+	return error;
 }
 
 static void __exit fc_transport_exit(void)

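The `fc_transport_init()` fix is the canonical unwind-on-error shape: each successful registration gets a matching unregister label, and a later failure jumps to the label that tears down everything registered so far, in reverse order. Condensed to three stages (stub functions, not the transport-class API):

    #include <stdio.h>

    static int  reg(const char *n)   { printf("register %s\n", n); return 0; }
    static void unreg(const char *n) { printf("unregister %s\n", n); }

    static int transport_init(void)
    {
        int error;

        error = reg("host");
        if (error)
            return error;
        error = reg("vport");
        if (error)
            goto unreg_host;
        error = reg("rport");
        if (error)
            goto unreg_vport;
        return 0;

    unreg_vport:
        unreg("vport");
    unreg_host:
        unreg("host");
        return error;
    }

    int main(void)
    {
        return transport_init();
    }
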
+ 107 - 0
drivers/scsi/sd.c

@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
 	return snprintf(buf, 20, "%u\n", sdkp->ATO);
 }
 
+static ssize_t
+sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+	return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+}
+
 static struct device_attribute sd_disk_attrs[] = {
 	__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
 	       sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
 	       sd_store_manage_start_stop),
 	__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
 	__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
+	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
 	__ATTR_NULL,
 };
 
@@ -398,6 +408,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
 	scsi_set_prot_type(scmd, dif);
 }
 
+/**
+ * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on preference
+ * indicated by target device.
+ **/
+static int sd_prepare_discard(struct request *rq)
+{
+	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+	struct bio *bio = rq->bio;
+	sector_t sector = bio->bi_sector;
+	unsigned int num = bio_sectors(bio);
+
+	if (sdkp->device->sector_size == 4096) {
+		sector >>= 3;
+		num >>= 3;
+	}
+
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->timeout = SD_TIMEOUT;
+
+	memset(rq->cmd, 0, rq->cmd_len);
+
+	if (sdkp->unmap) {
+		char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+
+		rq->cmd[0] = UNMAP;
+		rq->cmd[8] = 24;
+		rq->cmd_len = 10;
+
+		/* Ensure that data length matches payload */
+		rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
+
+		put_unaligned_be16(6 + 16, &buf[0]);
+		put_unaligned_be16(16, &buf[2]);
+		put_unaligned_be64(sector, &buf[8]);
+		put_unaligned_be32(num, &buf[16]);
+
+		kunmap_atomic(buf, KM_USER0);
+	} else {
+		rq->cmd[0] = WRITE_SAME_16;
+		rq->cmd[1] = 0x8; /* UNMAP */
+		put_unaligned_be64(sector, &rq->cmd[2]);
+		put_unaligned_be32(num, &rq->cmd[10]);
+		rq->cmd_len = 16;
+	}
+
+	return BLKPREP_OK;
+}
+
 /**
  *	sd_init_command - build a scsi (read or write) command from
  *	information in the request structure.
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
 	int ret, host_dif;
 	unsigned char protect;
 
+	/*
+	 * Discard request come in as REQ_TYPE_FS but we turn them into
+	 * block PC requests to make life easier.
+	 */
+	if (blk_discard_rq(rq))
+		ret = sd_prepare_discard(rq);
+
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
 		goto out;
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
 		sd_printk(KERN_NOTICE, sdkp,
 			  "physical block alignment offset: %u\n", alignment);
 
+	if (buffer[14] & 0x80) { /* TPE */
+		struct request_queue *q = sdp->request_queue;
+
+		sdkp->thin_provisioning = 1;
+		q->limits.discard_granularity = sdkp->hw_sector_size;
+		q->limits.max_discard_sectors = 0xffffffff;
+
+		if (buffer[14] & 0x40) /* TPRZ */
+			q->limits.discard_zeroes_data = 1;
+
+		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	}
+
 	sdkp->capacity = lba + 1;
 	return sector_size;
 }
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
  */
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
+	struct request_queue *q = sdkp->disk->queue;
 	unsigned int sector_sz = sdkp->device->sector_size;
 	char *buffer;
 
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	blk_queue_io_opt(sdkp->disk->queue,
 			 get_unaligned_be32(&buffer[12]) * sector_sz);
 
+	/* Thin provisioning enabled and page length indicates TP support */
+	if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
+		unsigned int lba_count, desc_count, granularity;
+
+		lba_count = get_unaligned_be32(&buffer[20]);
+		desc_count = get_unaligned_be32(&buffer[24]);
+
+		if (lba_count) {
+			q->limits.max_discard_sectors =
+				lba_count * sector_sz >> 9;
+
+			if (desc_count)
+				sdkp->unmap = 1;
+		}
+
+		granularity = get_unaligned_be32(&buffer[28]);
+
+		if (granularity)
+			q->limits.discard_granularity = granularity * sector_sz;
+
+		if (buffer[32] & 0x80)
+			q->limits.discard_alignment =
+				get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+	}
+
 	kfree(buffer);
 }
 

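`sd_prepare_discard()` converts a block-layer discard into either UNMAP (a 10-byte CDB whose payload is a 24-byte parameter list: an 8-byte header plus one 16-byte block descriptor) or WRITE SAME(16) with the UNMAP bit set. The big-endian stores match the header arithmetic in the hunk: `6 + 16` at offset 0 is the length of everything after that field, `16` at offset 2 is the descriptor bytes. A user-space sketch of building the parameter list, with hand-rolled helpers in place of the kernel's `put_unaligned_be*()`:

    #include <stdint.h>
    #include <stdio.h>

    static void put_be16(uint16_t v, uint8_t *p)
    {
        p[0] = v >> 8; p[1] = v;
    }
    static void put_be32(uint32_t v, uint8_t *p)
    {
        for (int i = 0; i < 4; i++) p[i] = v >> (24 - 8 * i);
    }
    static void put_be64(uint64_t v, uint8_t *p)
    {
        for (int i = 0; i < 8; i++) p[i] = v >> (56 - 8 * i);
    }

    int main(void)
    {
        uint8_t buf[24] = { 0 };
        uint64_t lba = 123456;       /* starting LBA to unmap */
        uint32_t nblocks = 2048;     /* number of blocks */

        put_be16(6 + 16, &buf[0]);   /* UNMAP data length (bytes after it) */
        put_be16(16, &buf[2]);       /* block descriptor data length */
        put_be64(lba, &buf[8]);      /* descriptor: starting LBA */
        put_be32(nblocks, &buf[16]); /* descriptor: block count */

        for (int i = 0; i < 24; i++)
            printf("%02x%s", buf[i], (i % 8 == 7) ? "\n" : " ");
        return 0;
    }
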
+ 2 - 0
drivers/scsi/sd.h

@@ -60,6 +60,8 @@ struct scsi_disk {
 	unsigned	RCD : 1;	/* state of disk RCD bit, unused */
 	unsigned	DPOFUA : 1;	/* state of disk DPOFUA bit */
 	unsigned	first_scan : 1;
+	unsigned	thin_provisioning : 1;
+	unsigned	unmap : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 

+ 12 - 11
drivers/scsi/st.c

@@ -552,13 +552,15 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
 	SRpnt->waiting = waiting;
 
 	if (STp->buffer->do_dio) {
+		mdata->page_order = 0;
 		mdata->nr_entries = STp->buffer->sg_segs;
 		mdata->pages = STp->buffer->mapped_pages;
 	} else {
+		mdata->page_order = STp->buffer->reserved_page_order;
 		mdata->nr_entries =
 			DIV_ROUND_UP(bytes, PAGE_SIZE << mdata->page_order);
-		STp->buffer->map_data.pages = STp->buffer->reserved_pages;
-		STp->buffer->map_data.offset = 0;
+		mdata->pages = STp->buffer->reserved_pages;
+		mdata->offset = 0;
 	}
 
 	memcpy(SRpnt->cmd, cmd, sizeof(SRpnt->cmd));
@@ -3719,7 +3721,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 		priority |= __GFP_ZERO;
 
 	if (STbuffer->frp_segs) {
-		order = STbuffer->map_data.page_order;
+		order = STbuffer->reserved_page_order;
 		b_size = PAGE_SIZE << order;
 	} else {
 		for (b_size = PAGE_SIZE, order = 0;
@@ -3752,7 +3754,7 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm
 		segs++;
 	}
 	STbuffer->b_data = page_address(STbuffer->reserved_pages[0]);
-	STbuffer->map_data.page_order = order;
+	STbuffer->reserved_page_order = order;
 
 	return 1;
 }
@@ -3765,7 +3767,7 @@ static void clear_buffer(struct st_buffer * st_bp)
 
 	for (i=0; i < st_bp->frp_segs; i++)
 		memset(page_address(st_bp->reserved_pages[i]), 0,
-		       PAGE_SIZE << st_bp->map_data.page_order);
+		       PAGE_SIZE << st_bp->reserved_page_order);
 	st_bp->cleared = 1;
 }
 
@@ -3773,7 +3775,7 @@ static void clear_buffer(struct st_buffer * st_bp)
 /* Release the extra buffer */
 static void normalize_buffer(struct st_buffer * STbuffer)
 {
-	int i, order = STbuffer->map_data.page_order;
+	int i, order = STbuffer->reserved_page_order;
 
 	for (i = 0; i < STbuffer->frp_segs; i++) {
 		__free_pages(STbuffer->reserved_pages[i], order);
@@ -3781,7 +3783,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
 	}
 	STbuffer->frp_segs = 0;
 	STbuffer->sg_segs = 0;
-	STbuffer->map_data.page_order = 0;
+	STbuffer->reserved_page_order = 0;
 	STbuffer->map_data.offset = 0;
 }
 
@@ -3791,7 +3793,7 @@ static void normalize_buffer(struct st_buffer * STbuffer)
 static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, int do_count)
 {
 	int i, cnt, res, offset;
-	int length = PAGE_SIZE << st_bp->map_data.page_order;
+	int length = PAGE_SIZE << st_bp->reserved_page_order;
 
 	for (i = 0, offset = st_bp->buffer_bytes;
 	     i < st_bp->frp_segs && offset >= length; i++)
@@ -3823,7 +3825,7 @@ static int append_to_buffer(const char __user *ubp, struct st_buffer * st_bp, in
 static int from_buffer(struct st_buffer * st_bp, char __user *ubp, int do_count)
 {
 	int i, cnt, res, offset;
-	int length = PAGE_SIZE << st_bp->map_data.page_order;
+	int length = PAGE_SIZE << st_bp->reserved_page_order;
 
 	for (i = 0, offset = st_bp->read_pointer;
 	     i < st_bp->frp_segs && offset >= length; i++)
@@ -3856,7 +3858,7 @@ static void move_buffer_data(struct st_buffer * st_bp, int offset)
 {
 	int src_seg, dst_seg, src_offset = 0, dst_offset;
 	int count, total;
-	int length = PAGE_SIZE << st_bp->map_data.page_order;
+	int length = PAGE_SIZE << st_bp->reserved_page_order;
 
 	if (offset == 0)
 		return;
@@ -4578,7 +4580,6 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
         }
 
 	mdata->offset = uaddr & ~PAGE_MASK;
-	mdata->page_order = 0;
 	STbp->mapped_pages = pages;
 
 	return nr_pages;

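The st changes move the allocation order out of the shared `map_data` into a driver-owned `reserved_page_order`: direct-I/O requests now map user pages at order 0 while the reserved buffer keeps its own order, so the two no longer clobber each other. Every buffer walk then derives its segment length as `PAGE_SIZE << order`, i.e. each reserved segment is one physically contiguous 2^order-page allocation. A sketch of that offset walk, mirroring the loops in `append_to_buffer()`/`from_buffer()`:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        int order = 2;                    /* 4 pages per segment */
        int length = PAGE_SIZE << order;  /* 16384 bytes per segment */
        int offset = 40000;               /* byte offset into the buffer */
        int seg = 0;

        /* Skip whole segments until the offset falls inside one. */
        while (offset >= length) {
            offset -= length;
            seg++;
        }
        printf("segment %d, offset %d\n", seg, offset);
        return 0;
    }
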
+ 1 - 0
drivers/scsi/st.h

@@ -46,6 +46,7 @@ struct st_buffer {
 	struct st_request *last_SRpnt;
 	struct st_cmdstatus cmdstat;
 	struct page **reserved_pages;
+	int reserved_page_order;
 	struct page **mapped_pages;
 	struct rq_map_data map_data;
 	unsigned char *b_data;

+ 2 - 0
include/linux/enclosure.h

@@ -42,6 +42,8 @@ enum enclosure_status {
 	ENCLOSURE_STATUS_NOT_INSTALLED,
 	ENCLOSURE_STATUS_UNKNOWN,
 	ENCLOSURE_STATUS_UNAVAILABLE,
+	/* last element for counting purposes */
+	ENCLOSURE_STATUS_MAX
 };
 
 /* SFF-8485 activity light settings */

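The `ENCLOSURE_STATUS_MAX` sentinel is the standard trick for sizing lookup tables from an enum: enumerators count up from zero, so the trailing element equals the number of real states and keeps any name table in step with the enum automatically. A sketch:

    #include <stdio.h>

    enum status {
        STATUS_OK,
        STATUS_CRITICAL,
        STATUS_UNKNOWN,
        STATUS_MAX           /* last element for counting purposes */
    };

    static const char *status_name[STATUS_MAX] = {
        [STATUS_OK]       = "ok",
        [STATUS_CRITICAL] = "critical",
        [STATUS_UNKNOWN]  = "unknown",
    };

    int main(void)
    {
        for (int i = 0; i < STATUS_MAX; i++)
            printf("%d = %s\n", i, status_name[i]);
        return 0;
    }
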
+ 3 - 2
include/scsi/osd_initiator.h

@@ -142,6 +142,7 @@ struct osd_request {
 	struct _osd_io_info {
 		struct bio *bio;
 		u64 total_bytes;
+		u64 residual;
 		struct request *req;
 		struct _osd_req_data_segment *last_seg;
 		u8 *pad_buff;
@@ -150,12 +151,14 @@ struct osd_request {
 	gfp_t alloc_flags;
 	unsigned timeout;
 	unsigned retries;
+	unsigned sense_len;
 	u8 sense[OSD_MAX_SENSE_LEN];
 	enum osd_attributes_mode attributes_mode;
 
 	osd_req_done_fn *async_done;
 	void *async_private;
 	int async_error;
+	int req_errors;
 };
 
 static inline bool osd_req_is_ver1(struct osd_request *or)
@@ -297,8 +300,6 @@ enum osd_err_priority {
 };
 
 struct osd_sense_info {
-	u64 out_resid;		/* Zero on success otherwise out residual */
-	u64 in_resid;		/* Zero on success otherwise in residual */
 	enum osd_err_priority osd_err_pri;
 
 	int key;		/* one of enum scsi_sense_keys */