
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "First round of SCSI updates for the 4.4 merge window.

  This batch includes a couple of minor fixes, some core changes to help
  issues we're still seeing with the suspend/resume code and updates to
  lpfc and cxlflash.

  We're (actually Martin Petersen is) trying to wrangle a mpt2/mpt3sas
  merger for the merge window which will help enormously with the
  maintenance burden, so there will be another round before it closes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (56 commits)
  cxlflash: Fix to avoid bypassing context cleanup
  cxlflash: Fix to avoid lock instrumentation rejection
  cxlflash: Fix to avoid corrupting port selection mask
  cxlflash: Fix to escalate to LINK_RESET on login timeout
  cxlflash: Fix to avoid leaving dangling interrupt resources
  cxlflash: Fix to avoid potential deadlock on EEH
  cxlflash: Correct trace string
  cxlflash: Fix to avoid corrupting adapter fops
  cxlflash: Fix to double the delay each time
  MAINTAINERS: Add cxlflash driver
  cxlflash: Fix to prevent stale AFU RRQ
  cxlflash: Correct spelling, grammar, and alignment mistakes
  cxlflash: Fix to prevent EEH recovery failure
  cxlflash: Fix MMIO and endianness errors
  cxlflash: Fix function prolog parameters and return codes
  cxlflash: Remove unnecessary scsi_block_requests
  cxlflash: Correct behavior in device reset handler following EEH
  cxlflash: Fix to prevent workq from accessing freed memory
  cxlflash: Correct usage of scsi_host_put()
  cxlflash: Fix AFU version access/storage and add check
  ...
Linus Torvalds, 9 years ago
parent commit
a3e7531535
37 changed files with 1342 additions and 1163 deletions
  1. MAINTAINERS (+11 -2)
  2. drivers/scsi/be2iscsi/Kconfig (+2 -2)
  3. drivers/scsi/be2iscsi/Makefile (+1 -1)
  4. drivers/scsi/be2iscsi/be.h (+2 -2)
  5. drivers/scsi/be2iscsi/be_cmds.c (+2 -2)
  6. drivers/scsi/be2iscsi/be_cmds.h (+2 -2)
  7. drivers/scsi/be2iscsi/be_iscsi.c (+2 -2)
  8. drivers/scsi/be2iscsi/be_main.c (+4 -4)
  9. drivers/scsi/be2iscsi/be_main.h (+3 -3)
  10. drivers/scsi/be2iscsi/be_mgmt.c (+2 -2)
  11. drivers/scsi/be2iscsi/be_mgmt.h (+2 -2)
  12. drivers/scsi/bnx2fc/bnx2fc_fcoe.c (+0 -1)
  13. drivers/scsi/cxlflash/common.h (+13 -17)
  14. drivers/scsi/cxlflash/lunmgt.c (+24 -21)
  15. drivers/scsi/cxlflash/main.c (+832 -718)
  16. drivers/scsi/cxlflash/main.h (+1 -0)
  17. drivers/scsi/cxlflash/sislite.h (+4 -4)
  18. drivers/scsi/cxlflash/superpipe.c (+134 -75)
  19. drivers/scsi/cxlflash/superpipe.h (+9 -5)
  20. drivers/scsi/cxlflash/vlun.c (+49 -19)
  21. drivers/scsi/fnic/fnic_fcs.c (+38 -8)
  22. drivers/scsi/fnic/fnic_scsi.c (+16 -0)
  23. drivers/scsi/lpfc/lpfc.h (+5 -3)
  24. drivers/scsi/lpfc/lpfc_attr.c (+13 -10)
  25. drivers/scsi/lpfc/lpfc_bsg.c (+0 -20)
  26. drivers/scsi/lpfc/lpfc_ct.c (+6 -5)
  27. drivers/scsi/lpfc/lpfc_els.c (+28 -72)
  28. drivers/scsi/lpfc/lpfc_hbadisc.c (+19 -8)
  29. drivers/scsi/lpfc/lpfc_hw.h (+5 -1)
  30. drivers/scsi/lpfc/lpfc_init.c (+23 -13)
  31. drivers/scsi/lpfc/lpfc_mbox.c (+11 -8)
  32. drivers/scsi/lpfc/lpfc_nportdisc.c (+0 -2)
  33. drivers/scsi/lpfc/lpfc_scsi.c (+1 -13)
  34. drivers/scsi/lpfc/lpfc_sli.c (+1 -9)
  35. drivers/scsi/lpfc/lpfc_sli4.h (+0 -1)
  36. drivers/scsi/lpfc/lpfc_version.h (+1 -1)
  37. drivers/scsi/scsi_devinfo.c (+76 -105)

+ 11 - 2
MAINTAINERS

@@ -3186,6 +3186,15 @@ F:	Documentation/powerpc/cxl.txt
 F:	Documentation/powerpc/cxl.txt
 F:	Documentation/ABI/testing/sysfs-class-cxl
 
+CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
+M:	Manoj N. Kumar <manoj@linux.vnet.ibm.com>
+M:	Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/cxlflash/
+F:	include/uapi/scsi/cxlflash_ioctls.h
+F:	Documentation/powerpc/cxlflash.txt
+
 STMMAC ETHERNET DRIVER
 M:	Giuseppe Cavallaro <peppe.cavallaro@st.com>
 L:	netdev@vger.kernel.org
@@ -9473,8 +9482,8 @@ F:	include/uapi/linux/phantom.h
 
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:	Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
-M:	Minh Tran <minh.tran@avagotech.com>
-M:	John Soni Jose <sony.john-n@avagotech.com>
+M:	Ketan Mukadam <ketan.mukadam@avagotech.com>
+M:	John Soni Jose <sony.john@avagotech.com>
 L:	linux-scsi@vger.kernel.org
 W:	http://www.avagotech.com
 S:	Supported

+ 2 - 2
drivers/scsi/be2iscsi/Kconfig

@@ -1,9 +1,9 @@
 config BE2ISCSI
-	tristate "ServerEngines' 10Gbps iSCSI - BladeEngine 2"
+	tristate "Emulex 10Gbps iSCSI - BladeEngine 2"
 	depends on PCI && SCSI && NET
 	select SCSI_ISCSI_ATTRS
 	select ISCSI_BOOT_SYSFS
 
 	help
-	This driver implements the iSCSI functionality for ServerEngines'
+	This driver implements the iSCSI functionality for Emulex
 	10Gbps Storage adapter - BladeEngine 2.

+ 1 - 1
drivers/scsi/be2iscsi/Makefile

@@ -1,5 +1,5 @@
 #
-# Makefile to build the iSCSI driver for ServerEngine's BladeEngine.
+# Makefile to build the iSCSI driver for Emulex OneConnect.
 #
 #
 

+ 2 - 2
drivers/scsi/be2iscsi/be.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -10,7 +10,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */

+ 2 - 2
drivers/scsi/be2iscsi/be_cmds.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -10,7 +10,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */

+ 2 - 2
drivers/scsi/be2iscsi/be_cmds.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -10,7 +10,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */

+ 2 - 2
drivers/scsi/be2iscsi/be_iscsi.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */

+ 4 - 4
drivers/scsi/be2iscsi/be_main.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
@@ -50,7 +50,7 @@ static unsigned int enable_msix = 1;
 
 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
 MODULE_VERSION(BUILD_STR);
-MODULE_AUTHOR("Avago Technologies");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 module_param(be_iopoll_budget, int, 0);
 module_param(enable_msix, int, 0);
@@ -552,7 +552,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 
 static struct scsi_host_template beiscsi_sht = {
 	.module = THIS_MODULE,
-	.name = "Avago Technologies 10Gbe open-iscsi Initiator Driver",
+	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
 	.proc_name = DRV_NAME,
 	.queuecommand = iscsi_queuecommand,
 	.change_queue_depth = scsi_change_queue_depth,

+ 3 - 3
drivers/scsi/be2iscsi/be_main.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
@@ -37,7 +37,7 @@
 
 #define DRV_NAME		"be2iscsi"
 #define BUILD_STR		"10.6.0.0"
-#define BE_NAME			"Avago Technologies OneConnect" \
+#define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
 

+ 2 - 2
drivers/scsi/be2iscsi/be_mgmt.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */

+ 2 - 2
drivers/scsi/be2iscsi/be_mgmt.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2015 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
  * Contact Information:
  * linux-drivers@avagotech.com
  *
- * Avago Technologies
+ * Emulex
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */

+ 0 - 1
drivers/scsi/bnx2fc/bnx2fc_fcoe.c

@@ -856,7 +856,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 		return;
 
 	default:
-		printk(KERN_ERR PFX "Unknown netevent %ld", event);
 		return;
 	}
 

+ 13 - 17
drivers/scsi/cxlflash/common.h

@@ -16,10 +16,12 @@
 #define _CXLFLASH_COMMON_H
 
 #include <linux/list.h>
+#include <linux/rwsem.h>
 #include <linux/types.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_device.h>
 
+extern const struct file_operations cxlflash_cxl_fops;
 
 #define MAX_CONTEXT  CXLFLASH_MAX_CONTEXT       /* num contexts per afu */
 
@@ -78,7 +80,7 @@ enum cxlflash_init_state {
 
 enum cxlflash_state {
 	STATE_NORMAL,	/* Normal running state, everything good */
-	STATE_LIMBO,	/* Limbo running state, trying to reset/recover */
+	STATE_RESET,	/* Reset state, trying to reset/recover */
 	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
 };
 
@@ -101,29 +103,28 @@ struct cxlflash_cfg {
 	enum cxlflash_init_state init_state;
 	enum cxlflash_lr_state lr_state;
 	int lr_port;
+	atomic_t scan_host_needed;
 
 	struct cxl_afu *cxl_afu;
-
-	struct pci_pool *cxlflash_cmd_pool;
 	struct pci_dev *parent_dev;
 
 	atomic_t recovery_threads;
 	struct mutex ctx_recovery_mutex;
 	struct mutex ctx_tbl_list_mutex;
+	struct rw_semaphore ioctl_rwsem;
 	struct ctx_info *ctx_tbl[MAX_CONTEXT];
 	struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
 	struct file_operations cxl_fops;
 
-	atomic_t num_user_contexts;
-
 	/* Parameters that are LUN table related */
 	int last_lun_index[CXLFLASH_NUM_FC_PORTS];
 	int promote_lun_index;
 	struct list_head lluns; /* list of llun_info structs */
 
 	wait_queue_head_t tmf_waitq;
+	spinlock_t tmf_slock;
 	bool tmf_active;
-	wait_queue_head_t limbo_waitq;
+	wait_queue_head_t reset_waitq;
 	enum cxlflash_state state;
 };
 
@@ -160,9 +161,9 @@ struct afu {
 
 	/* AFU HW */
 	struct cxl_ioctl_start_work work;
-	struct cxlflash_afu_map *afu_map;	/* entire MMIO map */
-	struct sisl_host_map *host_map;		/* MC host map */
-	struct sisl_ctrl_map *ctrl_map;		/* MC control map */
+	struct cxlflash_afu_map __iomem *afu_map;	/* entire MMIO map */
+	struct sisl_host_map __iomem *host_map;		/* MC host map */
+	struct sisl_ctrl_map __iomem *ctrl_map;		/* MC control map */
 
 	ctx_hndl_t ctx_hndl;	/* master's context handle */
 	u64 *hrrq_start;
@@ -175,7 +176,7 @@ struct afu {
 	u32 cmd_couts;		/* Number of command checkouts */
 	u32 internal_lun;	/* User-desired LUN mode for this AFU */
 
-	char version[8];
+	char version[16];
 	u64 interface_version;
 
 	struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */
@@ -184,17 +185,12 @@ struct afu {
 
 static inline u64 lun_to_lunid(u64 lun)
 {
-	u64 lun_id;
+	__be64 lun_id;
 
 	int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
-	return swab64(lun_id);
+	return be64_to_cpu(lun_id);
 }
 
-int cxlflash_send_cmd(struct afu *, struct afu_cmd *);
-void cxlflash_wait_resp(struct afu *, struct afu_cmd *);
-int cxlflash_afu_reset(struct cxlflash_cfg *);
-struct afu_cmd *cxlflash_cmd_checkout(struct afu *);
-void cxlflash_cmd_checkin(struct afu_cmd *);
 int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
 void cxlflash_list_init(void);
 void cxlflash_term_global_luns(void);

+ 24 - 21
drivers/scsi/cxlflash/lunmgt.c

@@ -41,7 +41,6 @@ static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
 	}
 
 	lli->sdev = sdev;
-	lli->newly_created = true;
 	lli->host_no = sdev->host->host_no;
 	lli->in_table = false;
 
@@ -74,24 +73,19 @@ out:
 }
 
 /**
- * refresh_local() - find and update local LUN information structure by WWID
+ * lookup_local() - find a local LUN information structure by WWID
  * @cfg:	Internal structure associated with the host.
  * @wwid:	WWID associated with LUN.
  *
- * When the LUN is found, mark it by updating it's newly_created field.
- *
  * Return: Found local lun_info structure on success, NULL on failure
- * If a LUN with the WWID is found in the list, refresh it's state.
  */
-static struct llun_info *refresh_local(struct cxlflash_cfg *cfg, u8 *wwid)
+static struct llun_info *lookup_local(struct cxlflash_cfg *cfg, u8 *wwid)
 {
 	struct llun_info *lli, *temp;
 
 	list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
-		if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) {
-			lli->newly_created = false;
+		if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
 			return lli;
-		}
 
 	return NULL;
 }
@@ -120,7 +114,8 @@ static struct glun_info *lookup_global(u8 *wwid)
  *
  * The LUN is kept both in a local list (per adapter) and in a global list
  * (across all adapters). Certain attributes of the LUN are local to the
- * adapter (such as index, port selection mask etc.).
+ * adapter (such as index, port selection mask, etc.).
+ *
  * The block allocation map is shared across all adapters (i.e. associated
  * wih the global list). Since different attributes are associated with
  * the per adapter and global entries, allocate two separate structures for each
@@ -128,6 +123,8 @@ static struct glun_info *lookup_global(u8 *wwid)
  *
  * Keep a pointer back from the local to the global entry.
  *
+ * This routine assumes the caller holds the global mutex.
+ *
  * Return: Found/Allocated local lun_info structure on success, NULL on failure
  */
 static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
@@ -137,11 +134,10 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
 	struct Scsi_Host *shost = sdev->host;
 	struct cxlflash_cfg *cfg = shost_priv(shost);
 
-	mutex_lock(&global.mutex);
 	if (unlikely(!wwid))
 		goto out;
 
-	lli = refresh_local(cfg, wwid);
+	lli = lookup_local(cfg, wwid);
 	if (lli)
 		goto out;
 
@@ -169,7 +165,6 @@ static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
 	list_add(&gli->list, &global.gluns);
 
 out:
-	mutex_unlock(&global.mutex);
 	pr_debug("%s: returning %p\n", __func__, lli);
 	return lli;
 }
@@ -235,10 +230,11 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
 	u64 flags = manage->hdr.flags;
 	u32 chan = sdev->channel;
 
+	mutex_lock(&global.mutex);
 	lli = find_and_create_lun(sdev, manage->wwid);
 	pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX li = %p\n",
-		 __func__, get_unaligned_le64(&manage->wwid[0]),
-		 get_unaligned_le64(&manage->wwid[8]),
+		 __func__, get_unaligned_be64(&manage->wwid[0]),
+		 get_unaligned_be64(&manage->wwid[8]),
 		 manage->hdr.flags, lli);
 	if (unlikely(!lli)) {
 		rc = -ENOMEM;
@@ -246,21 +242,28 @@ int cxlflash_manage_lun(struct scsi_device *sdev,
 	}
 
 	if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
-		if (lli->newly_created)
-			lli->port_sel = CHAN2PORT(chan);
-		else
-			lli->port_sel = BOTH_PORTS;
-		/* Store off lun in unpacked, AFU-friendly format */
+		/*
+		 * Update port selection mask based upon channel, store off LUN
+		 * in unpacked, AFU-friendly format, and hang LUN reference in
+		 * the sdev.
+		 */
+		lli->port_sel |= CHAN2PORT(chan);
 		lli->lun_id[chan] = lun_to_lunid(sdev->lun);
 		sdev->hostdata = lli;
 	} else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
 		if (lli->parent->mode != MODE_NONE)
 			rc = -EBUSY;
-		else
+		else {
 			sdev->hostdata = NULL;
+			lli->port_sel &= ~CHAN2PORT(chan);
+		}
 	}
 
+	pr_debug("%s: port_sel = %08X chan = %u lun_id = %016llX\n", __func__,
+		 lli->port_sel, chan, lli->lun_id[chan]);
+
 out:
+	mutex_unlock(&global.mutex);
 	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }

+ 832 - 718
drivers/scsi/cxlflash/main.c

@@ -34,9 +34,8 @@ MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
 MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
 MODULE_LICENSE("GPL");
 
-
 /**
- * cxlflash_cmd_checkout() - checks out an AFU command
+ * cmd_checkout() - checks out an AFU command
  * @afu:	AFU to checkout from.
  *
  * Commands are checked out in a round-robin fashion. Note that since
@@ -47,7 +46,7 @@ MODULE_LICENSE("GPL");
  *
  * Return: The checked out command or NULL when command pool is empty.
  */
-struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
+static struct afu_cmd *cmd_checkout(struct afu *afu)
 {
 	int k, dec = CXLFLASH_NUM_CMDS;
 	struct afu_cmd *cmd;
@@ -58,8 +57,8 @@ struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
 		cmd = &afu->cmd[k];
 
 		if (!atomic_dec_if_positive(&cmd->free)) {
-			pr_debug("%s: returning found index=%d\n",
-				 __func__, cmd->slot);
+			pr_devel("%s: returning found index=%d cmd=%p\n",
+				 __func__, cmd->slot, cmd);
 			memset(cmd->buf, 0, CMD_BUFSIZE);
 			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
 			return cmd;
@@ -70,7 +69,7 @@ struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
 }
 
 /**
- * cxlflash_cmd_checkin() - checks in an AFU command
+ * cmd_checkin() - checks in an AFU command
  * @cmd:	AFU command to checkin.
  *
  * Safe to pass commands that have already been checked in. Several
@@ -79,7 +78,7 @@ struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
  * to avoid clobbering values in the event that the command is checked
  * out right away.
  */
-void cxlflash_cmd_checkin(struct afu_cmd *cmd)
+static void cmd_checkin(struct afu_cmd *cmd)
 {
 	cmd->rcb.scp = NULL;
 	cmd->rcb.timeout = 0;
@@ -93,7 +92,7 @@ void cxlflash_cmd_checkin(struct afu_cmd *cmd)
 		return;
 	}
 
-	pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
+	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
 }
 
 /**
@@ -107,6 +106,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 {
 	struct sisl_ioarcb *ioarcb;
 	struct sisl_ioasa *ioasa;
+	u32 resid;
 
 	if (unlikely(!cmd))
 		return;
@@ -115,9 +115,10 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 	ioasa = &(cmd->sa);
 
 	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
-		pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
-			 __func__, cmd, scp);
-		scp->result = (DID_ERROR << 16);
+		resid = ioasa->resid;
+		scsi_set_resid(scp, resid);
+		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
+			 __func__, cmd, scp, resid);
 	}
 
 	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
@@ -127,7 +128,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 	}
 
 	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
-		 "afu_extra=0x%X, scsi_entra=0x%X, fc_extra=0x%X\n",
+		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
 		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
 		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
 		 ioasa->fc_extra);
@@ -158,8 +159,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
 				 * then we will handle this error else where.
 				 * If not then we must handle it here.
-				 * This is probably an AFU bug. We will
-				 * attempt a retry to see if that resolves it.
+				 * This is probably an AFU bug.
 				 */
 				scp->result = (DID_ERROR << 16);
 			}
@@ -183,7 +183,7 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 		/* We have an AFU error */
 		switch (ioasa->rc.afu_rc) {
 		case SISL_AFU_RC_NO_CHANNELS:
-			scp->result = (DID_MEDIUM_ERROR << 16);
+			scp->result = (DID_NO_CONNECT << 16);
 			break;
 		case SISL_AFU_RC_DATA_DMA_ERR:
 			switch (ioasa->afu_extra) {
@@ -217,7 +217,6 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
 static void cmd_complete(struct afu_cmd *cmd)
 {
 	struct scsi_cmnd *scp;
-	u32 resid;
 	ulong lock_flags;
 	struct afu *afu = cmd->parent;
 	struct cxlflash_cfg *cfg = afu->parent;
@@ -229,36 +228,171 @@ static void cmd_complete(struct afu_cmd *cmd)
 
 	if (cmd->rcb.scp) {
 		scp = cmd->rcb.scp;
-		if (unlikely(cmd->sa.rc.afu_rc ||
-			     cmd->sa.rc.scsi_rc ||
-			     cmd->sa.rc.fc_rc))
+		if (unlikely(cmd->sa.ioasc))
 			process_cmd_err(cmd, scp);
 		else
 			scp->result = (DID_OK << 16);
 
-		resid = cmd->sa.resid;
 		cmd_is_tmf = cmd->cmd_tmf;
-		cxlflash_cmd_checkin(cmd); /* Don't use cmd after here */
+		cmd_checkin(cmd); /* Don't use cmd after here */
 
-		pr_debug("%s: calling scsi_set_resid, scp=%p "
-			 "result=%X resid=%d\n", __func__,
-			 scp, scp->result, resid);
+		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
+				     "ioasc=%d\n", __func__, scp, scp->result,
+				     cmd->sa.ioasc);
 
-		scsi_set_resid(scp, resid);
 		scsi_dma_unmap(scp);
 		scp->scsi_done(scp);
 
 		if (cmd_is_tmf) {
-			spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 			cfg->tmf_active = false;
 			wake_up_all_locked(&cfg->tmf_waitq);
-			spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
-					       lock_flags);
+			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 		}
 	} else
 		complete(&cmd->cevent);
 }
 
+/**
+ * context_reset() - timeout handler for AFU commands
+ * @cmd:	AFU command that timed out.
+ *
+ * Sends a reset to the AFU.
+ */
+static void context_reset(struct afu_cmd *cmd)
+{
+	int nretry = 0;
+	u64 rrin = 0x1;
+	u64 room = 0;
+	struct afu *afu = cmd->parent;
+	ulong lock_flags;
+
+	pr_debug("%s: cmd=%p\n", __func__, cmd);
+
+	spin_lock_irqsave(&cmd->slock, lock_flags);
+
+	/* Already completed? */
+	if (cmd->sa.host_use_b[0] & B_DONE) {
+		spin_unlock_irqrestore(&cmd->slock, lock_flags);
+		return;
+	}
+
+	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
+	spin_unlock_irqrestore(&cmd->slock, lock_flags);
+
+	/*
+	 * We really want to send this reset at all costs, so spread
+	 * out wait time on successive retries for available room.
+	 */
+	do {
+		room = readq_be(&afu->host_map->cmd_room);
+		atomic64_set(&afu->room, room);
+		if (room)
+			goto write_rrin;
+		udelay(nretry);
+	} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+	pr_err("%s: no cmd_room to send reset\n", __func__);
+	return;
+
+write_rrin:
+	nretry = 0;
+	writeq_be(rrin, &afu->host_map->ioarrin);
+	do {
+		rrin = readq_be(&afu->host_map->ioarrin);
+		if (rrin != 0x1)
+			break;
+		/* Double delay each time */
+		udelay(2 << nretry);
+	} while (nretry++ < MC_ROOM_RETRY_CNT);
+}
+
+/**
+ * send_cmd() - sends an AFU command
+ * @afu:	AFU associated with the host.
+ * @cmd:	AFU command to send.
+ *
+ * Return:
+ *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
+ */
+static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
+{
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
+	int nretry = 0;
+	int rc = 0;
+	u64 room;
+	long newval;
+
+	/*
+	 * This routine is used by critical users such an AFU sync and to
+	 * send a task management function (TMF). Thus we want to retry a
+	 * bit before returning an error. To avoid the performance penalty
+	 * of MMIO, we spread the update of 'room' over multiple commands.
+	 */
+retry:
+	newval = atomic64_dec_if_positive(&afu->room);
+	if (!newval) {
+		do {
+			room = readq_be(&afu->host_map->cmd_room);
+			atomic64_set(&afu->room, room);
+			if (room)
+				goto write_ioarrin;
+			udelay(nretry);
+		} while (nretry++ < MC_ROOM_RETRY_CNT);
+
+		dev_err(dev, "%s: no cmd_room to send 0x%X\n",
+		       __func__, cmd->rcb.cdb[0]);
+
+		goto no_room;
+	} else if (unlikely(newval < 0)) {
+		/* This should be rare. i.e. Only if two threads race and
+		 * decrement before the MMIO read is done. In this case
+		 * just benefit from the other thread having updated
+		 * afu->room.
+		 */
+		if (nretry++ < MC_ROOM_RETRY_CNT) {
+			udelay(nretry);
+			goto retry;
+		}
+
+		goto no_room;
+	}
+
+write_ioarrin:
+	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
+out:
+	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
+		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+	return rc;
+
+no_room:
+	afu->read_room = true;
+	schedule_work(&cfg->work_q);
+	rc = SCSI_MLQUEUE_HOST_BUSY;
+	goto out;
+}
+
+/**
+ * wait_resp() - polls for a response or timeout to a sent AFU command
+ * @afu:	AFU associated with the host.
+ * @cmd:	AFU command that was sent.
+ */
+static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
+{
+	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
+
+	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
+	if (!timeout)
+		context_reset(cmd);
+
+	if (unlikely(cmd->sa.ioasc != 0))
+		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
+		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
+		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
+		       cmd->sa.rc.fc_rc);
+}
+
 /**
  * send_tmf() - sends a Task Management Function (TMF)
  * @afu:	AFU to checkout from.
@@ -266,8 +400,7 @@ static void cmd_complete(struct afu_cmd *cmd)
  * @tmfcmd:	TMF command to send.
  *
  * Return:
- *	0 on success
- *	SCSI_MLQUEUE_HOST_BUSY when host is busy
+ *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  */
 static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 {
@@ -277,25 +410,27 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 	short lflag = 0;
 	struct Scsi_Host *host = scp->device->host;
 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	struct device *dev = &cfg->dev->dev;
 	ulong lock_flags;
 	int rc = 0;
+	ulong to;
 
-	cmd = cxlflash_cmd_checkout(afu);
+	cmd = cmd_checkout(afu);
 	if (unlikely(!cmd)) {
-		pr_err("%s: could not get a free command\n", __func__);
+		dev_err(dev, "%s: could not get a free command\n", __func__);
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
 
-	/* If a Task Management Function is active, do not send one more.
-	 */
-	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+	/* When Task Management Function is active do not send another */
+	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 	if (cfg->tmf_active)
-		wait_event_interruptible_locked_irq(cfg->tmf_waitq,
-						    !cfg->tmf_active);
+		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
+						  !cfg->tmf_active,
+						  cfg->tmf_slock);
 	cfg->tmf_active = true;
 	cmd->cmd_tmf = true;
-	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 
 	cmd->rcb.ctx_id = afu->ctx_hndl;
 	cmd->rcb.port_sel = port_sel;
@@ -313,18 +448,27 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
 	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
 
 	/* Send the command */
-	rc = cxlflash_send_cmd(afu, cmd);
+	rc = send_cmd(afu, cmd);
 	if (unlikely(rc)) {
-		cxlflash_cmd_checkin(cmd);
-		spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+		cmd_checkin(cmd);
+		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 		cfg->tmf_active = false;
-		spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 		goto out;
 	}
 
-	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
-	wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
-	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
+	to = msecs_to_jiffies(5000);
+	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
+						       !cfg->tmf_active,
+						       cfg->tmf_slock,
+						       to);
+	if (!to) {
+		cfg->tmf_active = false;
+		dev_err(dev, "%s: TMF timed out!\n", __func__);
+		rc = -1;
+	}
+	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 out:
 	return rc;
 }
@@ -345,15 +489,13 @@ static const char *cxlflash_driver_info(struct Scsi_Host *host)
  * @host:	SCSI host associated with device.
  * @scp:	SCSI command to send.
  *
- * Return:
- *	0 on success
- *	SCSI_MLQUEUE_HOST_BUSY when host is busy
+ * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
  */
 static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 {
 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
 	struct afu *afu = cfg->afu;
-	struct pci_dev *pdev = cfg->dev;
+	struct device *dev = &cfg->dev->dev;
 	struct afu_cmd *cmd;
 	u32 port_sel = scp->device->channel + 1;
 	int nseg, i, ncount;
@@ -362,34 +504,34 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	short lflag = 0;
 	int rc = 0;
 
-	pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
-		 __func__, scp, host->host_no, scp->device->channel,
-		 scp->device->id, scp->device->lun,
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
+			    "cdb=(%08X-%08X-%08X-%08X)\n",
+			    __func__, scp, host->host_no, scp->device->channel,
+			    scp->device->id, scp->device->lun,
+			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
 
-	/* If a Task Management Function is active, wait for it to complete
+	/*
+	 * If a Task Management Function is active, wait for it to complete
 	 * before continuing with regular commands.
 	 */
-	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 	if (cfg->tmf_active) {
-		spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
-	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 
 	switch (cfg->state) {
-	case STATE_LIMBO:
-		dev_dbg_ratelimited(&cfg->dev->dev, "%s: device in limbo!\n",
-				    __func__);
+	case STATE_RESET:
+		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	case STATE_FAILTERM:
-		dev_dbg_ratelimited(&cfg->dev->dev, "%s: device has failed!\n",
-				    __func__);
+		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
 		scp->result = (DID_NO_CONNECT << 16);
 		scp->scsi_done(scp);
 		rc = 0;
@@ -398,9 +540,9 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 		break;
 	}
 
-	cmd = cxlflash_cmd_checkout(afu);
+	cmd = cmd_checkout(afu);
 	if (unlikely(!cmd)) {
-		pr_err("%s: could not get a free command\n", __func__);
+		dev_err(dev, "%s: could not get a free command\n", __func__);
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -422,7 +564,7 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 
 	nseg = scsi_dma_map(scp);
 	if (unlikely(nseg < 0)) {
-		dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
+		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
 			__func__, nseg);
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
@@ -438,369 +580,56 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
 	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
 
 	/* Send the command */
-	rc = cxlflash_send_cmd(afu, cmd);
+	rc = send_cmd(afu, cmd);
 	if (unlikely(rc)) {
-		cxlflash_cmd_checkin(cmd);
+		cmd_checkin(cmd);
 		scsi_dma_unmap(scp);
 	}
 
 out:
+	pr_devel("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 }
 
 /**
- * cxlflash_eh_device_reset_handler() - reset a single LUN
- * @scp:	SCSI command to send.
- *
- * Return:
- *	SUCCESS as defined in scsi/scsi.h
- *	FAILED as defined in scsi/scsi.h
+ * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
+ * @cfg:	Internal structure associated with the host.
  */
-static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
+static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
 {
-	int rc = SUCCESS;
-	struct Scsi_Host *host = scp->device->host;
-	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
-	struct afu *afu = cfg->afu;
-	int rcr = 0;
-
-	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
-		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
-		 host->host_no, scp->device->channel,
-		 scp->device->id, scp->device->lun,
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
-
-	switch (cfg->state) {
-	case STATE_NORMAL:
-		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
-		if (unlikely(rcr))
-			rc = FAILED;
-		break;
-	case STATE_LIMBO:
-		wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
-		if (cfg->state == STATE_NORMAL)
-			break;
-		/* fall through */
-	default:
-		rc = FAILED;
-		break;
-	}
+	struct pci_dev *pdev = cfg->dev;
 
 
-	pr_debug("%s: returning rc=%d\n", __func__, rc);
-	return rc;
+	if (pci_channel_offline(pdev))
+		wait_event_timeout(cfg->reset_waitq,
+				   !pci_channel_offline(pdev),
+				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
 }
 }
 
 
 /**
- * @scp:	SCSI command from stack identifying host.
- *
- * Return:
- *	SUCCESS as defined in scsi/scsi.h
- *	FAILED as defined in scsi/scsi.h
+ * free_mem() - free memory associated with the AFU
+ * @cfg:	Internal structure associated with the host.
  */
  */
-static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
+static void free_mem(struct cxlflash_cfg *cfg)
 {
-	int rcr = 0;
-	struct Scsi_Host *host = scp->device->host;
-	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	int i;
+	char *buf = NULL;
+	struct afu *afu = cfg->afu;
 
 
-		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
-		 host->host_no, scp->device->channel,
-		 scp->device->id, scp->device->lun,
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
-		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+	if (cfg->afu) {
+		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
+			buf = afu->cmd[i].buf;
+			if (!((u64)buf & (PAGE_SIZE - 1)))
+				free_page((ulong)buf);
+		}
 
-	switch (cfg->state) {
-	case STATE_NORMAL:
-		cfg->state = STATE_LIMBO;
-		scsi_block_requests(cfg->host);
-		cxlflash_mark_contexts_error(cfg);
-		rcr = cxlflash_afu_reset(cfg);
-		if (rcr) {
-			rc = FAILED;
-			cfg->state = STATE_FAILTERM;
-		} else
-			cfg->state = STATE_NORMAL;
-		wake_up_all(&cfg->limbo_waitq);
-		scsi_unblock_requests(cfg->host);
-		break;
-	case STATE_LIMBO:
-		wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
-		if (cfg->state == STATE_NORMAL)
-			break;
-		/* fall through */
-	default:
-		rc = FAILED;
-		break;
+		free_pages((ulong)afu, get_order(sizeof(struct afu)));
+		cfg->afu = NULL;
 	}
 	}
-	pr_debug("%s: returning rc=%d\n", __func__, rc);
-	return rc;
 }
 }
 
 /**
- * @sdev:	SCSI device destined for queue depth change.
- * @qdepth:	Requested queue depth value to set.
- *
- * The requested queue depth is capped to the maximum supported value.
- *
- * Return: The actual queue depth set.
- */
-static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
-{
-
-	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
-		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
-
-	scsi_change_queue_depth(sdev, qdepth);
-	return sdev->queue_depth;
-}
-
-/**
- * cxlflash_show_port_status() - queries and presents the current port status
- * @dev:	Generic device associated with the host owning the port.
- * @attr:	Device attribute representing the port.
- * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_port_status(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
-	struct afu *afu = cfg->afu;
-
-	char *disp_status;
-	int rc;
-	u32 port;
-	u64 status;
-	u64 *fc_regs;
-
-	rc = kstrtouint((attr->attr.name + 4), 10, &port);
-	if (rc || (port >= NUM_FC_PORTS))
-		return 0;
-
-	fc_regs = &afu->afu_map->global.fc_regs[port][0];
-	status =
-	    (readq_be(&fc_regs[FC_MTIP_STATUS / 8]) & FC_MTIP_STATUS_MASK);
-
-	if (status == FC_MTIP_STATUS_ONLINE)
-		disp_status = "online";
-	else if (status == FC_MTIP_STATUS_OFFLINE)
-		disp_status = "offline";
-	else
-		disp_status = "unknown";
-
-	return snprintf(buf, PAGE_SIZE, "%s\n", disp_status);
-}
-
-/**
- * cxlflash_show_lun_mode() - presents the current LUN mode of the host
- * @dev:	Generic device associated with the host.
- * @attr:	Device attribute representing the lun mode.
- * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_lun_mode(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
-	struct afu *afu = cfg->afu;
-
-	return snprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
-}
-
-/**
- * cxlflash_store_lun_mode() - sets the LUN mode of the host
- * @dev:	Generic device associated with the host.
- * @attr:	Device attribute representing the lun mode.
- * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
- * @count:	Length of data resizing in @buf.
- *
- * The CXL Flash AFU supports a dummy LUN mode where the external
- * links and storage are not required. Space on the FPGA is used
- * to create 1 or 2 small LUNs which are presented to the system
- * as if they were a normal storage device. This feature is useful
- * during development and also provides manufacturing with a way
- * to test the AFU without an actual device.
- *
- * 0 = external LUN[s] (default)
- * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
- * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
- * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
- * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_store_lun_mode(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t count)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
-	struct afu *afu = cfg->afu;
-	int rc;
-	u32 lun_mode;
-
-	rc = kstrtouint(buf, 10, &lun_mode);
-	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
-		afu->internal_lun = lun_mode;
-		cxlflash_afu_reset(cfg);
-		scsi_scan_host(cfg->host);
-	}
-
-	return count;
-}
-
-/**
- * cxlflash_show_ioctl_version() - presents the current ioctl version of the host
- * @dev:	Generic device associated with the host.
- * @attr:	Device attribute representing the ioctl version.
- * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_ioctl_version(struct device *dev,
-					   struct device_attribute *attr,
-					   char *buf)
-{
-	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
-}
-
-/**
- * cxlflash_show_dev_mode() - presents the current mode of the device
- * @dev:	Generic device associated with the device.
- * @attr:	Device attribute representing the device mode.
- * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
- *
- * Return: The size of the ASCII string returned in @buf.
- */
-static ssize_t cxlflash_show_dev_mode(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct scsi_device *sdev = to_scsi_device(dev);
-
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			sdev->hostdata ? "superpipe" : "legacy");
-}
-
-/**
- * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
- * @cxlflash:	Internal structure associated with the host.
- */
-static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
-{
-	struct pci_dev *pdev = cfg->dev;
-
-	if (pci_channel_offline(pdev))
-		wait_event_timeout(cfg->limbo_waitq,
-				   !pci_channel_offline(pdev),
-				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
-}
-
-/*
- * Host attributes
- */
-static DEVICE_ATTR(port0, S_IRUGO, cxlflash_show_port_status, NULL);
-static DEVICE_ATTR(port1, S_IRUGO, cxlflash_show_port_status, NULL);
-static DEVICE_ATTR(lun_mode, S_IRUGO | S_IWUSR, cxlflash_show_lun_mode,
-		   cxlflash_store_lun_mode);
-static DEVICE_ATTR(ioctl_version, S_IRUGO, cxlflash_show_ioctl_version, NULL);
-
-static struct device_attribute *cxlflash_host_attrs[] = {
-	&dev_attr_port0,
-	&dev_attr_port1,
-	&dev_attr_lun_mode,
-	&dev_attr_ioctl_version,
-	NULL
-};
-
-/*
- * Device attributes
- */
-static DEVICE_ATTR(mode, S_IRUGO, cxlflash_show_dev_mode, NULL);
-
-static struct device_attribute *cxlflash_dev_attrs[] = {
-	&dev_attr_mode,
-	NULL
-};
-
-/*
- * Host template
- */
-static struct scsi_host_template driver_template = {
-	.module = THIS_MODULE,
-	.name = CXLFLASH_ADAPTER_NAME,
-	.info = cxlflash_driver_info,
-	.ioctl = cxlflash_ioctl,
-	.proc_name = CXLFLASH_NAME,
-	.queuecommand = cxlflash_queuecommand,
-	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
-	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
-	.change_queue_depth = cxlflash_change_queue_depth,
-	.cmd_per_lun = 16,
-	.can_queue = CXLFLASH_MAX_CMDS,
-	.this_id = -1,
-	.sg_tablesize = SG_NONE,	/* No scatter gather support. */
-	.max_sectors = CXLFLASH_MAX_SECTORS,
-	.use_clustering = ENABLE_CLUSTERING,
-	.shost_attrs = cxlflash_host_attrs,
-	.sdev_attrs = cxlflash_dev_attrs,
-};
-
-/*
- * Device dependent values
- */
-static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
-
-/*
- * PCI device binding table
- */
-static struct pci_device_id cxlflash_pci_table[] = {
-	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
-	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
-	{}
-};
-
-MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
-
-/**
- * free_mem() - free memory associated with the AFU
- * @cxlflash:	Internal structure associated with the host.
- */
-static void free_mem(struct cxlflash_cfg *cfg)
-{
-	int i;
-	char *buf = NULL;
-	struct afu *afu = cfg->afu;
-
-	if (cfg->afu) {
-		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
-			buf = afu->cmd[i].buf;
-			if (!((u64)buf & (PAGE_SIZE - 1)))
-				free_page((ulong)buf);
-		}
-
-		free_pages((ulong)afu, get_order(sizeof(struct afu)));
-		cfg->afu = NULL;
-	}
-}
-
-/**
- * stop_afu() - stops the AFU command timers and unmaps the MMIO space
- * @cxlflash:	Internal structure associated with the host.
+ * stop_afu() - stops the AFU command timers and unmaps the MMIO space
+ * @cfg:	Internal structure associated with the host.
  *
  * Safe to call with AFU in a partially allocated/initialized state.
  */
@@ -814,7 +643,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
 			complete(&afu->cmd[i].cevent);
 
 		if (likely(afu->afu_map)) {
-			cxl_psa_unmap((void *)afu->afu_map);
+			cxl_psa_unmap((void __iomem *)afu->afu_map);
 			afu->afu_map = NULL;
 		}
 	}
@@ -822,7 +651,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
 
 /**
  * term_mc() - terminates the master context
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  * @level:	Depth of allocation, where to begin waterfall tear down.
  *
  * Safe to call with AFU/MC in partially allocated/initialized state.
@@ -831,9 +660,10 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
 {
 	int rc = 0;
 	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
 
 	if (!afu || !cfg->mcctx) {
-		pr_err("%s: returning from term_mc with NULL afu or MC\n",
+		dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n",
 		       __func__);
 		return;
 	}
@@ -857,7 +687,7 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
 
 /**
  * term_afu() - terminates the AFU
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  *
  * Safe to call with AFU/MC in partially allocated/initialized state.
  */
@@ -885,11 +715,12 @@ static void cxlflash_remove(struct pci_dev *pdev)
 	/* If a Task Management Function is active, wait for it to complete
 	/* If a Task Management Function is active, wait for it to complete
 	 * before continuing with remove.
 	 * before continuing with remove.
 	 */
 	 */
-	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
+	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
 	if (cfg->tmf_active)
 	if (cfg->tmf_active)
-		wait_event_interruptible_locked_irq(cfg->tmf_waitq,
-						    !cfg->tmf_active);
-	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
+		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
+						  !cfg->tmf_active,
+						  cfg->tmf_slock);
+	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
 
 
 	cfg->state = STATE_FAILTERM;
 	cfg->state = STATE_FAILTERM;
 	cxlflash_stop_term_user_contexts(cfg);
 	cxlflash_stop_term_user_contexts(cfg);
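
Editorial note: the hunk above moves the TMF wait off the wait queue's embedded lock and onto the new, dedicated tmf_slock spinlock via wait_event_interruptible_lock_irq(). A minimal sketch of the completion side such a wait pairs with is shown below; the field and lock names come from this hunk, while the function name and body are illustrative assumptions, not code from this commit.

/* Editorial sketch only -- not part of this commit. */
static void example_tmf_complete(struct cxlflash_cfg *cfg)
{
	ulong lock_flags;

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	cfg->tmf_active = false;	/* condition the waiter checks */
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	wake_up_all(&cfg->tmf_waitq);	/* lets waiters such as cxlflash_remove() proceed */
}
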
@@ -898,16 +729,16 @@ static void cxlflash_remove(struct pci_dev *pdev)
 	case INIT_STATE_SCSI:
 	case INIT_STATE_SCSI:
 		cxlflash_term_local_luns(cfg);
 		cxlflash_term_local_luns(cfg);
 		scsi_remove_host(cfg->host);
 		scsi_remove_host(cfg->host);
-		scsi_host_put(cfg->host);
-		/* Fall through */
+		/* fall through */
 	case INIT_STATE_AFU:
 	case INIT_STATE_AFU:
 		term_afu(cfg);
 		term_afu(cfg);
+		cancel_work_sync(&cfg->work_q);
 	case INIT_STATE_PCI:
 	case INIT_STATE_PCI:
 		pci_release_regions(cfg->dev);
 		pci_release_regions(cfg->dev);
 		pci_disable_device(pdev);
 		pci_disable_device(pdev);
 	case INIT_STATE_NONE:
 	case INIT_STATE_NONE:
-		flush_work(&cfg->work_q);
 		free_mem(cfg);
 		free_mem(cfg);
+		scsi_host_put(cfg->host);
 		break;
 		break;
 	}
 	}
 
 
@@ -916,7 +747,7 @@ static void cxlflash_remove(struct pci_dev *pdev)
 
 
 /**
 /**
  * alloc_mem() - allocates the AFU and its command pool
  * alloc_mem() - allocates the AFU and its command pool
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  *
  *
  * A partially allocated state remains on failure.
  * A partially allocated state remains on failure.
  *
  *
@@ -929,15 +760,14 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
 	int rc = 0;
 	int rc = 0;
 	int i;
 	int i;
 	char *buf = NULL;
 	char *buf = NULL;
+	struct device *dev = &cfg->dev->dev;
 
 
-	/* This allocation is about 12K, i.e. only 1 64k page
-	 * and upto 4 4k pages
-	 */
+	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
 	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 					    get_order(sizeof(struct afu)));
 					    get_order(sizeof(struct afu)));
 	if (unlikely(!cfg->afu)) {
 	if (unlikely(!cfg->afu)) {
-		pr_err("%s: cannot get %d free pages\n",
-		       __func__, get_order(sizeof(struct afu)));
+		dev_err(dev, "%s: cannot get %d free pages\n",
+			__func__, get_order(sizeof(struct afu)));
 		rc = -ENOMEM;
 		rc = -ENOMEM;
 		goto out;
 		goto out;
 	}
 	}
@@ -948,7 +778,8 @@ static int alloc_mem(struct cxlflash_cfg *cfg)
 		if (!((u64)buf & (PAGE_SIZE - 1))) {
 		if (!((u64)buf & (PAGE_SIZE - 1))) {
 			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 			buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 			if (unlikely(!buf)) {
 			if (unlikely(!buf)) {
-				pr_err("%s: Allocate command buffers fail!\n",
+				dev_err(dev,
+					"%s: Allocate command buffers fail!\n",
 				       __func__);
 				       __func__);
 				rc = -ENOMEM;
 				rc = -ENOMEM;
 				free_mem(cfg);
 				free_mem(cfg);
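
Editorial note on the allocation pattern in this hunk: command buffers are carved sequentially out of whole pages, and a fresh zeroed page is obtained only when the running buffer pointer lands on a page boundary; the matching free_mem() (visible at the top of this diff) frees only those page-aligned pointers. The standalone sketch below mirrors that carving logic; the buffer size and command count are illustrative assumptions, not the driver's values.

/* Editorial sketch only -- not part of this commit. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ		4096u
#define CMD_BUFSIZE	512u	/* per-command buffer size: assumption */
#define NUM_CMDS	16u	/* command count: assumption */

int main(void)
{
	char *buf = NULL;
	unsigned int i;

	for (i = 0; i < NUM_CMDS; i++, buf += CMD_BUFSIZE) {
		if (((uintptr_t)buf & (PAGE_SZ - 1)) == 0) {
			/* On a page boundary: carve from a brand new page */
			if (posix_memalign((void **)&buf, PAGE_SZ, PAGE_SZ))
				return 1;
		}
		printf("cmd %2u -> buffer %p\n", i, (void *)buf);
	}
	/* Freeing (page-aligned pointers only) omitted for brevity */
	return 0;
}
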
@@ -967,12 +798,9 @@ out:
 
 
 /**
 /**
  * init_pci() - initializes the host as a PCI device
  * init_pci() - initializes the host as a PCI device
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  *
  *
- * Return:
- *	0 on success
- *	-EIO on unable to communicate with device
- *	A return code from the PCI sub-routines
+ * Return: 0 on success, -errno on failure
  */
  */
 static int init_pci(struct cxlflash_cfg *cfg)
 static int init_pci(struct cxlflash_cfg *cfg)
 {
 {
@@ -1052,11 +880,9 @@ out_release_regions:
 
 
 /**
 /**
  * init_scsi() - adds the host to the SCSI stack and kicks off host scan
  * init_scsi() - adds the host to the SCSI stack and kicks off host scan
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  *
  *
- * Return:
- *	0 on success
- *	A return code from adding the host
+ * Return: 0 on success, -errno on failure
  */
  */
 static int init_scsi(struct cxlflash_cfg *cfg)
 static int init_scsi(struct cxlflash_cfg *cfg)
 {
 {
@@ -1085,7 +911,7 @@ out:
  * that the FC link layer has synced, completed the handshaking process, and
  * that the FC link layer has synced, completed the handshaking process, and
  * is ready for login to start.
  * is ready for login to start.
  */
  */
-static void set_port_online(u64 *fc_regs)
+static void set_port_online(__be64 __iomem *fc_regs)
 {
 {
 	u64 cmdcfg;
 	u64 cmdcfg;
 
 
@@ -1101,7 +927,7 @@ static void set_port_online(u64 *fc_regs)
  *
  *
  * The provided MMIO region must be mapped prior to call.
  * The provided MMIO region must be mapped prior to call.
  */
  */
-static void set_port_offline(u64 *fc_regs)
+static void set_port_offline(__be64 __iomem *fc_regs)
 {
 {
 	u64 cmdcfg;
 	u64 cmdcfg;
 
 
@@ -1125,7 +951,7 @@ static void set_port_offline(u64 *fc_regs)
  *	FALSE (0) when the specified port fails to come online after timeout
  *	FALSE (0) when the specified port fails to come online after timeout
  *	-EINVAL when @delay_us is less than 1000
  *	-EINVAL when @delay_us is less than 1000
  */
  */
-static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
+static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 {
 {
 	u64 status;
 	u64 status;
 
 
@@ -1156,7 +982,7 @@ static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
  *	FALSE (0) when the specified port fails to go offline after timeout
  *	FALSE (0) when the specified port fails to go offline after timeout
  *	-EINVAL when @delay_us is less than 1000
  *	-EINVAL when @delay_us is less than 1000
  */
  */
-static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
+static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 {
 {
 	u64 status;
 	u64 status;
 
 
@@ -1191,9 +1017,10 @@ static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
  *	0 when the WWPN is successfully written and the port comes back online
  *	0 when the WWPN is successfully written and the port comes back online
  *	-1 when the port fails to go offline or come back up online
  *	-1 when the port fails to go offline or come back up online
  */
  */
-static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
+static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
+			u64 wwpn)
 {
 {
-	int ret = 0;
+	int rc = 0;
 
 
 	set_port_offline(fc_regs);
 	set_port_offline(fc_regs);
 
 
@@ -1201,33 +1028,26 @@ static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
 			       FC_PORT_STATUS_RETRY_CNT)) {
 			       FC_PORT_STATUS_RETRY_CNT)) {
 		pr_debug("%s: wait on port %d to go offline timed out\n",
 		pr_debug("%s: wait on port %d to go offline timed out\n",
 			 __func__, port);
 			 __func__, port);
-		ret = -1; /* but continue on to leave the port back online */
+		rc = -1; /* but continue on to leave the port back online */
 	}
 	}
 
 
-	if (ret == 0)
+	if (rc == 0)
 		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
 		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
 
 
+	/* Always return success after programming WWPN */
+	rc = 0;
+
 	set_port_online(fc_regs);
 	set_port_online(fc_regs);
 
 
 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 			      FC_PORT_STATUS_RETRY_CNT)) {
 			      FC_PORT_STATUS_RETRY_CNT)) {
-		pr_debug("%s: wait on port %d to go online timed out\n",
-			 __func__, port);
-		ret = -1;
-
-		/*
-		 * Override for internal lun!!!
-		 */
-		if (afu->internal_lun) {
-			pr_debug("%s: Overriding port %d online timeout!!!\n",
-				 __func__, port);
-			ret = 0;
-		}
+		pr_err("%s: wait on port %d to go online timed out\n",
+		       __func__, port);
 	}
 	}
 
 
-	pr_debug("%s: returning rc=%d\n", __func__, ret);
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
 
 
-	return ret;
+	return rc;
 }
 }
 
 
 /**
 /**
@@ -1243,7 +1063,7 @@ static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
  * the alternate port exclusively while the reset takes place.
  * the alternate port exclusively while the reset takes place.
  * failure to come online is overridden.
  * failure to come online is overridden.
  */
  */
-static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
+static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
 {
 {
 	u64 port_sel;
 	u64 port_sel;
 
 
@@ -1280,19 +1100,19 @@ static const struct asyc_intr_info ainfo[] = {
 	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
 	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
 	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
 	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
 	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
 	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
-	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
+	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
 	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
 	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
-	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
+	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
 	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
 	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
-	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
+	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
 	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
 	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
-	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
+	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
-	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
+	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
 	{0x0, "", 0, 0}		/* terminator */
 	{0x0, "", 0, 0}		/* terminator */
 };
 };
 
 
@@ -1454,47 +1274,46 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
 static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 {
 {
 	struct afu *afu = (struct afu *)data;
 	struct afu *afu = (struct afu *)data;
-	struct cxlflash_cfg *cfg;
+	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
 	u64 reg_unmasked;
 	u64 reg_unmasked;
 	const struct asyc_intr_info *info;
 	const struct asyc_intr_info *info;
-	struct sisl_global_map *global = &afu->afu_map->global;
+	struct sisl_global_map __iomem *global = &afu->afu_map->global;
 	u64 reg;
 	u64 reg;
 	u8 port;
 	u8 port;
 	int i;
 	int i;
 
 
-	cfg = afu->parent;
-
 	reg = readq_be(&global->regs.aintr_status);
 	reg = readq_be(&global->regs.aintr_status);
 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
 
 
 	if (reg_unmasked == 0) {
 	if (reg_unmasked == 0) {
-		pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
-		       __func__, reg);
+		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
+			__func__, reg);
 		goto out;
 		goto out;
 	}
 	}
 
 
-	/* it is OK to clear AFU status before FC_ERROR */
+	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
 
 
-	/* check each bit that is on */
+	/* Check each bit that is on */
 	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
 	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
 		info = find_ainfo(1ULL << i);
 		info = find_ainfo(1ULL << i);
-		if ((reg_unmasked & 0x1) || !info)
+		if (((reg_unmasked & 0x1) == 0) || !info)
 			continue;
 			continue;
 
 
 		port = info->port;
 		port = info->port;
 
 
-		pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n",
-		       __func__, port, info->desc,
+		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
+			__func__, port, info->desc,
 		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
 		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));
 
 
 		/*
 		/*
-		 * do link reset first, some OTHER errors will set FC_ERROR
+		 * Do link reset first, some OTHER errors will set FC_ERROR
 		 * again if cleared before or w/o a reset
 		 * again if cleared before or w/o a reset
 		 */
 		 */
 		if (info->action & LINK_RESET) {
 		if (info->action & LINK_RESET) {
-			pr_err("%s: FC Port %d: resetting link\n",
-			       __func__, port);
+			dev_err(dev, "%s: FC Port %d: resetting link\n",
+				__func__, port);
 			cfg->lr_state = LINK_RESET_REQUIRED;
 			cfg->lr_state = LINK_RESET_REQUIRED;
 			cfg->lr_port = port;
 			cfg->lr_port = port;
 			schedule_work(&cfg->work_q);
 			schedule_work(&cfg->work_q);
@@ -1504,26 +1323,31 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
 			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
 			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
 
 
 			/*
 			/*
-			 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
+			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
 			 * should be the same and tracing one is sufficient.
 			 * should be the same and tracing one is sufficient.
 			 */
 			 */
 
 
-			pr_err("%s: fc %d: clearing fc_error 0x%08llX\n",
-			       __func__, port, reg);
+			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
+				__func__, port, reg);
 
 
 			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
 			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
 			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
 			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
 		}
 		}
+
+		if (info->action & SCAN_HOST) {
+			atomic_inc(&cfg->scan_host_needed);
+			schedule_work(&cfg->work_q);
+		}
 	}
 	}
 
 
 out:
 out:
-	pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu);
+	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
 	return IRQ_HANDLED;
 	return IRQ_HANDLED;
 }
 }
 
 
 /**
 /**
  * start_context() - starts the master context
  * start_context() - starts the master context
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  *
  *
  * Return: A success or failure value from CXL services.
  * Return: A success or failure value from CXL services.
  */
  */
@@ -1541,12 +1365,10 @@ static int start_context(struct cxlflash_cfg *cfg)
 
 
 /**
 /**
  * read_vpd() - obtains the WWPNs from VPD
  * read_vpd() - obtains the WWPNs from VPD
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
  * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
  *
  *
- * Return:
- *	0 on success
- *	-ENODEV when VPD or WWPN keywords not found
+ * Return: 0 on success, -errno on failure
  */
  */
 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 {
 {
@@ -1561,7 +1383,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 	/* Get the VPD data from the device */
 	/* Get the VPD data from the device */
 	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
 	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
 	if (unlikely(vpd_size <= 0)) {
 	if (unlikely(vpd_size <= 0)) {
-		pr_err("%s: Unable to read VPD (size = %ld)\n",
+		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
 		       __func__, vpd_size);
 		       __func__, vpd_size);
 		rc = -ENODEV;
 		rc = -ENODEV;
 		goto out;
 		goto out;
@@ -1571,7 +1393,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
 				    PCI_VPD_LRDT_RO_DATA);
 				    PCI_VPD_LRDT_RO_DATA);
 	if (unlikely(ro_start < 0)) {
 	if (unlikely(ro_start < 0)) {
-		pr_err("%s: VPD Read-only data not found\n", __func__);
+		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
+			__func__);
 		rc = -ENODEV;
 		rc = -ENODEV;
 		goto out;
 		goto out;
 	}
 	}
@@ -1600,8 +1423,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 
 
 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
 		if (unlikely(i < 0)) {
 		if (unlikely(i < 0)) {
-			pr_err("%s: Port %d WWPN not found in VPD\n",
-			       __func__, k);
+			dev_err(&dev->dev, "%s: Port %d WWPN not found "
+				"in VPD\n", __func__, k);
 			rc = -ENODEV;
 			rc = -ENODEV;
 			goto out;
 			goto out;
 		}
 		}
@@ -1609,7 +1432,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 		j = pci_vpd_info_field_size(&vpd_data[i]);
 		j = pci_vpd_info_field_size(&vpd_data[i]);
 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
-			pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n",
+			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
+				"VPD corrupt\n",
 			       __func__, k);
 			       __func__, k);
 			rc = -ENODEV;
 			rc = -ENODEV;
 			goto out;
 			goto out;
@@ -1618,8 +1442,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
 		if (unlikely(rc)) {
 		if (unlikely(rc)) {
-			pr_err("%s: Fail to convert port %d WWPN to integer\n",
-			       __func__, k);
+			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
+				"to integer\n", __func__, k);
 			rc = -ENODEV;
 			rc = -ENODEV;
 			goto out;
 			goto out;
 		}
 		}
@@ -1631,91 +1455,36 @@ out:
 }
 }
 
 
 /**
 /**
- * cxlflash_context_reset() - timeout handler for AFU commands
- * @cmd:	AFU command that timed out.
+ * init_pcr() - initialize the provisioning and control registers
+ * @cfg:	Internal structure associated with the host.
  *
  *
- * Sends a reset to the AFU.
+ * Also sets up fast access to the mapped registers and initializes AFU
+ * command fields that never change.
  */
  */
-void cxlflash_context_reset(struct afu_cmd *cmd)
+static void init_pcr(struct cxlflash_cfg *cfg)
 {
 {
-	int nretry = 0;
-	u64 rrin = 0x1;
-	u64 room = 0;
-	struct afu *afu = cmd->parent;
-	ulong lock_flags;
-
-	pr_debug("%s: cmd=%p\n", __func__, cmd);
-
-	spin_lock_irqsave(&cmd->slock, lock_flags);
-
-	/* Already completed? */
-	if (cmd->sa.host_use_b[0] & B_DONE) {
-		spin_unlock_irqrestore(&cmd->slock, lock_flags);
-		return;
-	}
-
-	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
-	spin_unlock_irqrestore(&cmd->slock, lock_flags);
-
-	/*
-	 * We really want to send this reset at all costs, so spread
-	 * out wait time on successive retries for available room.
-	 */
-	do {
-		room = readq_be(&afu->host_map->cmd_room);
-		atomic64_set(&afu->room, room);
-		if (room)
-			goto write_rrin;
-		udelay(nretry);
-	} while (nretry++ < MC_ROOM_RETRY_CNT);
-
-	pr_err("%s: no cmd_room to send reset\n", __func__);
-	return;
-
-write_rrin:
-	nretry = 0;
-	writeq_be(rrin, &afu->host_map->ioarrin);
-	do {
-		rrin = readq_be(&afu->host_map->ioarrin);
-		if (rrin != 0x1)
-			break;
-		/* Double delay each time */
-		udelay(2 ^ nretry);
-	} while (nretry++ < MC_ROOM_RETRY_CNT);
-}
-
-/**
- * init_pcr() - initialize the provisioning and control registers
- * @cxlflash:	Internal structure associated with the host.
- *
- * Also sets up fast access to the mapped registers and initializes AFU
- * command fields that never change.
- */
-void init_pcr(struct cxlflash_cfg *cfg)
-{
-	struct afu *afu = cfg->afu;
-	struct sisl_ctrl_map *ctrl_map;
-	int i;
+	struct afu *afu = cfg->afu;
+	struct sisl_ctrl_map __iomem *ctrl_map;
+	int i;
 
 
 	for (i = 0; i < MAX_CONTEXT; i++) {
 	for (i = 0; i < MAX_CONTEXT; i++) {
 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
-		/* disrupt any clients that could be running */
-		/* e. g. clients that survived a master restart */
+		/* Disrupt any clients that could be running */
+		/* e.g. clients that survived a master restart */
 		writeq_be(0, &ctrl_map->rht_start);
 		writeq_be(0, &ctrl_map->rht_start);
 		writeq_be(0, &ctrl_map->rht_cnt_id);
 		writeq_be(0, &ctrl_map->rht_cnt_id);
 		writeq_be(0, &ctrl_map->ctx_cap);
 		writeq_be(0, &ctrl_map->ctx_cap);
 	}
 	}
 
 
-	/* copy frequently used fields into afu */
+	/* Copy frequently used fields into afu */
 	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
 	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
-	/* ctx_hndl is 16 bits in CAIA */
 	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
 	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
 	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
 	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
 
 
 	/* Program the Endian Control for the master context */
 	/* Program the Endian Control for the master context */
 	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
 	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
 
 
-	/* initialize cmd fields that never change */
+	/* Initialize cmd fields that never change */
 	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
 	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
 		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
 		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
 		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
 		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
@@ -1725,11 +1494,12 @@ void init_pcr(struct cxlflash_cfg *cfg)
 
 
 /**
 /**
  * init_global() - initialize AFU global registers
  * init_global() - initialize AFU global registers
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  */
  */
-int init_global(struct cxlflash_cfg *cfg)
+static int init_global(struct cxlflash_cfg *cfg)
 {
 {
 	struct afu *afu = cfg->afu;
 	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
 	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
 	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
 	int i = 0, num_ports = 0;
 	int i = 0, num_ports = 0;
 	int rc = 0;
 	int rc = 0;
@@ -1737,13 +1507,13 @@ int init_global(struct cxlflash_cfg *cfg)
 
 
 	rc = read_vpd(cfg, &wwpn[0]);
 	rc = read_vpd(cfg, &wwpn[0]);
 	if (rc) {
 	if (rc) {
-		pr_err("%s: could not read vpd rc=%d\n", __func__, rc);
+		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
 		goto out;
 		goto out;
 	}
 	}
 
 
 	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
 	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
 
 
-	/* set up RRQ in AFU for master issued cmds */
+	/* Set up RRQ in AFU for master issued cmds */
 	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
 	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
 	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
 	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
 
 
@@ -1756,9 +1526,9 @@ int init_global(struct cxlflash_cfg *cfg)
 	/* checker on if dual afu */
 	/* checker on if dual afu */
 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
 
 
-	/* global port select: select either port */
+	/* Global port select: select either port */
 	if (afu->internal_lun) {
 	if (afu->internal_lun) {
-		/* only use port 0 */
+		/* Only use port 0 */
 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
 		num_ports = NUM_FC_PORTS - 1;
 		num_ports = NUM_FC_PORTS - 1;
 	} else {
 	} else {
@@ -1767,20 +1537,20 @@ int init_global(struct cxlflash_cfg *cfg)
 	}
 	}
 
 
 	for (i = 0; i < num_ports; i++) {
 	for (i = 0; i < num_ports; i++) {
-		/* unmask all errors (but they are still masked at AFU) */
+		/* Unmask all errors (but they are still masked at AFU) */
 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
 		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
-		/* clear CRC error cnt & set a threshold */
+		/* Clear CRC error cnt & set a threshold */
 		(void)readq_be(&afu->afu_map->global.
 		(void)readq_be(&afu->afu_map->global.
 			       fc_regs[i][FC_CNT_CRCERR / 8]);
 			       fc_regs[i][FC_CNT_CRCERR / 8]);
 		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
 		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
 			  [FC_CRC_THRESH / 8]);
 			  [FC_CRC_THRESH / 8]);
 
 
-		/* set WWPNs. If already programmed, wwpn[i] is 0 */
+		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
 		if (wwpn[i] != 0 &&
 		if (wwpn[i] != 0 &&
 		    afu_set_wwpn(afu, i,
 		    afu_set_wwpn(afu, i,
 				 &afu->afu_map->global.fc_regs[i][0],
 				 &afu->afu_map->global.fc_regs[i][0],
 				 wwpn[i])) {
 				 wwpn[i])) {
-			pr_err("%s: failed to set WWPN on port %d\n",
+			dev_err(dev, "%s: failed to set WWPN on port %d\n",
 			       __func__, i);
 			       __func__, i);
 			rc = -EIO;
 			rc = -EIO;
 			goto out;
 			goto out;
@@ -1789,18 +1559,17 @@ int init_global(struct cxlflash_cfg *cfg)
 		 * offline/online transitions and a PLOGI
 		 * offline/online transitions and a PLOGI
 		 */
 		 */
 		msleep(100);
 		msleep(100);
-
 	}
 	}
 
 
-	/* set up master's own CTX_CAP to allow real mode, host translation */
-	/* tbls, afu cmds and read/write GSCSI cmds. */
+	/* Set up master's own CTX_CAP to allow real mode, host translation */
+	/* tables, afu cmds and read/write GSCSI cmds. */
 	/* First, unlock ctx_cap write by reading mbox */
 	/* First, unlock ctx_cap write by reading mbox */
 	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
 	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
 	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
 	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
 		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
 		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
 		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
 		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
 		  &afu->ctrl_map->ctx_cap);
 		  &afu->ctrl_map->ctx_cap);
-	/* init heartbeat */
+	/* Initialize heartbeat */
 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
 
 
 out:
 out:
@@ -1809,7 +1578,7 @@ out:
 
 
 /**
 /**
  * start_afu() - initializes and starts the AFU
  * start_afu() - initializes and starts the AFU
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  */
  */
 static int start_afu(struct cxlflash_cfg *cfg)
 static int start_afu(struct cxlflash_cfg *cfg)
 {
 {
@@ -1829,7 +1598,10 @@ static int start_afu(struct cxlflash_cfg *cfg)
 
 
 	init_pcr(cfg);
 	init_pcr(cfg);
 
 
-	/* initialize RRQ pointers */
+	/* After an AFU reset, RRQ entries are stale, clear them */
+	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
+
+	/* Initialize RRQ pointers */
 	afu->hrrq_start = &afu->rrq_entry[0];
 	afu->hrrq_start = &afu->rrq_entry[0];
 	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
 	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
 	afu->hrrq_curr = afu->hrrq_start;
 	afu->hrrq_curr = afu->hrrq_start;
@@ -1843,12 +1615,9 @@ static int start_afu(struct cxlflash_cfg *cfg)
 
 
 /**
 /**
  * init_mc() - create and register as the master context
  * init_mc() - create and register as the master context
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  *
  *
- * Return:
- *	0 on success
- *	-ENOMEM when unable to obtain a context from CXL services
- *	A failure value from CXL services.
+ * Return: 0 on success, -errno on failure
  */
  */
 static int init_mc(struct cxlflash_cfg *cfg)
 static int init_mc(struct cxlflash_cfg *cfg)
 {
 {
@@ -1932,15 +1701,12 @@ out:
 
 
 /**
 /**
  * init_afu() - setup as master context and start AFU
  * init_afu() - setup as master context and start AFU
- * @cxlflash:	Internal structure associated with the host.
+ * @cfg:	Internal structure associated with the host.
  *
  *
  * This routine is a higher level of control for configuring the
  * This routine is a higher level of control for configuring the
  * AFU on probe and reset paths.
  * AFU on probe and reset paths.
  *
  *
- * Return:
- *	0 on success
- *	-ENOMEM when unable to map the AFU MMIO space
- *	A failure value from internal services.
+ * Return: 0 on success, -errno on failure
  */
  */
 static int init_afu(struct cxlflash_cfg *cfg)
 static int init_afu(struct cxlflash_cfg *cfg)
 {
 {
@@ -1955,36 +1721,38 @@ static int init_afu(struct cxlflash_cfg *cfg)
 	if (rc) {
 	if (rc) {
 		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
 		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
 			__func__, rc);
 			__func__, rc);
-		goto err1;
+		goto out;
 	}
 	}
 
 
-	/* Map the entire MMIO space of the AFU.
-	 */
+	/* Map the entire MMIO space of the AFU */
 	afu->afu_map = cxl_psa_map(cfg->mcctx);
 	afu->afu_map = cxl_psa_map(cfg->mcctx);
 	if (!afu->afu_map) {
 	if (!afu->afu_map) {
-		rc = -ENOMEM;
-		term_mc(cfg, UNDO_START);
 		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
 		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
+		rc = -ENOMEM;
 		goto err1;
 		goto err1;
 	}
 	}
 
 
-	/* don't byte reverse on reading afu_version, else the string form */
-	/*     will be backwards */
-	reg = afu->afu_map->global.regs.afu_version;
-	memcpy(afu->version, &reg, 8);
+	/* No byte reverse on reading afu_version or string will be backwards */
+	reg = readq(&afu->afu_map->global.regs.afu_version);
+	memcpy(afu->version, &reg, sizeof(reg));
 	afu->interface_version =
 	afu->interface_version =
 	    readq_be(&afu->afu_map->global.regs.interface_version);
 	    readq_be(&afu->afu_map->global.regs.interface_version);
-	pr_debug("%s: afu version %s, interface version 0x%llX\n",
-		 __func__, afu->version, afu->interface_version);
+	if ((afu->interface_version + 1) == 0) {
+		pr_err("Back level AFU, please upgrade. AFU version %s "
+		       "interface version 0x%llx\n", afu->version,
+		       afu->interface_version);
+		rc = -EINVAL;
+		goto err2;
+	}
+
+	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
+		 afu->version, afu->interface_version);
 
 
 	rc = start_afu(cfg);
 	rc = start_afu(cfg);
 	if (rc) {
 	if (rc) {
 		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
 		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
 			__func__, rc);
 			__func__, rc);
-		term_mc(cfg, UNDO_START);
-		cxl_psa_unmap((void *)afu->afu_map);
-		afu->afu_map = NULL;
-		goto err1;
+		goto err2;
 	}
 	}
 
 
 	afu_err_intr_init(cfg->afu);
 	afu_err_intr_init(cfg->afu);
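
Editorial note on the hunk above: the new (afu->interface_version + 1) == 0 test is a compact check for a register that reads back as all ones (~0ULL), which the driver reports as a back-level AFU. A trivial standalone illustration of the arithmetic:

/* Editorial sketch only -- not part of this commit. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dead = ~0ULL;			/* all-ones register read */
	uint64_t live = 0x0000000000010001ULL;	/* any other value */

	assert(dead + 1 == 0);	/* wraps to zero, flagged as back-level */
	assert(live + 1 != 0);
	return 0;
}
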
@@ -1992,97 +1760,18 @@ static int init_afu(struct cxlflash_cfg *cfg)
 
 
 	/* Restore the LUN mappings */
 	/* Restore the LUN mappings */
 	cxlflash_restore_luntable(cfg);
 	cxlflash_restore_luntable(cfg);
-err1:
-	pr_debug("%s: returning rc=%d\n", __func__, rc);
-	return rc;
-}
-
-/**
- * cxlflash_send_cmd() - sends an AFU command
- * @afu:	AFU associated with the host.
- * @cmd:	AFU command to send.
- *
- * Return:
- *	0 on success
- *	-1 on failure
- */
-int cxlflash_send_cmd(struct afu *afu, struct afu_cmd *cmd)
-{
-	struct cxlflash_cfg *cfg = afu->parent;
-	int nretry = 0;
-	int rc = 0;
-	u64 room;
-	long newval;
-
-	/*
-	 * This routine is used by critical users such an AFU sync and to
-	 * send a task management function (TMF). Thus we want to retry a
-	 * bit before returning an error. To avoid the performance penalty
-	 * of MMIO, we spread the update of 'room' over multiple commands.
-	 */
-retry:
-	newval = atomic64_dec_if_positive(&afu->room);
-	if (!newval) {
-		do {
-			room = readq_be(&afu->host_map->cmd_room);
-			atomic64_set(&afu->room, room);
-			if (room)
-				goto write_ioarrin;
-			udelay(nretry);
-		} while (nretry++ < MC_ROOM_RETRY_CNT);
-
-		pr_err("%s: no cmd_room to send 0x%X\n",
-		       __func__, cmd->rcb.cdb[0]);
-
-		goto no_room;
-	} else if (unlikely(newval < 0)) {
-		/* This should be rare. i.e. Only if two threads race and
-		 * decrement before the MMIO read is done. In this case
-		 * just benefit from the other thread having updated
-		 * afu->room.
-		 */
-		if (nretry++ < MC_ROOM_RETRY_CNT) {
-			udelay(nretry);
-			goto retry;
-		}
-
-		goto no_room;
-	}
-
-write_ioarrin:
-	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
 out:
 out:
-	pr_debug("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
-		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 	return rc;
 
 
-no_room:
-	afu->read_room = true;
-	schedule_work(&cfg->work_q);
-	rc = SCSI_MLQUEUE_HOST_BUSY;
+err2:
+	cxl_psa_unmap((void __iomem *)afu->afu_map);
+	afu->afu_map = NULL;
+err1:
+	term_mc(cfg, UNDO_START);
 	goto out;
 	goto out;
 }
 }
 
 
-/**
- * cxlflash_wait_resp() - polls for a response or timeout to a sent AFU command
- * @afu:	AFU associated with the host.
- * @cmd:	AFU command that was sent.
- */
-void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
-{
-	ulong timeout = jiffies + (cmd->rcb.timeout * 2 * HZ);
-
-	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
-	if (!timeout)
-		cxlflash_context_reset(cmd);
-
-	if (unlikely(cmd->sa.ioasc != 0))
-		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
-		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
-		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
-		       cmd->sa.rc.fc_rc);
-}
-
 /**
 /**
  * cxlflash_afu_sync() - builds and sends an AFU sync command
  * cxlflash_afu_sync() - builds and sends an AFU sync command
  * @afu:	AFU associated with the host.
  * @afu:	AFU associated with the host.
@@ -2091,7 +1780,7 @@ void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
  *
  *
  * The AFU can only take 1 sync command at a time. This routine enforces this
  * The AFU can only take 1 sync command at a time. This routine enforces this
- * limitation by using a mutex to provide exlusive access to the AFU during
+ * limitation by using a mutex to provide exclusive access to the AFU during
  * the sync. This design point requires calling threads to not be on interrupt
  * the sync. This design point requires calling threads to not be on interrupt
  * context due to the possibility of sleeping during concurrent sync operations.
  * context due to the possibility of sleeping during concurrent sync operations.
  *
  *
@@ -2109,6 +1798,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 		      res_hndl_t res_hndl_u, u8 mode)
 		      res_hndl_t res_hndl_u, u8 mode)
 {
 {
 	struct cxlflash_cfg *cfg = afu->parent;
 	struct cxlflash_cfg *cfg = afu->parent;
+	struct device *dev = &cfg->dev->dev;
 	struct afu_cmd *cmd = NULL;
 	struct afu_cmd *cmd = NULL;
 	int rc = 0;
 	int rc = 0;
 	int retry_cnt = 0;
 	int retry_cnt = 0;
@@ -2121,13 +1811,13 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 
 
 	mutex_lock(&sync_active);
 	mutex_lock(&sync_active);
 retry:
 retry:
-	cmd = cxlflash_cmd_checkout(afu);
+	cmd = cmd_checkout(afu);
 	if (unlikely(!cmd)) {
 	if (unlikely(!cmd)) {
 		retry_cnt++;
 		retry_cnt++;
 		udelay(1000 * retry_cnt);
 		udelay(1000 * retry_cnt);
 		if (retry_cnt < MC_RETRY_CNT)
 		if (retry_cnt < MC_RETRY_CNT)
 			goto retry;
 			goto retry;
-		pr_err("%s: could not get a free command\n", __func__);
+		dev_err(dev, "%s: could not get a free command\n", __func__);
 		rc = -1;
 		rc = -1;
 		goto out;
 		goto out;
 	}
 	}
@@ -2147,36 +1837,34 @@ retry:
 	cmd->rcb.cdb[1] = mode;
 	cmd->rcb.cdb[1] = mode;
 
 
 	/* The cdb is aligned, no unaligned accessors required */
 	/* The cdb is aligned, no unaligned accessors required */
-	*((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u);
-	*((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u);
+	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
+	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);
 
 
-	rc = cxlflash_send_cmd(afu, cmd);
+	rc = send_cmd(afu, cmd);
 	if (unlikely(rc))
 	if (unlikely(rc))
 		goto out;
 		goto out;
 
 
-	cxlflash_wait_resp(afu, cmd);
+	wait_resp(afu, cmd);
 
 
-	/* set on timeout */
+	/* Set on timeout */
 	if (unlikely((cmd->sa.ioasc != 0) ||
 	if (unlikely((cmd->sa.ioasc != 0) ||
 		     (cmd->sa.host_use_b[0] & B_ERROR)))
 		     (cmd->sa.host_use_b[0] & B_ERROR)))
 		rc = -1;
 		rc = -1;
 out:
 out:
 	mutex_unlock(&sync_active);
 	mutex_unlock(&sync_active);
 	if (cmd)
 	if (cmd)
-		cxlflash_cmd_checkin(cmd);
+		cmd_checkin(cmd);
 	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	pr_debug("%s: returning rc=%d\n", __func__, rc);
 	return rc;
 	return rc;
 }
 }
 
 
 /**
 /**
- * cxlflash_afu_reset() - resets the AFU
- * @cxlflash:	Internal structure associated with the host.
+ * afu_reset() - resets the AFU
+ * @cfg:	Internal structure associated with the host.
  *
  *
- * Return:
- *	0 on success
- *	A failure value from internal services.
+ * Return: 0 on success, -errno on failure
  */
  */
-int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
+static int afu_reset(struct cxlflash_cfg *cfg)
 {
 {
 	int rc = 0;
 	int rc = 0;
 	/* Stop the context before the reset. Since the context is
 	/* Stop the context before the reset. Since the context is
@@ -2191,6 +1879,413 @@ int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
 	return rc;
 	return rc;
 }
 }
 
 
+/**
+ * cxlflash_eh_device_reset_handler() - reset a single LUN
+ * @scp:	SCSI command to send.
+ *
+ * Return:
+ *	SUCCESS as defined in scsi/scsi.h
+ *	FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
+{
+	int rc = SUCCESS;
+	struct Scsi_Host *host = scp->device->host;
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+	struct afu *afu = cfg->afu;
+	int rcr = 0;
+
+	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+		 host->host_no, scp->device->channel,
+		 scp->device->id, scp->device->lun,
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+retry:
+	switch (cfg->state) {
+	case STATE_NORMAL:
+		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
+		if (unlikely(rcr))
+			rc = FAILED;
+		break;
+	case STATE_RESET:
+		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+		goto retry;
+	default:
+		rc = FAILED;
+		break;
+	}
+
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_eh_host_reset_handler() - reset the host adapter
+ * @scp:	SCSI command from stack identifying host.
+ *
+ * Return:
+ *	SUCCESS as defined in scsi/scsi.h
+ *	FAILED as defined in scsi/scsi.h
+ */
+static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
+{
+	int rc = SUCCESS;
+	int rcr = 0;
+	struct Scsi_Host *host = scp->device->host;
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
+
+	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
+		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
+		 host->host_no, scp->device->channel,
+		 scp->device->id, scp->device->lun,
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
+		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
+
+	switch (cfg->state) {
+	case STATE_NORMAL:
+		cfg->state = STATE_RESET;
+		cxlflash_mark_contexts_error(cfg);
+		rcr = afu_reset(cfg);
+		if (rcr) {
+			rc = FAILED;
+			cfg->state = STATE_FAILTERM;
+		} else
+			cfg->state = STATE_NORMAL;
+		wake_up_all(&cfg->reset_waitq);
+		break;
+	case STATE_RESET:
+		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+		if (cfg->state == STATE_NORMAL)
+			break;
+		/* fall through */
+	default:
+		rc = FAILED;
+		break;
+	}
+
+	pr_debug("%s: returning rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/**
+ * cxlflash_change_queue_depth() - change the queue depth for the device
+ * @sdev:	SCSI device destined for queue depth change.
+ * @qdepth:	Requested queue depth value to set.
+ *
+ * The requested queue depth is capped to the maximum supported value.
+ *
+ * Return: The actual queue depth set.
+ */
+static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+
+	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
+		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
+
+	scsi_change_queue_depth(sdev, qdepth);
+	return sdev->queue_depth;
+}
+
+/**
+ * cxlflash_show_port_status() - queries and presents the current port status
+ * @port:	Desired port for status reporting.
+ * @afu:	AFU owning the specified port.
+ * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
+{
+	char *disp_status;
+	u64 status;
+	__be64 __iomem *fc_regs;
+
+	if (port >= NUM_FC_PORTS)
+		return 0;
+
+	fc_regs = &afu->afu_map->global.fc_regs[port][0];
+	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+	status &= FC_MTIP_STATUS_MASK;
+
+	if (status == FC_MTIP_STATUS_ONLINE)
+		disp_status = "online";
+	else if (status == FC_MTIP_STATUS_OFFLINE)
+		disp_status = "offline";
+	else
+		disp_status = "unknown";
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
+}
+
+/**
+ * port0_show() - queries and presents the current status of port 0
+ * @dev:	Generic device associated with the host owning the port.
+ * @attr:	Device attribute representing the port.
+ * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port0_show(struct device *dev,
+			  struct device_attribute *attr,
+			  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+
+	return cxlflash_show_port_status(0, afu, buf);
+}
+
+/**
+ * port1_show() - queries and presents the current status of port 1
+ * @dev:	Generic device associated with the host owning the port.
+ * @attr:	Device attribute representing the port.
+ * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port1_show(struct device *dev,
+			  struct device_attribute *attr,
+			  char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+
+	return cxlflash_show_port_status(1, afu, buf);
+}
+
+/**
+ * lun_mode_show() - presents the current LUN mode of the host
+ * @dev:	Generic device associated with the host.
+ * @attr:	Device attribute representing the LUN mode.
+ * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t lun_mode_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
+}
+
+/**
+ * lun_mode_store() - sets the LUN mode of the host
+ * @dev:	Generic device associated with the host.
+ * @attr:	Device attribute representing the LUN mode.
+ * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
+ * @count:	Length of data resizing in @buf.
+ *
+ * The CXL Flash AFU supports a dummy LUN mode where the external
+ * links and storage are not required. Space on the FPGA is used
+ * to create 1 or 2 small LUNs which are presented to the system
+ * as if they were a normal storage device. This feature is useful
+ * during development and also provides manufacturing with a way
+ * to test the AFU without an actual device.
+ *
+ * 0 = external LUN[s] (default)
+ * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
+ * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
+ * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
+ * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t lun_mode_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+	int rc;
+	u32 lun_mode;
+
+	rc = kstrtouint(buf, 10, &lun_mode);
+	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
+		afu->internal_lun = lun_mode;
+		afu_reset(cfg);
+		scsi_scan_host(cfg->host);
+	}
+
+	return count;
+}
+
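
Editorial note: because lun_mode is exposed through the host attributes registered later in this hunk, the internal-LUN modes described above can be selected from user space once the driver is loaded. A small illustration follows; the host number (host0) is an assumption for the example only. Per lun_mode_store() above, a successful write resets the AFU and rescans the host.

/* Editorial sketch only -- not part of this commit. */
#include <stdio.h>

int main(void)
{
	/* host0 is an assumed host number; check /sys/class/scsi_host */
	FILE *f = fopen("/sys/class/scsi_host/host0/lun_mode", "w");

	if (!f) {
		perror("lun_mode");
		return 1;
	}
	fputs("1\n", f);	/* 1 = one internal 64K LUN with 512B blocks */
	fclose(f);
	return 0;
}
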
+/**
+ * ioctl_version_show() - presents the current ioctl version of the host
+ * @dev:	Generic device associated with the host.
+ * @attr:	Device attribute representing the ioctl version.
+ * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t ioctl_version_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
+}
+
+/**
+ * cxlflash_show_port_lun_table() - queries and presents the port LUN table
+ * @port:	Desired port for status reporting.
+ * @afu:	AFU owning the specified port.
+ * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t cxlflash_show_port_lun_table(u32 port,
+					    struct afu *afu,
+					    char *buf)
+{
+	int i;
+	ssize_t bytes = 0;
+	__be64 __iomem *fc_port;
+
+	if (port >= NUM_FC_PORTS)
+		return 0;
+
+	fc_port = &afu->afu_map->global.fc_port[port][0];
+
+	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
+		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
+	return bytes;
+}
+
+/**
+ * port0_lun_table_show() - presents the current LUN table of port 0
+ * @dev:	Generic device associated with the host owning the port.
+ * @attr:	Device attribute representing the port.
+ * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port0_lun_table_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+
+	return cxlflash_show_port_lun_table(0, afu, buf);
+}
+
+/**
+ * port1_lun_table_show() - presents the current LUN table of port 1
+ * @dev:	Generic device associated with the host owning the port.
+ * @attr:	Device attribute representing the port.
+ * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t port1_lun_table_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
+	struct afu *afu = cfg->afu;
+
+	return cxlflash_show_port_lun_table(1, afu, buf);
+}
+
+/**
+ * mode_show() - presents the current mode of the device
+ * @dev:	Generic device associated with the device.
+ * @attr:	Device attribute representing the device mode.
+ * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
+ *
+ * Return: The size of the ASCII string returned in @buf.
+ */
+static ssize_t mode_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 sdev->hostdata ? "superpipe" : "legacy");
+}
+
+/*
+ * Host attributes
+ */
+static DEVICE_ATTR_RO(port0);
+static DEVICE_ATTR_RO(port1);
+static DEVICE_ATTR_RW(lun_mode);
+static DEVICE_ATTR_RO(ioctl_version);
+static DEVICE_ATTR_RO(port0_lun_table);
+static DEVICE_ATTR_RO(port1_lun_table);
+
+static struct device_attribute *cxlflash_host_attrs[] = {
+	&dev_attr_port0,
+	&dev_attr_port1,
+	&dev_attr_lun_mode,
+	&dev_attr_ioctl_version,
+	&dev_attr_port0_lun_table,
+	&dev_attr_port1_lun_table,
+	NULL
+};
+
+/*
+ * Device attributes
+ */
+static DEVICE_ATTR_RO(mode);
+
+static struct device_attribute *cxlflash_dev_attrs[] = {
+	&dev_attr_mode,
+	NULL
+};
+
+/*
+ * Host template
+ */
+static struct scsi_host_template driver_template = {
+	.module = THIS_MODULE,
+	.name = CXLFLASH_ADAPTER_NAME,
+	.info = cxlflash_driver_info,
+	.ioctl = cxlflash_ioctl,
+	.proc_name = CXLFLASH_NAME,
+	.queuecommand = cxlflash_queuecommand,
+	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
+	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
+	.change_queue_depth = cxlflash_change_queue_depth,
+	.cmd_per_lun = 16,
+	.can_queue = CXLFLASH_MAX_CMDS,
+	.this_id = -1,
+	.sg_tablesize = SG_NONE,	/* No scatter gather support */
+	.max_sectors = CXLFLASH_MAX_SECTORS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = cxlflash_host_attrs,
+	.sdev_attrs = cxlflash_dev_attrs,
+};
+
+/*
+ * Device dependent values
+ */
+static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
+
+/*
+ * PCI device binding table
+ */
+static struct pci_device_id cxlflash_pci_table[] = {
+	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
+	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
+
 /**
 /**
  * cxlflash_worker_thread() - work thread handler for the AFU
  * cxlflash_worker_thread() - work thread handler for the AFU
  * @work:	Work structure contained within cxlflash associated with host.
  * @work:	Work structure contained within cxlflash associated with host.
@@ -2199,12 +2294,14 @@ int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
  * - Link reset which cannot be performed on interrupt context due to
  * - Link reset which cannot be performed on interrupt context due to
  * blocking up to a few seconds
  * blocking up to a few seconds
  * - Read AFU command room
  * - Read AFU command room
+ * - Rescan the host
  */
  */
 static void cxlflash_worker_thread(struct work_struct *work)
 static void cxlflash_worker_thread(struct work_struct *work)
 {
 {
 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
 						work_q);
 						work_q);
 	struct afu *afu = cfg->afu;
 	struct afu *afu = cfg->afu;
+	struct device *dev = &cfg->dev->dev;
 	int port;
 	int port;
 	ulong lock_flags;
 	ulong lock_flags;
 
 
@@ -2218,15 +2315,15 @@ static void cxlflash_worker_thread(struct work_struct *work)
 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
 		port = cfg->lr_port;
 		port = cfg->lr_port;
 		if (port < 0)
 		if (port < 0)
-			pr_err("%s: invalid port index %d\n", __func__, port);
+			dev_err(dev, "%s: invalid port index %d\n",
+				__func__, port);
 		else {
 		else {
 			spin_unlock_irqrestore(cfg->host->host_lock,
 			spin_unlock_irqrestore(cfg->host->host_lock,
 					       lock_flags);
 					       lock_flags);
 
 
 			/* The reset can block... */
 			/* The reset can block... */
 			afu_link_reset(afu, port,
 			afu_link_reset(afu, port,
-				       &afu->afu_map->
-				       global.fc_regs[port][0]);
+				       &afu->afu_map->global.fc_regs[port][0]);
 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
 		}
 		}
 
 
@@ -2239,6 +2336,9 @@ static void cxlflash_worker_thread(struct work_struct *work)
 	}
 	}
 
 
 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
+
+	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
+		scsi_scan_host(cfg->host);
 }
 }
 
 
 /**
 /**
@@ -2246,7 +2346,7 @@ static void cxlflash_worker_thread(struct work_struct *work)
  * @pdev:	PCI device associated with the host.
  * @pdev:	PCI device associated with the host.
  * @dev_id:	PCI device id associated with device.
  * @dev_id:	PCI device id associated with device.
  *
  *
- * Return: 0 on success / non-zero on failure
+ * Return: 0 on success, -errno on failure
  */
  */
 static int cxlflash_probe(struct pci_dev *pdev,
 static int cxlflash_probe(struct pci_dev *pdev,
 			  const struct pci_device_id *dev_id)
 			  const struct pci_device_id *dev_id)
@@ -2281,14 +2381,16 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	cfg->host = host;
 	cfg->host = host;
 	rc = alloc_mem(cfg);
 	rc = alloc_mem(cfg);
 	if (rc) {
 	if (rc) {
-		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
+		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
 			__func__);
 			__func__);
 		rc = -ENOMEM;
 		rc = -ENOMEM;
+		scsi_host_put(cfg->host);
 		goto out;
 		goto out;
 	}
 	}
 
 
 	cfg->init_state = INIT_STATE_NONE;
 	cfg->init_state = INIT_STATE_NONE;
 	cfg->dev = pdev;
 	cfg->dev = pdev;
+	cfg->cxl_fops = cxlflash_cxl_fops;
 
 
 	/*
 	/*
 	 * The promoted LUNs move to the top of the LUN table. The rest stay
 	 * The promoted LUNs move to the top of the LUN table. The rest stay
@@ -2301,28 +2403,30 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
 	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
 
 
 	cfg->dev_id = (struct pci_device_id *)dev_id;
 	cfg->dev_id = (struct pci_device_id *)dev_id;
-	cfg->mcctx = NULL;
 
 
 	init_waitqueue_head(&cfg->tmf_waitq);
 	init_waitqueue_head(&cfg->tmf_waitq);
-	init_waitqueue_head(&cfg->limbo_waitq);
+	init_waitqueue_head(&cfg->reset_waitq);
 
 
 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
 	cfg->lr_state = LINK_RESET_INVALID;
 	cfg->lr_state = LINK_RESET_INVALID;
 	cfg->lr_port = -1;
 	cfg->lr_port = -1;
+	spin_lock_init(&cfg->tmf_slock);
 	mutex_init(&cfg->ctx_tbl_list_mutex);
 	mutex_init(&cfg->ctx_tbl_list_mutex);
 	mutex_init(&cfg->ctx_recovery_mutex);
 	mutex_init(&cfg->ctx_recovery_mutex);
+	init_rwsem(&cfg->ioctl_rwsem);
 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
 	INIT_LIST_HEAD(&cfg->lluns);
 	INIT_LIST_HEAD(&cfg->lluns);
 
 
 	pci_set_drvdata(pdev, cfg);
 	pci_set_drvdata(pdev, cfg);
 
 
-	/* Use the special service provided to look up the physical
+	/*
+	 * Use the special service provided to look up the physical
 	 * PCI device, since we are called on the probe of the virtual
 	 * PCI device, since we are called on the probe of the virtual
 	 * PCI host bus (vphb)
 	 * PCI host bus (vphb)
 	 */
 	 */
 	phys_dev = cxl_get_phys_dev(pdev);
 	phys_dev = cxl_get_phys_dev(pdev);
 	if (!dev_is_pci(phys_dev)) {
 	if (!dev_is_pci(phys_dev)) {
-		pr_err("%s: not a pci dev\n", __func__);
+		dev_err(&pdev->dev, "%s: not a pci dev\n", __func__);
 		rc = -ENODEV;
 		rc = -ENODEV;
 		goto out_remove;
 		goto out_remove;
 	}
 	}
@@ -2346,7 +2450,6 @@ static int cxlflash_probe(struct pci_dev *pdev,
 	}
 	}
 	cfg->init_state = INIT_STATE_AFU;
 	cfg->init_state = INIT_STATE_AFU;
 
 
-
 	rc = init_scsi(cfg);
 	rc = init_scsi(cfg);
 	if (rc) {
 	if (rc) {
 		dev_err(&pdev->dev, "%s: call to init_scsi "
 		dev_err(&pdev->dev, "%s: call to init_scsi "
@@ -2364,6 +2467,19 @@ out_remove:
 	goto out;
 	goto out;
 }
 }
 
 
+/**
+ * drain_ioctls() - wait until all currently executing ioctls have completed
+ * @cfg:	Internal structure associated with the host.
+ *
+ * Obtain write access to read/write semaphore that wraps ioctl
+ * handling to 'drain' ioctls currently executing.
+ */
+static void drain_ioctls(struct cxlflash_cfg *cfg)
+{
+	down_write(&cfg->ioctl_rwsem);
+	up_write(&cfg->ioctl_rwsem);
+}
+
 /**
  * cxlflash_pci_error_detected() - called when a PCI error is detected
  * @pdev:	PCI device struct.
@@ -2382,21 +2498,19 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
 
 	switch (state) {
 	case pci_channel_io_frozen:
-		cfg->state = STATE_LIMBO;
-
-		/* Turn off legacy I/O */
+		cfg->state = STATE_RESET;
 		scsi_block_requests(cfg->host);
+		drain_ioctls(cfg);
 		rc = cxlflash_mark_contexts_error(cfg);
 		if (unlikely(rc))
 			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
 				__func__, rc);
 		term_mc(cfg, UNDO_START);
 		stop_afu(cfg);
-
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		cfg->state = STATE_FAILTERM;
-		wake_up_all(&cfg->limbo_waitq);
+		wake_up_all(&cfg->reset_waitq);
 		scsi_unblock_requests(cfg->host);
 		return PCI_ERS_RESULT_DISCONNECT;
 	default:
@@ -2443,7 +2557,7 @@ static void cxlflash_pci_resume(struct pci_dev *pdev)
 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
 
 	cfg->state = STATE_NORMAL;
-	wake_up_all(&cfg->limbo_waitq);
+	wake_up_all(&cfg->reset_waitq);
 	scsi_unblock_requests(cfg->host);
 }
 
@@ -2467,7 +2581,7 @@ static struct pci_driver cxlflash_driver = {
 /**
  * init_cxlflash() - module entry point
  *
- * Return: 0 on success / non-zero on failure
+ * Return: 0 on success, -errno on failure
  */
 static int __init init_cxlflash(void)
 {
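
The hunks above add cfg->ioctl_rwsem and the drain_ioctls() helper: every ioctl holds the semaphore for read, and the EEH path takes it once for write, so it cannot proceed until all in-flight ioctls have finished. Below is a minimal kernel-style sketch of that pattern; the demo_hba structure and function names are illustrative only and are not part of the driver.

#include <linux/rwsem.h>

struct demo_hba {
	struct rw_semaphore ioctl_rwsem;	/* init_rwsem() at probe time */
};

/* Ioctl path: hold the semaphore for read for the lifetime of the call. */
static int demo_ioctl(struct demo_hba *hba)
{
	int rc = 0;

	down_read(&hba->ioctl_rwsem);
	/* ... service the ioctl ... */
	up_read(&hba->ioctl_rwsem);

	return rc;
}

/*
 * Error handler: a write acquisition cannot succeed until every reader
 * (every running ioctl) has dropped the semaphore, so this pair of calls
 * acts as a barrier that "drains" in-flight ioctls.
 */
static void demo_drain_ioctls(struct demo_hba *hba)
{
	down_write(&hba->ioctl_rwsem);
	up_write(&hba->ioctl_rwsem);
}
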

+ 1 - 0
drivers/scsi/cxlflash/main.h

@@ -99,6 +99,7 @@ struct asyc_intr_info {
 	u8 action;
 #define CLR_FC_ERROR	0x01
 #define LINK_RESET	0x02
+#define SCAN_HOST	0x04
 };
 
 #ifndef CONFIG_CXL_EEH

+ 4 - 4
drivers/scsi/cxlflash/sislite.h

@@ -146,7 +146,7 @@ struct sisl_rc {
 #define SISL_FC_RC_ABORTFAIL	0x59	/* pending abort completed w/fail */
 #define SISL_FC_RC_RESID	0x5A	/* ioasa underrun/overrun flags set */
 #define SISL_FC_RC_RESIDERR	0x5B	/* actual data len does not match SCSI
-					   reported len, possbly due to dropped
+					   reported len, possibly due to dropped
 					   frames */
 #define SISL_FC_RC_TGTABORT	0x5C	/* command aborted by target */
 };
@@ -258,7 +258,7 @@ struct sisl_host_map {
 	__be64 rrq_start;	/* start & end are both inclusive */
 	__be64 rrq_end;		/* write sequence: start followed by end */
 	__be64 cmd_room;
-	__be64 ctx_ctrl;	/* least signiifcant byte or b56:63 is LISN# */
+	__be64 ctx_ctrl;	/* least significant byte or b56:63 is LISN# */
 	__be64 mbox_w;		/* restricted use */
 };
 
@@ -290,7 +290,7 @@ struct sisl_global_regs {
 #define SISL_ASTATUS_FC0_LOGO    0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
 						   while logged in */
 #define SISL_ASTATUS_FC0_CRC_T   0x2000ULL /* b50, CRC threshold exceeded */
-#define SISL_ASTATUS_FC0_LOGI_R  0x1000ULL /* b51, login state mechine timed out
+#define SISL_ASTATUS_FC0_LOGI_R  0x1000ULL /* b51, login state machine timed out
 						   and retrying */
 #define SISL_ASTATUS_FC0_LOGI_F  0x0800ULL /* b52, login failed,
 					      FC_ERROR[19:0] */
@@ -340,7 +340,7 @@ struct sisl_global_regs {
 #define SISL_AFUCONF_MBOX_CLR_READ     0x0010ULL
 	__be64 afu_config;
 	__be64 rsvd[0xf8];
-	__be64 afu_version;
+	__le64 afu_version;
 	__be64 interface_version;
 };
 
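
The afu_version field above is re-declared as __le64 because its contents are little-endian, unlike the big-endian registers around it; the annotation lets sparse flag a mismatched accessor. A hedged sketch of how the two kinds of field would be read follows; the function and the direct use of readq()/readq_be() are illustrative, not the driver's actual code.

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Illustrative only: 'regs' stands in for an ioremap'd sisl_global_regs. */
static void demo_show_versions(struct sisl_global_regs __iomem *regs)
{
	u64 afu_ver  = readq(&regs->afu_version);	   /* __le64 field */
	u64 intf_ver = readq_be(&regs->interface_version); /* __be64 field */

	pr_debug("afu_version=%016llx interface_version=%016llx\n",
		 afu_ver, intf_ver);
}
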

+ 134 - 75
drivers/scsi/cxlflash/superpipe.c

@@ -76,7 +76,7 @@ void cxlflash_free_errpage(void)
  *
  *
  * When the host needs to go down, all users must be quiesced and their
  * When the host needs to go down, all users must be quiesced and their
  * memory freed. This is accomplished by putting the contexts in error
  * memory freed. This is accomplished by putting the contexts in error
- * state which will notify the user and let them 'drive' the tear-down.
+ * state which will notify the user and let them 'drive' the tear down.
  * Meanwhile, this routine camps until all user contexts have been removed.
  * Meanwhile, this routine camps until all user contexts have been removed.
  */
  */
 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
 void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
@@ -100,7 +100,7 @@ void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
 
 
 		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
 		dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
 			__func__);
 			__func__);
-		wake_up_all(&cfg->limbo_waitq);
+		wake_up_all(&cfg->reset_waitq);
 		ssleep(1);
 		ssleep(1);
 	}
 	}
 }
 }
@@ -162,10 +162,7 @@ struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
 
 
 	if (likely(ctxid < MAX_CONTEXT)) {
 	if (likely(ctxid < MAX_CONTEXT)) {
 		while (true) {
 		while (true) {
-			rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
-			if (rc)
-				goto out;
-
+			mutex_lock(&cfg->ctx_tbl_list_mutex);
 			ctxi = cfg->ctx_tbl[ctxid];
 			ctxi = cfg->ctx_tbl[ctxid];
 			if (ctxi)
 			if (ctxi)
 				if ((file && (ctxi->file != file)) ||
 				if ((file && (ctxi->file != file)) ||
@@ -253,7 +250,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
 {
 {
 	struct device *dev = &cfg->dev->dev;
 	struct device *dev = &cfg->dev->dev;
 	struct afu *afu = cfg->afu;
 	struct afu *afu = cfg->afu;
-	struct sisl_ctrl_map *ctrl_map = ctxi->ctrl_map;
+	struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map;
 	int rc = 0;
 	int rc = 0;
 	u64 val;
 	u64 val;
 
 
@@ -283,6 +280,24 @@ out:
  * @sdev:	SCSI device associated with LUN.
  * @sdev:	SCSI device associated with LUN.
  * @lli:	LUN destined for capacity request.
  * @lli:	LUN destined for capacity request.
  *
  *
+ * The READ_CAP16 can take quite a while to complete. Should an EEH occur while
+ * in scsi_execute(), the EEH handler will attempt to recover. As part of the
+ * recovery, the handler drains all currently running ioctls, waiting until they
+ * have completed before proceeding with a reset. As this routine is used on the
+ * ioctl path, this can create a condition where the EEH handler becomes stuck,
+ * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
+ * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
+ * This will allow the EEH handler to proceed with a recovery while this thread
+ * is still running. Once the scsi_execute() returns, reacquire the ioctl read
+ * semaphore and check the adapter state in case it changed while inside of
+ * scsi_execute(). The state check will wait if the adapter is still being
+ * recovered or return a failure if the recovery failed. In the event that the
+ * adapter reset failed, simply return the failure as the ioctl would be unable
+ * to continue.
+ *
+ * Note that the above puts a requirement on this routine to only be called on
+ * an ioctl thread.
+ *
  * Return: 0 on success, -errno on failure
  * Return: 0 on success, -errno on failure
  */
  */
 static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
 static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
@@ -296,7 +311,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
 	int rc = 0;
 	int rc = 0;
 	int result = 0;
 	int result = 0;
 	int retry_cnt = 0;
 	int retry_cnt = 0;
-	u32 tout = (MC_DISCOVERY_TIMEOUT * HZ);
+	u32 to = CMD_TIMEOUT * HZ;
 
 
 retry:
 retry:
 	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
 	cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
@@ -314,8 +329,18 @@ retry:
 	dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
 	dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
 		retry_cnt ? "re" : "", scsi_cmd[0]);
 		retry_cnt ? "re" : "", scsi_cmd[0]);
 
 
+	/* Drop the ioctl read semahpore across lengthy call */
+	up_read(&cfg->ioctl_rwsem);
 	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
 	result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
-			      CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+			      CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
+	down_read(&cfg->ioctl_rwsem);
+	rc = check_state(cfg);
+	if (rc) {
+		dev_err(dev, "%s: Failed state! result=0x08%X\n",
+			__func__, result);
+		rc = -ENODEV;
+		goto out;
+	}
 
 
 	if (driver_byte(result) == DRIVER_SENSE) {
 	if (driver_byte(result) == DRIVER_SENSE) {
 		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
 		result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
@@ -365,8 +390,8 @@ retry:
 	 * as the buffer is allocated on an aligned boundary.
 	 * as the buffer is allocated on an aligned boundary.
 	 */
 	 */
 	mutex_lock(&gli->mutex);
 	mutex_lock(&gli->mutex);
-	gli->max_lba = be64_to_cpu(*((u64 *)&cmd_buf[0]));
-	gli->blk_len = be32_to_cpu(*((u32 *)&cmd_buf[8]));
+	gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0]));
+	gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8]));
 	mutex_unlock(&gli->mutex);
 	mutex_unlock(&gli->mutex);
 
 
 out:
 out:
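
The casts above become __be64/__be32 because the READ CAPACITY(16) parameter data carries the last LBA in bytes 0-7 and the block length in bytes 8-11, both big-endian. A small self-contained user-space sketch of the same parsing; the sample buffer contents are made up.

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  | p[3];
}

int main(void)
{
	/* Example response: last LBA 0x1FFFFF, block length 4096 bytes */
	uint8_t buf[32] = { 0, 0, 0, 0, 0, 0x1F, 0xFF, 0xFF,
			    0, 0, 0x10, 0 };

	printf("max_lba=%llu blk_len=%u\n",
	       (unsigned long long)get_be64(&buf[0]), get_be32(&buf[8]));
	return 0;
}
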
@@ -712,7 +737,6 @@ static void destroy_context(struct cxlflash_cfg *cfg,
 	kfree(ctxi->rht_needs_ws);
 	kfree(ctxi->rht_needs_ws);
 	kfree(ctxi->rht_lun);
 	kfree(ctxi->rht_lun);
 	kfree(ctxi);
 	kfree(ctxi);
-	atomic_dec_if_positive(&cfg->num_user_contexts);
 }
 }
 
 
 /**
 /**
@@ -737,7 +761,7 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
 	struct afu *afu = cfg->afu;
 	struct afu *afu = cfg->afu;
 	struct ctx_info *ctxi = NULL;
 	struct ctx_info *ctxi = NULL;
 	struct llun_info **lli = NULL;
 	struct llun_info **lli = NULL;
-	bool *ws = NULL;
+	u8 *ws = NULL;
 	struct sisl_rht_entry *rhte;
 	struct sisl_rht_entry *rhte;
 
 
 	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
 	ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
@@ -769,7 +793,6 @@ static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
 	INIT_LIST_HEAD(&ctxi->luns);
 	INIT_LIST_HEAD(&ctxi->luns);
 	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
 	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
 
 
-	atomic_inc(&cfg->num_user_contexts);
 	mutex_lock(&ctxi->mutex);
 	mutex_lock(&ctxi->mutex);
 out:
 out:
 	return ctxi;
 	return ctxi;
@@ -880,6 +903,9 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
 			sys_close(lfd);
 			sys_close(lfd);
 	}
 	}
 
 
+	/* Release the sdev reference that bound this LUN to the context */
+	scsi_device_put(sdev);
+
 out:
 out:
 	if (put_ctx)
 	if (put_ctx)
 		put_context(ctxi);
 		put_context(ctxi);
@@ -1161,10 +1187,7 @@ out:
 	return rc;
 	return rc;
 }
 }
 
 
-/*
- * Local fops for adapter file descriptor
- */
-static const struct file_operations cxlflash_cxl_fops = {
+const struct file_operations cxlflash_cxl_fops = {
 	.owner = THIS_MODULE,
 	.owner = THIS_MODULE,
 	.mmap = cxlflash_cxl_mmap,
 	.mmap = cxlflash_cxl_mmap,
 	.release = cxlflash_cxl_release,
 	.release = cxlflash_cxl_release,
@@ -1210,6 +1233,46 @@ static const struct file_operations null_fops = {
 	.owner = THIS_MODULE,
 	.owner = THIS_MODULE,
 };
 };
 
 
+/**
+ * check_state() - checks and responds to the current adapter state
+ * @cfg:	Internal structure associated with the host.
+ *
+ * This routine can block and should only be used on process context.
+ * It assumes that the caller is an ioctl thread and holding the ioctl
+ * read semaphore. This is temporarily let up across the wait to allow
+ * for draining actively running ioctls. Also note that when waking up
+ * from waiting in reset, the state is unknown and must be checked again
+ * before proceeding.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int check_state(struct cxlflash_cfg *cfg)
+{
+	struct device *dev = &cfg->dev->dev;
+	int rc = 0;
+
+retry:
+	switch (cfg->state) {
+	case STATE_RESET:
+		dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__);
+		up_read(&cfg->ioctl_rwsem);
+		rc = wait_event_interruptible(cfg->reset_waitq,
+					      cfg->state != STATE_RESET);
+		down_read(&cfg->ioctl_rwsem);
+		if (unlikely(rc))
+			break;
+		goto retry;
+	case STATE_FAILTERM:
+		dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
+		rc = -ENODEV;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
 /**
 /**
  * cxlflash_disk_attach() - attach a LUN to a context
  * cxlflash_disk_attach() - attach a LUN to a context
  * @sdev:	SCSI device associated with LUN.
  * @sdev:	SCSI device associated with LUN.
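
check_state(), added just above, is one half of a wait/wake pair: ioctl threads sleep on reset_waitq until the adapter leaves the reset state, and the reset paths wake_up_all() after changing the state. A reduced kernel-style sketch of that handshake is below; the structure and function names are hypothetical.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>

enum demo_state { DEMO_NORMAL, DEMO_RESET, DEMO_FAILTERM };

struct demo_adapter {
	enum demo_state state;
	wait_queue_head_t reset_waitq;	/* init_waitqueue_head() at probe */
};

/* Reset side: update the state first, then wake every sleeper to re-check. */
static void demo_reset_done(struct demo_adapter *adap, bool ok)
{
	adap->state = ok ? DEMO_NORMAL : DEMO_FAILTERM;
	wake_up_all(&adap->reset_waitq);
}

/* Ioctl side: block (interruptibly) until the adapter leaves reset. */
static int demo_wait_out_reset(struct demo_adapter *adap)
{
	int rc;

	rc = wait_event_interruptible(adap->reset_waitq,
				      adap->state != DEMO_RESET);
	if (rc)
		return rc;	/* woken by a signal */

	return adap->state == DEMO_FAILTERM ? -ENODEV : 0;
}
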
@@ -1243,10 +1306,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 
 
 	int fd = -1;
 	int fd = -1;
 
 
-	/* On first attach set fileops */
-	if (atomic_read(&cfg->num_user_contexts) == 0)
-		cfg->cxl_fops = cxlflash_cxl_fops;
-
 	if (attach->num_interrupts > 4) {
 	if (attach->num_interrupts > 4) {
 		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
 		dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
 			__func__, attach->num_interrupts);
 			__func__, attach->num_interrupts);
@@ -1287,11 +1346,17 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 			}
 			}
 	}
 	}
 
 
+	rc = scsi_device_get(sdev);
+	if (unlikely(rc)) {
+		dev_err(dev, "%s: Unable to get sdev reference!\n", __func__);
+		goto out;
+	}
+
 	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
 	lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
 	if (unlikely(!lun_access)) {
 	if (unlikely(!lun_access)) {
 		dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
 		dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
 		rc = -ENOMEM;
 		rc = -ENOMEM;
-		goto out;
+		goto err0;
 	}
 	}
 
 
 	lun_access->lli = lli;
 	lun_access->lli = lli;
@@ -1311,21 +1376,21 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 		dev_err(dev, "%s: Could not initialize context %p\n",
 		dev_err(dev, "%s: Could not initialize context %p\n",
 			__func__, ctx);
 			__func__, ctx);
 		rc = -ENODEV;
 		rc = -ENODEV;
-		goto err0;
+		goto err1;
 	}
 	}
 
 
 	ctxid = cxl_process_element(ctx);
 	ctxid = cxl_process_element(ctx);
 	if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
 	if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
 		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
 		dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
 		rc = -EPERM;
 		rc = -EPERM;
-		goto err1;
+		goto err2;
 	}
 	}
 
 
 	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
 	file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
 	if (unlikely(fd < 0)) {
 	if (unlikely(fd < 0)) {
 		rc = -ENODEV;
 		rc = -ENODEV;
 		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
 		dev_err(dev, "%s: Could not get file descriptor\n", __func__);
-		goto err1;
+		goto err2;
 	}
 	}
 
 
 	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
 	/* Translate read/write O_* flags from fcntl.h to AFU permission bits */
@@ -1335,7 +1400,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	if (unlikely(!ctxi)) {
 	if (unlikely(!ctxi)) {
 		dev_err(dev, "%s: Failed to create context! (%d)\n",
 		dev_err(dev, "%s: Failed to create context! (%d)\n",
 			__func__, ctxid);
 			__func__, ctxid);
-		goto err2;
+		goto err3;
 	}
 	}
 
 
 	work = &ctxi->work;
 	work = &ctxi->work;
@@ -1346,13 +1411,13 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	if (unlikely(rc)) {
 	if (unlikely(rc)) {
 		dev_dbg(dev, "%s: Could not start context rc=%d\n",
 		dev_dbg(dev, "%s: Could not start context rc=%d\n",
 			__func__, rc);
 			__func__, rc);
-		goto err3;
+		goto err4;
 	}
 	}
 
 
 	rc = afu_attach(cfg, ctxi);
 	rc = afu_attach(cfg, ctxi);
 	if (unlikely(rc)) {
 	if (unlikely(rc)) {
 		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
 		dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
-		goto err4;
+		goto err5;
 	}
 	}
 
 
 	/*
 	/*
@@ -1375,7 +1440,8 @@ out_attach:
 	attach->block_size = gli->blk_len;
 	attach->block_size = gli->blk_len;
 	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
 	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
 	attach->last_lba = gli->max_lba;
 	attach->last_lba = gli->max_lba;
-	attach->max_xfer = (sdev->host->max_sectors * 512) / gli->blk_len;
+	attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT;
+	attach->max_xfer /= gli->blk_len;
 
 
 out:
 out:
 	attach->adap_fd = fd;
 	attach->adap_fd = fd;
@@ -1387,13 +1453,13 @@ out:
 		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
 		__func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
 	return rc;
 	return rc;
 
 
-err4:
+err5:
 	cxl_stop_context(ctx);
 	cxl_stop_context(ctx);
-err3:
+err4:
 	put_context(ctxi);
 	put_context(ctxi);
 	destroy_context(cfg, ctxi);
 	destroy_context(cfg, ctxi);
 	ctxi = NULL;
 	ctxi = NULL;
-err2:
+err3:
 	/*
 	/*
 	 * Here, we're overriding the fops with a dummy all-NULL fops because
 	 * Here, we're overriding the fops with a dummy all-NULL fops because
 	 * fput() calls the release fop, which will cause us to mistakenly
 	 * fput() calls the release fop, which will cause us to mistakenly
@@ -1405,10 +1471,12 @@ err2:
 	fput(file);
 	fput(file);
 	put_unused_fd(fd);
 	put_unused_fd(fd);
 	fd = -1;
 	fd = -1;
-err1:
+err2:
 	cxl_release_context(ctx);
 	cxl_release_context(ctx);
-err0:
+err1:
 	kfree(lun_access);
 	kfree(lun_access);
+err0:
+	scsi_device_put(sdev);
 	goto out;
 	goto out;
 }
 }
 
 
@@ -1510,41 +1578,6 @@ err1:
 	goto out;
 	goto out;
 }
 }
 
 
-/**
- * check_state() - checks and responds to the current adapter state
- * @cfg:	Internal structure associated with the host.
- *
- * This routine can block and should only be used on process context.
- * Note that when waking up from waiting in limbo, the state is unknown
- * and must be checked again before proceeding.
- *
- * Return: 0 on success, -errno on failure
- */
-static int check_state(struct cxlflash_cfg *cfg)
-{
-	struct device *dev = &cfg->dev->dev;
-	int rc = 0;
-
-retry:
-	switch (cfg->state) {
-	case STATE_LIMBO:
-		dev_dbg(dev, "%s: Limbo, going to wait...\n", __func__);
-		rc = wait_event_interruptible(cfg->limbo_waitq,
-					      cfg->state != STATE_LIMBO);
-		if (unlikely(rc))
-			break;
-		goto retry;
-	case STATE_FAILTERM:
-		dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
-		rc = -ENODEV;
-		break;
-	default:
-		break;
-	}
-
-	return rc;
-}
-
 /**
 /**
  * cxlflash_afu_recover() - initiates AFU recovery
  * cxlflash_afu_recover() - initiates AFU recovery
  * @sdev:	SCSI device associated with LUN.
  * @sdev:	SCSI device associated with LUN.
@@ -1561,10 +1594,10 @@ retry:
  * quite possible for this routine to act as the kernel's EEH detection
  * quite possible for this routine to act as the kernel's EEH detection
  * source (MMIO read of mbox_r). Because of this, there is a window of
  * source (MMIO read of mbox_r). Because of this, there is a window of
  * time where an EEH might have been detected but not yet 'serviced'
  * time where an EEH might have been detected but not yet 'serviced'
- * (callback invoked, causing the device to enter limbo state). To avoid
+ * (callback invoked, causing the device to enter reset state). To avoid
  * looping in this routine during that window, a 1 second sleep is in place
  * looping in this routine during that window, a 1 second sleep is in place
  * between the time the MMIO failure is detected and the time a wait on the
  * between the time the MMIO failure is detected and the time a wait on the
- * limbo wait queue is attempted via check_state().
+ * reset wait queue is attempted via check_state().
  *
  *
  * Return: 0 on success, -errno on failure
  * Return: 0 on success, -errno on failure
  */
  */
@@ -1634,9 +1667,14 @@ retry_recover:
 	/* Test if in error state */
 	/* Test if in error state */
 	reg = readq_be(&afu->ctrl_map->mbox_r);
 	reg = readq_be(&afu->ctrl_map->mbox_r);
 	if (reg == -1) {
 	if (reg == -1) {
-		dev_dbg(dev, "%s: MMIO read fail! Wait for recovery...\n",
-			__func__);
-		mutex_unlock(&ctxi->mutex);
+		dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__);
+
+		/*
+		 * Before checking the state, put back the context obtained with
+		 * get_context() as it is no longer needed and sleep for a short
+		 * period of time (see prolog notes).
+		 */
+		put_context(ctxi);
 		ctxi = NULL;
 		ctxi = NULL;
 		ssleep(1);
 		ssleep(1);
 		rc = check_state(cfg);
 		rc = check_state(cfg);
@@ -1765,12 +1803,21 @@ static int cxlflash_disk_verify(struct scsi_device *sdev,
 	 * inquiry (i.e. the Unit attention is due to the WWN changing).
 	 * inquiry (i.e. the Unit attention is due to the WWN changing).
 	 */
 	 */
 	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
 	if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
+		/* Can't hold mutex across process_sense/read_cap16,
+		 * since we could have an intervening EEH event.
+		 */
+		ctxi->unavail = true;
+		mutex_unlock(&ctxi->mutex);
 		rc = process_sense(sdev, verify);
 		rc = process_sense(sdev, verify);
 		if (unlikely(rc)) {
 		if (unlikely(rc)) {
 			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
 			dev_err(dev, "%s: Failed to validate sense data (%d)\n",
 				__func__, rc);
 				__func__, rc);
+			mutex_lock(&ctxi->mutex);
+			ctxi->unavail = false;
 			goto out;
 			goto out;
 		}
 		}
+		mutex_lock(&ctxi->mutex);
+		ctxi->unavail = false;
 	}
 	}
 
 
 	switch (gli->mode) {
 	switch (gli->mode) {
@@ -1955,6 +2002,14 @@ out:
  * @cmd:	IOCTL command.
  * @cmd:	IOCTL command.
  * @arg:	Userspace ioctl data structure.
  * @arg:	Userspace ioctl data structure.
  *
  *
+ * A read/write semaphore is used to implement a 'drain' of currently
+ * running ioctls. The read semaphore is taken at the beginning of each
+ * ioctl thread and released upon concluding execution. Additionally the
+ * semaphore should be released and then reacquired in any ioctl execution
+ * path which will wait for an event to occur that is outside the scope of
+ * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
+ * a thread simply needs to acquire the write semaphore.
+ *
  * Return: 0 on success, -errno on failure
  * Return: 0 on success, -errno on failure
  */
  */
 int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
@@ -1989,6 +2044,9 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
 	{sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
 	};
 	};
 
 
+	/* Hold read semaphore so we can drain if needed */
+	down_read(&cfg->ioctl_rwsem);
+
 	/* Restrict command set to physical support only for internal LUN */
 	/* Restrict command set to physical support only for internal LUN */
 	if (afu->internal_lun)
 	if (afu->internal_lun)
 		switch (cmd) {
 		switch (cmd) {
@@ -2070,6 +2128,7 @@ int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
 	/* fall through to exit */
 	/* fall through to exit */
 
 
 cxlflash_ioctl_exit:
 cxlflash_ioctl_exit:
+	up_read(&cfg->ioctl_rwsem);
 	if (unlikely(rc && known_ioctl))
 	if (unlikely(rc && known_ioctl))
 		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
 		dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
 			"returned rc %d\n", __func__,
 			"returned rc %d\n", __func__,

+ 9 - 5
drivers/scsi/cxlflash/superpipe.h

@@ -28,7 +28,10 @@ extern struct cxlflash_global global;
 */
 */
 #define MC_CHUNK_SIZE     (1 << MC_RHT_NMASK)	/* in LBAs */
 #define MC_CHUNK_SIZE     (1 << MC_RHT_NMASK)	/* in LBAs */
 
 
-#define MC_DISCOVERY_TIMEOUT 5  /* 5 secs */
+#define CMD_TIMEOUT 30  /* 30 secs */
+#define CMD_RETRIES 5   /* 5 retries for scsi_execute */
+
+#define MAX_SECTOR_UNIT  512 /* max_sector is in 512 byte multiples */
 
 
 #define CHAN2PORT(_x)	((_x) + 1)
 #define CHAN2PORT(_x)	((_x) + 1)
 #define PORT2CHAN(_x)	((_x) - 1)
 #define PORT2CHAN(_x)	((_x) - 1)
@@ -60,7 +63,6 @@ struct llun_info {
 	u32 lun_index;		/* Index in the LUN table */
 	u32 lun_index;		/* Index in the LUN table */
 	u32 host_no;		/* host_no from Scsi_host */
 	u32 host_no;		/* host_no from Scsi_host */
 	u32 port_sel;		/* What port to use for this LUN */
 	u32 port_sel;		/* What port to use for this LUN */
-	bool newly_created;	/* Whether the LUN was just discovered */
 	bool in_table;		/* Whether a LUN table entry was created */
 	bool in_table;		/* Whether a LUN table entry was created */
 
 
 	u8 wwid[16];		/* Keep a duplicate copy here? */
 	u8 wwid[16];		/* Keep a duplicate copy here? */
@@ -84,17 +86,17 @@ enum ctx_ctrl {
 	CTX_CTRL_FILE		= (1 << 5)
 	CTX_CTRL_FILE		= (1 << 5)
 };
 };
 
 
-#define ENCODE_CTXID(_ctx, _id)	(((((u64)_ctx) & 0xFFFFFFFF0) << 28) | _id)
+#define ENCODE_CTXID(_ctx, _id)	(((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
 #define DECODE_CTXID(_val)	(_val & 0xFFFFFFFF)
 #define DECODE_CTXID(_val)	(_val & 0xFFFFFFFF)
 
 
 struct ctx_info {
 struct ctx_info {
-	struct sisl_ctrl_map *ctrl_map; /* initialized at startup */
+	struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */
 	struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
 	struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
 					     alloc/free on attach/detach */
 					     alloc/free on attach/detach */
 	u32 rht_out;		/* Number of checked out RHT entries */
 	u32 rht_out;		/* Number of checked out RHT entries */
 	u32 rht_perms;		/* User-defined permissions for RHT entries */
 	u32 rht_perms;		/* User-defined permissions for RHT entries */
 	struct llun_info **rht_lun;       /* Mapping of RHT entries to LUNs */
 	struct llun_info **rht_lun;       /* Mapping of RHT entries to LUNs */
-	bool *rht_needs_ws;	/* User-desired write-same function per RHTE */
+	u8 *rht_needs_ws;	/* User-desired write-same function per RHTE */
 
 
 	struct cxl_ioctl_start_work work;
 	struct cxl_ioctl_start_work work;
 	u64 ctxid;
 	u64 ctxid;
@@ -144,4 +146,6 @@ void cxlflash_ba_terminate(struct ba_lun *);
 
 
 int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
 int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
 
 
+int check_state(struct cxlflash_cfg *);
+
 #endif /* ifndef _CXLFLASH_SUPERPIPE_H */
 #endif /* ifndef _CXLFLASH_SUPERPIPE_H */
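
The ENCODE_CTXID change above adds a ULL suffix so the 36-bit mask is explicitly a 64-bit constant before the << 28. The macro pair can be exercised stand-alone; the context cookie and id values below are made up.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Copied from superpipe.h */
#define ENCODE_CTXID(_ctx, _id)	(((((u64)_ctx) & 0xFFFFFFFF0ULL) << 28) | _id)
#define DECODE_CTXID(_val)	(_val & 0xFFFFFFFF)

int main(void)
{
	u64 ctx = 0x123456789ULL;	/* made-up cxl process element cookie */
	u64 id  = 0x1f;			/* made-up context id                 */
	u64 enc = ENCODE_CTXID(ctx, id);

	printf("encoded=%#llx decoded id=%#llx\n",
	       (unsigned long long)enc,
	       (unsigned long long)DECODE_CTXID(enc));
	return 0;
}
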

+ 49 - 19
drivers/scsi/cxlflash/vlun.c

@@ -132,7 +132,7 @@ static int ba_init(struct ba_lun *ba_lun)
 		return -ENOMEM;
 		return -ENOMEM;
 	}
 	}
 
 
-	/* Pass the allocated lun info as a handle to the user */
+	/* Pass the allocated LUN info as a handle to the user */
 	ba_lun->ba_lun_handle = bali;
 	ba_lun->ba_lun_handle = bali;
 
 
 	pr_debug("%s: Successfully initialized the LUN: "
 	pr_debug("%s: Successfully initialized the LUN: "
@@ -165,7 +165,7 @@ static int find_free_range(u32 low,
 			num_bits = (sizeof(*lam) * BITS_PER_BYTE);
 			num_bits = (sizeof(*lam) * BITS_PER_BYTE);
 			bit_pos = find_first_bit(lam, num_bits);
 			bit_pos = find_first_bit(lam, num_bits);
 
 
-			pr_devel("%s: Found free bit %llX in lun "
+			pr_devel("%s: Found free bit %llX in LUN "
 				 "map entry %llX at bitmap index = %X\n",
 				 "map entry %llX at bitmap index = %X\n",
 				 __func__, bit_pos, bali->lun_alloc_map[i],
 				 __func__, bit_pos, bali->lun_alloc_map[i],
 				 i);
 				 i);
@@ -400,6 +400,24 @@ static int init_vlun(struct llun_info *lli)
  * @lba:	Logical block address to start write same.
  * @lba:	Logical block address to start write same.
  * @nblks:	Number of logical blocks to write same.
  * @nblks:	Number of logical blocks to write same.
  *
  *
+ * The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
+ * while in scsi_execute(), the EEH handler will attempt to recover. As part of
+ * the recovery, the handler drains all currently running ioctls, waiting until
+ * they have completed before proceeding with a reset. As this routine is used
+ * on the ioctl path, this can create a condition where the EEH handler becomes
+ * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
+ * temporarily unmark this thread as an ioctl thread by releasing the ioctl read
+ * semaphore. This will allow the EEH handler to proceed with a recovery while
+ * this thread is still running. Once the scsi_execute() returns, reacquire the
+ * ioctl read semaphore and check the adapter state in case it changed while
+ * inside of scsi_execute(). The state check will wait if the adapter is still
+ * being recovered or return a failure if the recovery failed. In the event that
+ * the adapter reset failed, simply return the failure as the ioctl would be
+ * unable to continue.
+ *
+ * Note that the above puts a requirement on this routine to only be called on
+ * an ioctl thread.
+ *
  * Return: 0 on success, -errno on failure
  * Return: 0 on success, -errno on failure
  */
  */
 static int write_same16(struct scsi_device *sdev,
 static int write_same16(struct scsi_device *sdev,
@@ -414,7 +432,7 @@ static int write_same16(struct scsi_device *sdev,
 	int ws_limit = SISLITE_MAX_WS_BLOCKS;
 	int ws_limit = SISLITE_MAX_WS_BLOCKS;
 	u64 offset = lba;
 	u64 offset = lba;
 	int left = nblks;
 	int left = nblks;
-	u32 tout = sdev->request_queue->rq_timeout;
+	u32 to = sdev->request_queue->rq_timeout;
 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
 	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
 	struct device *dev = &cfg->dev->dev;
 	struct device *dev = &cfg->dev->dev;
 
 
@@ -433,8 +451,20 @@ static int write_same16(struct scsi_device *sdev,
 		put_unaligned_be32(ws_limit < left ? ws_limit : left,
 		put_unaligned_be32(ws_limit < left ? ws_limit : left,
 				   &scsi_cmd[10]);
 				   &scsi_cmd[10]);
 
 
+		/* Drop the ioctl read semahpore across lengthy call */
+		up_read(&cfg->ioctl_rwsem);
 		result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
 		result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
-				      CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
+				      CMD_BUFSIZE, sense_buf, to, CMD_RETRIES,
+				      0, NULL);
+		down_read(&cfg->ioctl_rwsem);
+		rc = check_state(cfg);
+		if (rc) {
+			dev_err(dev, "%s: Failed state! result=0x08%X\n",
+				__func__, result);
+			rc = -ENODEV;
+			goto out;
+		}
+
 		if (result) {
 		if (result) {
 			dev_err_ratelimited(dev, "%s: command failed for "
 			dev_err_ratelimited(dev, "%s: command failed for "
 					    "offset %lld result=0x%x\n",
 					    "offset %lld result=0x%x\n",
@@ -681,14 +711,14 @@ out:
 }
 }
 
 
 /**
 /**
- * _cxlflash_vlun_resize() - changes the size of a virtual lun
+ * _cxlflash_vlun_resize() - changes the size of a virtual LUN
  * @sdev:	SCSI device associated with LUN owning virtual LUN.
  * @sdev:	SCSI device associated with LUN owning virtual LUN.
  * @ctxi:	Context owning resources.
  * @ctxi:	Context owning resources.
  * @resize:	Resize ioctl data structure.
  * @resize:	Resize ioctl data structure.
  *
  *
  * On successful return, the user is informed of the new size (in blocks)
  * On successful return, the user is informed of the new size (in blocks)
- * of the virtual lun in last LBA format. When the size of the virtual
- * lun is zero, the last LBA is reflected as -1. See comment in the
+ * of the virtual LUN in last LBA format. When the size of the virtual
+ * LUN is zero, the last LBA is reflected as -1. See comment in the
  * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
  * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
  * on the error recovery list.
  * on the error recovery list.
  *
  *
@@ -785,7 +815,7 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
 	u32 chan;
 	u32 chan;
 	u32 lind;
 	u32 lind;
 	struct afu *afu = cfg->afu;
 	struct afu *afu = cfg->afu;
-	struct sisl_global_map *agm = &afu->afu_map->global;
+	struct sisl_global_map __iomem *agm = &afu->afu_map->global;
 
 
 	mutex_lock(&global.mutex);
 	mutex_lock(&global.mutex);
 
 
@@ -830,7 +860,7 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
 	u32 lind;
 	u32 lind;
 	int rc = 0;
 	int rc = 0;
 	struct afu *afu = cfg->afu;
 	struct afu *afu = cfg->afu;
-	struct sisl_global_map *agm = &afu->afu_map->global;
+	struct sisl_global_map __iomem *agm = &afu->afu_map->global;
 
 
 	mutex_lock(&global.mutex);
 	mutex_lock(&global.mutex);
 
 
@@ -885,8 +915,8 @@ out:
  * @arg:	UVirtual ioctl data structure.
  * @arg:	UVirtual ioctl data structure.
  *
  *
  * On successful return, the user is informed of the resource handle
  * On successful return, the user is informed of the resource handle
- * to be used to identify the virtual lun and the size (in blocks) of
- * the virtual lun in last LBA format. When the size of the virtual lun
+ * to be used to identify the virtual LUN and the size (in blocks) of
+ * the virtual LUN in last LBA format. When the size of the virtual LUN
  * is zero, the last LBA is reflected as -1.
  * is zero, the last LBA is reflected as -1.
  *
  *
  * Return: 0 on success, -errno on failure
  * Return: 0 on success, -errno on failure
@@ -914,16 +944,9 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
 
 
 	pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
 	pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
 
 
+	/* Setup the LUNs block allocator on first call */
 	mutex_lock(&gli->mutex);
 	mutex_lock(&gli->mutex);
 	if (gli->mode == MODE_NONE) {
 	if (gli->mode == MODE_NONE) {
-		/* Setup the LUN table and block allocator on first call */
-		rc = init_luntable(cfg, lli);
-		if (rc) {
-			dev_err(dev, "%s: call to init_luntable failed "
-				"rc=%d!\n", __func__, rc);
-			goto err0;
-		}
-
 		rc = init_vlun(lli);
 		rc = init_vlun(lli);
 		if (rc) {
 		if (rc) {
 			dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
 			dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
@@ -941,6 +964,13 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
 	}
 	}
 	mutex_unlock(&gli->mutex);
 	mutex_unlock(&gli->mutex);
 
 
+	rc = init_luntable(cfg, lli);
+	if (rc) {
+		dev_err(dev, "%s: call to init_luntable failed rc=%d!\n",
+			__func__, rc);
+		goto err1;
+	}
+
 	ctxi = get_context(cfg, rctxid, lli, 0);
 	ctxi = get_context(cfg, rctxid, lli, 0);
 	if (unlikely(!ctxi)) {
 	if (unlikely(!ctxi)) {
 		dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
 		dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);

+ 38 - 8
drivers/scsi/fnic/fnic_fcs.c

@@ -939,6 +939,7 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
 	struct sk_buff *skb;
 	struct sk_buff *skb;
 	u16 len;
 	u16 len;
 	dma_addr_t pa;
 	dma_addr_t pa;
+	int r;
 
 
 	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
 	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
 	skb = dev_alloc_skb(len);
 	skb = dev_alloc_skb(len);
@@ -952,8 +953,19 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
 	skb_reset_network_header(skb);
 	skb_reset_network_header(skb);
 	skb_put(skb, len);
 	skb_put(skb, len);
 	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
 	pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+
+	r = pci_dma_mapping_error(fnic->pdev, pa);
+	if (r) {
+		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+		goto free_skb;
+	}
+
 	fnic_queue_rq_desc(rq, skb, pa, len);
 	fnic_queue_rq_desc(rq, skb, pa, len);
 	return 0;
 	return 0;
+
+free_skb:
+	kfree_skb(skb);
+	return r;
 }
 }
 
 
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
@@ -981,6 +993,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 	struct ethhdr *eth_hdr;
 	struct ethhdr *eth_hdr;
 	struct vlan_ethhdr *vlan_hdr;
 	struct vlan_ethhdr *vlan_hdr;
 	unsigned long flags;
 	unsigned long flags;
+	int r;
 
 
 	if (!fnic->vlan_hw_insert) {
 	if (!fnic->vlan_hw_insert) {
 		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
 		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1003,18 +1016,27 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 
 
 	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
 	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
 
 
-	spin_lock_irqsave(&fnic->wq_lock[0], flags);
-	if (!vnic_wq_desc_avail(wq)) {
-		pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
-		spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-		kfree_skb(skb);
-		return;
+	r = pci_dma_mapping_error(fnic->pdev, pa);
+	if (r) {
+		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+		goto free_skb;
 	}
 	}
 
 
+	spin_lock_irqsave(&fnic->wq_lock[0], flags);
+	if (!vnic_wq_desc_avail(wq))
+		goto irq_restore;
+
 	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
 	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
 			       0 /* hw inserts cos value */,
 			       0 /* hw inserts cos value */,
 			       fnic->vlan_id, 1);
 			       fnic->vlan_id, 1);
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+	return;
+
+irq_restore:
+	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
+	pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+free_skb:
+	kfree_skb(skb);
 }
 }
 
 
 /*
 /*
@@ -1071,6 +1093,12 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 
 
 	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
 	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
 
 
+	ret = pci_dma_mapping_error(fnic->pdev, pa);
+	if (ret) {
+		printk(KERN_ERR "DMA map failed with error %d\n", ret);
+		goto free_skb_on_err;
+	}
+
 	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
 	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
 				(char *)eth_hdr, tot_len)) != 0) {
 				(char *)eth_hdr, tot_len)) != 0) {
 		printk(KERN_ERR "fnic ctlr frame trace error!!!");
 		printk(KERN_ERR "fnic ctlr frame trace error!!!");
@@ -1082,15 +1110,17 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 		pci_unmap_single(fnic->pdev, pa,
 		pci_unmap_single(fnic->pdev, pa,
 				 tot_len, PCI_DMA_TODEVICE);
 				 tot_len, PCI_DMA_TODEVICE);
 		ret = -1;
 		ret = -1;
-		goto fnic_send_frame_end;
+		goto irq_restore;
 	}
 	}
 
 
 	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
 	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
 			   0 /* hw inserts cos value */,
 			   0 /* hw inserts cos value */,
 			   fnic->vlan_id, 1, 1, 1);
 			   fnic->vlan_id, 1, 1, 1);
-fnic_send_frame_end:
+
+irq_restore:
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
 
 
+free_skb_on_err:
 	if (ret)
 	if (ret)
 		dev_kfree_skb_any(fp_skb(fp));
 		dev_kfree_skb_any(fp_skb(fp));
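
The fnic changes above add the mandatory pci_dma_mapping_error() check after each pci_map_single() and unwind (unmap, then free the skb) on any later failure. The shape of that pattern, reduced to a kernel-style sketch; demo_queue_frame() is a stub standing in for posting to the work queue.

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Stub: pretend the hardware queue always has room. */
static int demo_queue_frame(struct pci_dev *pdev, struct sk_buff *skb,
			    dma_addr_t pa)
{
	return 0;
}

static int demo_send_skb(struct pci_dev *pdev, struct sk_buff *skb)
{
	dma_addr_t pa;
	int r;

	pa = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	r = pci_dma_mapping_error(pdev, pa);
	if (r) {
		/* never hand an unchecked dma_addr_t to the hardware */
		kfree_skb(skb);
		return r;
	}

	/*
	 * Any failure from here on must unmap before freeing, mirroring
	 * the irq_restore/free_skb label ordering in the hunks above.
	 */
	if (demo_queue_frame(pdev, skb, pa)) {
		pci_unmap_single(pdev, pa, skb->len, PCI_DMA_TODEVICE);
		kfree_skb(skb);
		return -ENOMEM;
	}

	return 0;
}
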
 
 

+ 16 - 0
drivers/scsi/fnic/fnic_scsi.c

@@ -330,6 +330,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 	int flags;
 	int flags;
 	u8 exch_flags;
 	u8 exch_flags;
 	struct scsi_lun fc_lun;
 	struct scsi_lun fc_lun;
+	int r;
 
 
 	if (sg_count) {
 	if (sg_count) {
 		/* For each SGE, create a device desc entry */
 		/* For each SGE, create a device desc entry */
@@ -346,6 +347,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 			 io_req->sgl_list,
 			 io_req->sgl_list,
 			 sizeof(io_req->sgl_list[0]) * sg_count,
 			 sizeof(io_req->sgl_list[0]) * sg_count,
 			 PCI_DMA_TODEVICE);
 			 PCI_DMA_TODEVICE);
+
+		r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
+		if (r) {
+			printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+			return SCSI_MLQUEUE_HOST_BUSY;
+		}
 	}
 	}
 
 
 	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
 	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
@@ -353,6 +360,15 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
 					      SCSI_SENSE_BUFFERSIZE,
 					      SCSI_SENSE_BUFFERSIZE,
 					      PCI_DMA_FROMDEVICE);
 					      PCI_DMA_FROMDEVICE);
 
 
+	r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
+	if (r) {
+		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+				sizeof(io_req->sgl_list[0]) * sg_count,
+				PCI_DMA_TODEVICE);
+		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
 	int_to_scsilun(sc->device->lun, &fc_lun);
 	int_to_scsilun(sc->device->lun, &fc_lun);
 
 
 	/* Enqueue the descriptor in the Copy WQ */
 	/* Enqueue the descriptor in the Copy WQ */
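
In fnic_queue_wq_copy_desc() above, the new check after mapping the sense buffer also has to undo the SGL mapping that already succeeded, and both failure paths return SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues the command. Reduced to its shape, with a hypothetical two-step mapping helper:

#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Illustrative only: sgl/sense stand in for io_req->sgl_list and the
 * command's sense buffer. */
static int demo_map_for_io(struct pci_dev *pdev, void *sgl, size_t sgl_len,
			   void *sense)
{
	dma_addr_t sgl_pa, sense_pa;

	sgl_pa = pci_map_single(pdev, sgl, sgl_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, sgl_pa))
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer retries later */

	sense_pa = pci_map_single(pdev, sense, SCSI_SENSE_BUFFERSIZE,
				  PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, sense_pa)) {
		/* undo the mapping that already succeeded before bailing */
		pci_unmap_single(pdev, sgl_pa, sgl_len, PCI_DMA_TODEVICE);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* ... hand sgl_pa/sense_pa to the copy WQ descriptor ... */
	return 0;
}
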

+ 5 - 3
drivers/scsi/lpfc/lpfc.h

@@ -495,15 +495,17 @@ struct unsol_rcv_ct_ctx {
 #define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
 #define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
 #define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
 #define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
 #define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
 #define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
-#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_16G
-#define LPFC_USER_LINK_SPEED_BITMAP ((1 << LPFC_USER_LINK_SPEED_16G) | \
+#define LPFC_USER_LINK_SPEED_32G	32	/* 32 Gigabaud */
+#define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_32G
+#define LPFC_USER_LINK_SPEED_BITMAP  ((1ULL << LPFC_USER_LINK_SPEED_32G) | \
+				     (1 << LPFC_USER_LINK_SPEED_16G) | \
 				     (1 << LPFC_USER_LINK_SPEED_10G) | \
 				     (1 << LPFC_USER_LINK_SPEED_10G) | \
 				     (1 << LPFC_USER_LINK_SPEED_8G) | \
 				     (1 << LPFC_USER_LINK_SPEED_8G) | \
 				     (1 << LPFC_USER_LINK_SPEED_4G) | \
 				     (1 << LPFC_USER_LINK_SPEED_4G) | \
 				     (1 << LPFC_USER_LINK_SPEED_2G) | \
 				     (1 << LPFC_USER_LINK_SPEED_2G) | \
 				     (1 << LPFC_USER_LINK_SPEED_1G) | \
 				     (1 << LPFC_USER_LINK_SPEED_1G) | \
 				     (1 << LPFC_USER_LINK_SPEED_AUTO))
 				     (1 << LPFC_USER_LINK_SPEED_AUTO))
-#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16"
+#define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32"
 
 
 enum nemb_type {
 enum nemb_type {
 	nemb_mse = 1,
 	nemb_mse = 1,
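
The 32G entry in LPFC_USER_LINK_SPEED_BITMAP above needs the 1ULL suffix: shifting a plain int left by 32 is undefined, so the new bit has to be formed in a 64-bit type. A short user-space demonstration; the speed value is simply the one from the header.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int speed = 32;			/* LPFC_USER_LINK_SPEED_32G */
	uint64_t ok = 1ULL << speed;	/* well defined, bit 32 set */

	/* (1 << speed) would shift a 32-bit int by its full width: UB */
	printf("bitmap bit for 32G: %#llx\n", (unsigned long long)ok);
	return 0;
}
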

+ 13 - 10
drivers/scsi/lpfc/lpfc_attr.c

@@ -1642,8 +1642,6 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_hba   *phba = vport->phba;\
 	struct lpfc_hba   *phba = vport->phba;\
-	uint val = 0;\
-	val = phba->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%d\n",\
 	return snprintf(buf, PAGE_SIZE, "%d\n",\
 			phba->cfg_##attr);\
 			phba->cfg_##attr);\
 }
 }
@@ -1808,8 +1806,6 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	uint val = 0;\
-	val = vport->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
 	return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_##attr);\
 }
 }
 
 
@@ -1835,8 +1831,6 @@ lpfc_##attr##_show(struct device *dev, struct device_attribute *attr, \
 { \
 { \
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct Scsi_Host  *shost = class_to_shost(dev);\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;\
-	uint val = 0;\
-	val = vport->cfg_##attr;\
 	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
 	return snprintf(buf, PAGE_SIZE, "%#x\n", vport->cfg_##attr);\
 }
 }
 
 
@@ -3282,15 +3276,20 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
 
 
 	if (val >= 0 && val <= 6) {
 	if (val >= 0 && val <= 6) {
 		prev_val = phba->cfg_topology;
 		prev_val = phba->cfg_topology;
-		phba->cfg_topology = val;
 		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
 		if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
 			val == 4) {
 			val == 4) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
 				"3113 Loop mode not supported at speed %d\n",
 				"3113 Loop mode not supported at speed %d\n",
-				phba->cfg_link_speed);
-			phba->cfg_topology = prev_val;
+				val);
 			return -EINVAL;
 			return -EINVAL;
 		}
 		}
+		if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+			val == 4) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				"3114 Loop mode not supported\n");
+			return -EINVAL;
+		}
+		phba->cfg_topology = val;
 		if (nolip)
 		if (nolip)
 			return strlen(buf);
 			return strlen(buf);
 
 
@@ -3731,7 +3730,8 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
 	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
 	    ((val == LPFC_USER_LINK_SPEED_4G) && !(phba->lmt & LMT_4Gb)) ||
 	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
 	    ((val == LPFC_USER_LINK_SPEED_8G) && !(phba->lmt & LMT_8Gb)) ||
 	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
 	    ((val == LPFC_USER_LINK_SPEED_10G) && !(phba->lmt & LMT_10Gb)) ||
-	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) {
+	    ((val == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb)) ||
+	    ((val == LPFC_USER_LINK_SPEED_32G) && !(phba->lmt & LMT_32Gb))) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2879 lpfc_link_speed attribute cannot be set "
 				"2879 lpfc_link_speed attribute cannot be set "
 				"to %d. Speed is not supported by this port.\n",
 				"to %d. Speed is not supported by this port.\n",
@@ -5267,6 +5267,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
 		case LPFC_LINK_SPEED_16GHZ:
 		case LPFC_LINK_SPEED_16GHZ:
 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
 			break;
 			break;
+		case LPFC_LINK_SPEED_32GHZ:
+			fc_host_speed(shost) = FC_PORTSPEED_32GBIT;
+			break;
 		default:
 		default:
 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 			break;
 			break;

+ 0 - 20
drivers/scsi/lpfc/lpfc_bsg.c

@@ -904,7 +904,6 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 {
 {
 	uint32_t evt_req_id = 0;
 	uint32_t evt_req_id = 0;
 	uint32_t cmd;
 	uint32_t cmd;
-	uint32_t len;
 	struct lpfc_dmabuf *dmabuf = NULL;
 	struct lpfc_dmabuf *dmabuf = NULL;
 	struct lpfc_bsg_event *evt;
 	struct lpfc_bsg_event *evt;
 	struct event_data *evt_dat = NULL;
 	struct event_data *evt_dat = NULL;
@@ -946,7 +945,6 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
 	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
 	evt_req_id = ct_req->FsType;
 	evt_req_id = ct_req->FsType;
 	cmd = ct_req->CommandResponse.bits.CmdRsp;
 	cmd = ct_req->CommandResponse.bits.CmdRsp;
-	len = ct_req->CommandResponse.bits.Size;
 	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
 	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
 		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
 		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
 
 
@@ -2988,7 +2986,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 {
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_hba *phba = vport->phba;
-	struct diag_mode_test *diag_mode;
 	struct lpfc_bsg_event *evt;
 	struct lpfc_bsg_event *evt;
 	struct event_data *evdat;
 	struct event_data *evdat;
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli *psli = &phba->sli;
@@ -3031,8 +3028,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
 		rc = -EINVAL;
 		rc = -EINVAL;
 		goto loopback_test_exit;
 		goto loopback_test_exit;
 	}
 	}
-	diag_mode = (struct diag_mode_test *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
 
 
 	if ((phba->link_state == LPFC_HBA_ERROR) ||
 	if ((phba->link_state == LPFC_HBA_ERROR) ||
 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
 	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@@ -3293,7 +3288,6 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
 {
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_hba *phba = vport->phba;
-	struct get_mgmt_rev *event_req;
 	struct get_mgmt_rev_reply *event_reply;
 	struct get_mgmt_rev_reply *event_reply;
 	int rc = 0;
 	int rc = 0;
 
 
@@ -3306,9 +3300,6 @@ lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
 		goto job_error;
 		goto job_error;
 	}
 	}
 
 
-	event_req = (struct get_mgmt_rev *)
-		job->request->rqst_data.h_vendor.vendor_cmd;
-
 	event_reply = (struct get_mgmt_rev_reply *)
 	event_reply = (struct get_mgmt_rev_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;
 		job->reply->reply_data.vendor_reply.vendor_rsp;
 
 
@@ -4348,7 +4339,6 @@ static int
 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
 			struct lpfc_dmabuf *dmabuf)
 			struct lpfc_dmabuf *dmabuf)
 {
 {
-	struct lpfc_sli_config_mbox *sli_cfg_mbx;
 	struct bsg_job_data *dd_data = NULL;
 	struct bsg_job_data *dd_data = NULL;
 	LPFC_MBOXQ_t *pmboxq = NULL;
 	LPFC_MBOXQ_t *pmboxq = NULL;
 	MAILBOX_t *pmb;
 	MAILBOX_t *pmb;
@@ -4362,9 +4352,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	phba->mbox_ext_buf_ctx.seqNum++;
 	phba->mbox_ext_buf_ctx.seqNum++;
 	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
 	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
 
 
-	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
-			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
-
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 	if (!dd_data) {
 	if (!dd_data) {
 		rc = -ENOMEM;
 		rc = -ENOMEM;
@@ -4606,7 +4593,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	uint32_t transmit_length, receive_length, mode;
 	uint32_t transmit_length, receive_length, mode;
 	struct lpfc_mbx_sli4_config *sli4_config;
 	struct lpfc_mbx_sli4_config *sli4_config;
 	struct lpfc_mbx_nembed_cmd *nembed_sge;
 	struct lpfc_mbx_nembed_cmd *nembed_sge;
-	struct mbox_header *header;
 	struct ulp_bde64 *bde;
 	struct ulp_bde64 *bde;
 	uint8_t *ext = NULL;
 	uint8_t *ext = NULL;
 	int rc = 0;
 	int rc = 0;
@@ -4804,8 +4790,6 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 				/* rebuild the command for sli4 using our
 				/* rebuild the command for sli4 using our
 				 * own buffers like we do for biu diags
 				 * own buffers like we do for biu diags
 				 */
 				 */
-				header = (struct mbox_header *)
-						&pmb->un.varWords[0];
 				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
 				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
 						&pmb->un.varWords[0];
 						&pmb->un.varWords[0];
 				receive_length = nembed_sge->sge[0].length;
 				receive_length = nembed_sge->sge[0].length;
@@ -5048,7 +5032,6 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 	IOCB_t *cmd;
 	IOCB_t *cmd;
 	int rc = 0;
 	int rc = 0;
 	struct menlo_command *menlo_cmd;
 	struct menlo_command *menlo_cmd;
-	struct menlo_response *menlo_resp;
 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
 	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
 	int request_nseg;
 	int request_nseg;
 	int reply_nseg;
 	int reply_nseg;
@@ -5088,9 +5071,6 @@ lpfc_menlo_cmd(struct fc_bsg_job *job)
 	menlo_cmd = (struct menlo_command *)
 	menlo_cmd = (struct menlo_command *)
 		job->request->rqst_data.h_vendor.vendor_cmd;
 		job->request->rqst_data.h_vendor.vendor_cmd;
 
 
-	menlo_resp = (struct menlo_response *)
-		job->reply->reply_data.vendor_reply.vendor_rsp;
-
 	/* allocate our bsg tracking structure */
 	/* allocate our bsg tracking structure */
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 	if (!dd_data) {
 	if (!dd_data) {

+ 6 - 5
drivers/scsi/lpfc/lpfc_ct.c

@@ -55,6 +55,7 @@
 #define HBA_PORTSPEED_10GBIT		0x0004	/* 10 GBit/sec */
 #define HBA_PORTSPEED_8GBIT		0x0010	/* 8 GBit/sec */
 #define HBA_PORTSPEED_16GBIT		0x0020	/* 16 GBit/sec */
+#define HBA_PORTSPEED_32GBIT		0x0040  /* 32 GBit/sec */
 #define HBA_PORTSPEED_UNKNOWN		0x0800	/* Unknown */
 
 #define FOURBYTES	4
@@ -575,7 +576,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_vport *vport = cmdiocb->vport;
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
-	struct lpfc_dmabuf *bmp;
 	struct lpfc_dmabuf *outp;
 	struct lpfc_sli_ct_request *CTrsp;
 	struct lpfc_nodelist *ndlp;
@@ -588,7 +588,6 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	cmdiocb->context_un.rsp_iocb = rspiocb;
 
 	outp = (struct lpfc_dmabuf *) cmdiocb->context2;
-	bmp = (struct lpfc_dmabuf *) cmdiocb->context3;
 	irsp = &rspiocb->iocb;
 
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
@@ -1733,12 +1732,9 @@ hba_out:
 	case SLI_MGMT_RPRT:
 	case SLI_MGMT_RPA:
 		{
-			lpfc_vpd_t *vp;
 			struct serv_parm *hsp;
 			int len = 0;
 
-			vp = &phba->vpd;
-
 			if (cmdcode == SLI_MGMT_RPRT) {
 				rh = (struct lpfc_fdmi_reg_hba *)
 					&CtReq->un.PortID;
@@ -1778,6 +1774,8 @@ hba_out:
 			ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_SPEED);
 			ad->AttrLen = cpu_to_be16(FOURBYTES + 4);
 			ae->un.SupportSpeed = 0;
+			if (phba->lmt & LMT_32Gb)
+				ae->un.SupportSpeed |= HBA_PORTSPEED_32GBIT;
 			if (phba->lmt & LMT_16Gb)
 				ae->un.SupportSpeed |= HBA_PORTSPEED_16GBIT;
 			if (phba->lmt & LMT_10Gb)
@@ -1821,6 +1819,9 @@ hba_out:
 			case LPFC_LINK_SPEED_16GHZ:
 				ae->un.PortSpeed = HBA_PORTSPEED_16GBIT;
 				break;
+			case LPFC_LINK_SPEED_32GHZ:
+				ae->un.PortSpeed = HBA_PORTSPEED_32GBIT;
+				break;
 			default:
 				ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN;
 				break;

+ 28 - 72
drivers/scsi/lpfc/lpfc_els.c

@@ -457,11 +457,9 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
 	LPFC_MBOXQ_t *mboxq;
 	LPFC_MBOXQ_t *mboxq;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nodelist *ndlp;
-	struct serv_parm *sp;
 	struct lpfc_dmabuf *dmabuf;
 	struct lpfc_dmabuf *dmabuf;
 	int rc = 0;
 	int rc = 0;
 
 
-	sp = &phba->fc_fabparam;
 	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
 	/* move forward in case of SLI4 FC port loopback test and pt2pt mode */
 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
 	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
 	    !(phba->link_flag & LS_LOOPBACK_MODE) &&
@@ -1028,9 +1026,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 stop_rr_fcf_flogi:
 stop_rr_fcf_flogi:
 		/* FLOGI failure */
 		/* FLOGI failure */
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-				"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
+				"2858 FLOGI failure Status:x%x/x%x TMO:x%x "
+				"Data x%x x%x\n",
 				irsp->ulpStatus, irsp->un.ulpWord[4],
 				irsp->ulpStatus, irsp->un.ulpWord[4],
-				irsp->ulpTimeout);
+				irsp->ulpTimeout, phba->hba_flag,
+				phba->fcf.fcf_flag);
 
 
 		/* Check for retry */
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
@@ -1154,6 +1154,9 @@ stop_rr_fcf_flogi:
 	}
 	}
 
 
 flogifail:
 flogifail:
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+	spin_unlock_irq(&phba->hbalock);
 	lpfc_nlp_put(ndlp);
 	lpfc_nlp_put(ndlp);
 
 
 	if (!lpfc_error_lost_link(irsp)) {
 	if (!lpfc_error_lost_link(irsp)) {
@@ -1205,14 +1208,11 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct serv_parm *sp;
 	struct serv_parm *sp;
 	IOCB_t *icmd;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli_ring *pring;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	uint32_t tmo;
 	uint32_t tmo;
 	int rc;
 	int rc;
 
 
-	pring = &phba->sli.ring[LPFC_ELS_RING];
-
 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
 	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_FLOGI);
 				     ndlp->nlp_DID, ELS_CMD_FLOGI);
@@ -1454,8 +1454,6 @@ lpfc_initial_fdisc(struct lpfc_vport *vport)
 void
 void
 lpfc_more_plogi(struct lpfc_vport *vport)
 lpfc_more_plogi(struct lpfc_vport *vport)
 {
 {
-	int sentplogi;
-
 	if (vport->num_disc_nodes)
 	if (vport->num_disc_nodes)
 		vport->num_disc_nodes--;
 		vport->num_disc_nodes--;
 
 
@@ -1468,7 +1466,7 @@ lpfc_more_plogi(struct lpfc_vport *vport)
 	/* Check to see if there are more PLOGIs to be sent */
 	/* Check to see if there are more PLOGIs to be sent */
 	if (vport->fc_flag & FC_NLP_MORE)
 	if (vport->fc_flag & FC_NLP_MORE)
 		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
 		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
-		sentplogi = lpfc_els_disc_plogi(vport);
+		lpfc_els_disc_plogi(vport);
 
 
 	return;
 	return;
 }
 }
@@ -1956,16 +1954,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 {
 {
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
 	struct serv_parm *sp;
 	struct serv_parm *sp;
-	IOCB_t *icmd;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	int ret;
 	int ret;
 
 
-	psli = &phba->sli;
-
 	ndlp = lpfc_findnode_did(vport, did);
 	ndlp = lpfc_findnode_did(vport, did);
 	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
 	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
 		ndlp = NULL;
 		ndlp = NULL;
@@ -1977,7 +1971,6 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 	if (!elsiocb)
 	if (!elsiocb)
 		return 1;
 		return 1;
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 
 	/* For PLOGI request, remainder of payload is service parameters */
 	/* For PLOGI request, remainder of payload is service parameters */
@@ -2034,10 +2027,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_vport *vport = cmdiocb->vport;
 	struct lpfc_vport *vport = cmdiocb->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
 	IOCB_t *irsp;
-	struct lpfc_sli *psli;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nodelist *ndlp;
 
 
-	psli = &phba->sli;
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	cmdiocb->context_un.rsp_iocb = rspiocb;
 	cmdiocb->context_un.rsp_iocb = rspiocb;
 
 
@@ -2117,7 +2108,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_hba *phba = vport->phba;
 	PRLI *npr;
 	PRLI *npr;
-	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
@@ -2128,7 +2118,6 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!elsiocb)
 	if (!elsiocb)
 		return 1;
 		return 1;
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 
 	/* For PRLI request, remainder of payload is service parameters */
 	/* For PRLI request, remainder of payload is service parameters */
@@ -2413,7 +2402,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
 	ADISC *ap;
 	ADISC *ap;
-	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
@@ -2424,7 +2412,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!elsiocb)
 	if (!elsiocb)
 		return 1;
 		return 1;
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 
 	/* For ADISC request, remainder of payload is service parameters */
 	/* For ADISC request, remainder of payload is service parameters */
@@ -2478,12 +2465,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_vport *vport = ndlp->vport;
 	struct lpfc_vport *vport = ndlp->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 	IOCB_t *irsp;
 	IOCB_t *irsp;
-	struct lpfc_sli *psli;
 	struct lpfcMboxq *mbox;
 	struct lpfcMboxq *mbox;
 	unsigned long flags;
 	unsigned long flags;
 	uint32_t skip_recovery = 0;
 	uint32_t skip_recovery = 0;
 
 
-	psli = &phba->sli;
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	cmdiocb->context_un.rsp_iocb = rspiocb;
 	cmdiocb->context_un.rsp_iocb = rspiocb;
 
 
@@ -2609,7 +2594,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
-	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
@@ -2628,7 +2612,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!elsiocb)
 	if (!elsiocb)
 		return 1;
 		return 1;
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
 	pcmd += sizeof(uint32_t);
 	pcmd += sizeof(uint32_t);
@@ -2742,14 +2725,11 @@ int
 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 {
 {
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
-	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nodelist *ndlp;
 
 
-	psli = &phba->sli;
 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 
 
 	ndlp = lpfc_findnode_did(vport, nportid);
 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2776,7 +2756,6 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 		return 1;
 		return 1;
 	}
 	}
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 
 	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
 	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
@@ -2836,9 +2815,7 @@ static int
 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 {
 {
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
-	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	FARP *fp;
 	FARP *fp;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint32_t *lp;
 	uint32_t *lp;
@@ -2846,7 +2823,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_nodelist *ondlp;
 	struct lpfc_nodelist *ondlp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_nodelist *ndlp;
 
 
-	psli = &phba->sli;
 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 
 
 	ndlp = lpfc_findnode_did(vport, nportid);
 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2872,7 +2848,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 		return 1;
 		return 1;
 	}
 	}
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 
 	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
 	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
@@ -3922,13 +3897,11 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
 	IOCB_t *icmd;
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	int rc;
 	int rc;
 	ELS_PKT *els_pkt_ptr;
 	ELS_PKT *els_pkt_ptr;
 
 
-	psli = &phba->sli;
 	oldcmd = &oldiocb->iocb;
 	oldcmd = &oldiocb->iocb;
 
 
 	switch (flag) {
 	switch (flag) {
@@ -4061,12 +4034,10 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 	IOCB_t *icmd;
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	int rc;
 	int rc;
 
 
-	psli = &phba->sli;
 	cmdsize = 2 * sizeof(uint32_t);
 	cmdsize = 2 * sizeof(uint32_t);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
 				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -4212,13 +4183,10 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 	IOCB_t *icmd;
 	IOCB_t *icmd;
 	IOCB_t *oldcmd;
 	IOCB_t *oldcmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	int rc;
 	int rc;
 
 
-	psli = &phba->sli;
-
 	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
 	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
 		ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
 		ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
@@ -4315,12 +4283,10 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
 	RNID *rn;
 	RNID *rn;
 	IOCB_t *icmd, *oldcmd;
 	IOCB_t *icmd, *oldcmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	int rc;
 	int rc;
 
 
-	psli = &phba->sli;
 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
 	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
 					+ (2 * sizeof(struct lpfc_name));
 					+ (2 * sizeof(struct lpfc_name));
 	if (format)
 	if (format)
@@ -4447,12 +4413,10 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
 {
 {
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
-	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
 	int rc;
 	int rc;
 
 
-	psli = &phba->sli;
 	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
 	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
 
 
 	/* The accumulated length can exceed the BPL_SIZE.  For
 	/* The accumulated length can exceed the BPL_SIZE.  For
@@ -4746,6 +4710,8 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 
 
 	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
 	desc->info.port_speed.speed = cpu_to_be16(rdp_speed);
 
 
+	if (phba->lmt & LMT_32Gb)
+		rdp_cap |= RDP_PS_32GB;
 	if (phba->lmt & LMT_16Gb)
 	if (phba->lmt & LMT_16Gb)
 		rdp_cap |= RDP_PS_16GB;
 		rdp_cap |= RDP_PS_16GB;
 	if (phba->lmt & LMT_10Gb)
 	if (phba->lmt & LMT_10Gb)
@@ -5181,14 +5147,12 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 {
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd;
 	struct lpfc_dmabuf *pcmd;
-	IOCB_t *icmd;
 	uint8_t *lp;
 	uint8_t *lp;
 	struct fc_lcb_request_frame *beacon;
 	struct fc_lcb_request_frame *beacon;
 	struct lpfc_lcb_context *lcb_context;
 	struct lpfc_lcb_context *lcb_context;
 	uint8_t state, rjt_err;
 	uint8_t state, rjt_err;
 	struct ls_rjt stat;
 	struct ls_rjt stat;
 
 
-	icmd = &cmdiocb->iocb;
 	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
 	pcmd = (struct lpfc_dmabuf *)cmdiocb->context2;
 	lp = (uint8_t *)pcmd->virt;
 	lp = (uint8_t *)pcmd->virt;
 	beacon = (struct fc_lcb_request_frame *)pcmd->virt;
 	beacon = (struct fc_lcb_request_frame *)pcmd->virt;
@@ -5444,7 +5408,7 @@ lpfc_send_rscn_event(struct lpfc_vport *vport,
 
 
 	fc_host_post_vendor_event(shost,
 	fc_host_post_vendor_event(shost,
 		fc_get_event_number(),
 		fc_get_event_number(),
-		sizeof(struct lpfc_els_event_header) + payload_len,
+		sizeof(struct lpfc_rscn_event_header) + payload_len,
 		(char *)rscn_event_data,
 		(char *)rscn_event_data,
 		LPFC_NL_VENDOR_ID);
 		LPFC_NL_VENDOR_ID);
 
 
@@ -5481,13 +5445,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_dmabuf *pcmd;
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp, *datap;
 	uint32_t *lp, *datap;
-	IOCB_t *icmd;
 	uint32_t payload_len, length, nportid, *cmd;
 	uint32_t payload_len, length, nportid, *cmd;
 	int rscn_cnt;
 	int rscn_cnt;
 	int rscn_id = 0, hba_id = 0;
 	int rscn_id = 0, hba_id = 0;
 	int i;
 	int i;
 
 
-	icmd = &cmdiocb->iocb;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 	lp = (uint32_t *) pcmd->virt;
 
 
@@ -5893,6 +5855,13 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		return 1;
 		return 1;
 	}
 	}
 
 
+	/* send our FLOGI first */
+	if (vport->port_state < LPFC_FLOGI) {
+		vport->fc_myDID = 0;
+		lpfc_initial_flogi(vport);
+		vport->fc_myDID = Fabric_DID;
+	}
+
 	/* Send back ACC */
 	/* Send back ACC */
 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
 	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
 
 
@@ -5943,12 +5912,10 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 {
 {
 	struct lpfc_dmabuf *pcmd;
 	struct lpfc_dmabuf *pcmd;
 	uint32_t *lp;
 	uint32_t *lp;
-	IOCB_t *icmd;
 	RNID *rn;
 	RNID *rn;
 	struct ls_rjt stat;
 	struct ls_rjt stat;
 	uint32_t cmd;
 	uint32_t cmd;
 
 
-	icmd = &cmdiocb->iocb;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 	lp = (uint32_t *) pcmd->virt;
 
 
@@ -6259,7 +6226,6 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 {
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_hba *phba = vport->phba;
 	LPFC_MBOXQ_t *mbox;
 	LPFC_MBOXQ_t *mbox;
-	struct lpfc_dmabuf *pcmd;
 	struct ls_rjt stat;
 	struct ls_rjt stat;
 
 
 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
 	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
@@ -6267,8 +6233,6 @@ lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		/* reject the unsolicited RPS request and done with it */
 		/* reject the unsolicited RPS request and done with it */
 		goto reject_out;
 		goto reject_out;
 
 
-	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
-
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
 	if (mbox) {
 	if (mbox) {
 		lpfc_read_lnk_stat(phba, mbox);
 		lpfc_read_lnk_stat(phba, mbox);
@@ -6482,7 +6446,6 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 {
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
 	struct RRQ *els_rrq;
 	struct RRQ *els_rrq;
-	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
@@ -6501,7 +6464,6 @@ lpfc_issue_els_rrq(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!elsiocb)
 	if (!elsiocb)
 		return 1;
 		return 1;
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 
 
 	/* For RRQ request, remainder of payload is Exchange IDs */
 	/* For RRQ request, remainder of payload is Exchange IDs */
@@ -7374,6 +7336,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			 "Data: x%x x%x x%x x%x\n",
 			 "Data: x%x x%x x%x x%x\n",
 			cmd, did, vport->port_state, vport->fc_flag,
 			cmd, did, vport->port_state, vport->fc_flag,
 			vport->fc_myDID, vport->fc_prevDID);
 			vport->fc_myDID, vport->fc_prevDID);
+
+	/* reject till our FLOGI completes */
+	if ((vport->port_state < LPFC_FABRIC_CFG_LINK) &&
+		(cmd != ELS_CMD_FLOGI)) {
+		rjt_err = LSRJT_UNABLE_TPC;
+		rjt_exp = LSEXP_NOTHING_MORE;
+		goto lsrjt;
+	}
+
 	switch (cmd) {
 	switch (cmd) {
 	case ELS_CMD_PLOGI:
 	case ELS_CMD_PLOGI:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
@@ -7411,20 +7382,6 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 				rjt_exp = LSEXP_NOTHING_MORE;
 				rjt_exp = LSEXP_NOTHING_MORE;
 				break;
 				break;
 			}
 			}
-			/* We get here, and drop thru, if we are PT2PT with
-			 * another NPort and the other side has initiated
-			 * the PLOGI before responding to our FLOGI.
-			 */
-			if (phba->sli_rev == LPFC_SLI_REV4 &&
-			    (phba->fc_topology_changed ||
-			     vport->fc_myDID != vport->fc_prevDID)) {
-				lpfc_unregister_fcf_prep(phba);
-				spin_lock_irq(shost->host_lock);
-				vport->fc_flag &= ~FC_VFI_REGISTERED;
-				spin_unlock_irq(shost->host_lock);
-				phba->fc_topology_changed = 0;
-				lpfc_issue_reg_vfi(vport);
-			}
 		}
 		}
 
 
 		spin_lock_irq(shost->host_lock);
 		spin_lock_irq(shost->host_lock);
@@ -7655,6 +7612,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		break;
 		break;
 	}
 	}
 
 
+lsrjt:
 	/* check if need to LS_RJT received ELS cmd */
 	/* check if need to LS_RJT received ELS cmd */
 	if (rjt_err) {
 	if (rjt_err) {
 		memset(&stat, 0, sizeof(stat));
 		memset(&stat, 0, sizeof(stat));
@@ -8428,7 +8386,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
 {
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_hba  *phba = vport->phba;
-	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
 	struct lpfc_iocbq *elsiocb;
 	uint8_t *pcmd;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	uint16_t cmdsize;
@@ -8439,7 +8396,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	if (!elsiocb)
 	if (!elsiocb)
 		return 1;
 		return 1;
 
 
-	icmd = &elsiocb->iocb;
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
 	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
 	pcmd += sizeof(uint32_t);
 	pcmd += sizeof(uint32_t);

+ 19 - 8
drivers/scsi/lpfc/lpfc_hbadisc.c

@@ -800,7 +800,6 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 	struct lpfc_hba  *phba = vport->phba;
 	struct lpfc_nodelist *ndlp, *next_ndlp;
-	int  rc;
 
 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
 		if (!NLP_CHK_NODE_ACT(ndlp))
@@ -816,10 +815,10 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
 		if ((phba->sli_rev < LPFC_SLI_REV4) &&
 		    (!remove && ndlp->nlp_type & NLP_FABRIC))
 			continue;
-		rc = lpfc_disc_state_machine(vport, ndlp, NULL,
-					     remove
-					     ? NLP_EVT_DEVICE_RM
-					     : NLP_EVT_DEVICE_RECOVERY);
+		lpfc_disc_state_machine(vport, ndlp, NULL,
+					remove
+					? NLP_EVT_DEVICE_RM
+					: NLP_EVT_DEVICE_RECOVERY);
 	}
 	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
 		if (phba->sli_rev == LPFC_SLI_REV4)
@@ -1774,7 +1773,6 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 			     uint16_t *next_fcf_index)
 {
 	void *virt_addr;
-	dma_addr_t phys_addr;
 	struct lpfc_mbx_sge sge;
 	struct lpfc_mbx_read_fcf_tbl *read_fcf;
 	uint32_t shdr_status, shdr_add_status, if_type;
@@ -1785,7 +1783,6 @@ lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 	 * routine only uses a single SGE.
 	 */
 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
-	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
 	if (unlikely(!mboxq->sge_array)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"2524 Failed to get the non-embedded SGE "
@@ -2977,7 +2974,8 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	MAILBOX_t *mb = &pmb->u.mb;
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
 	struct lpfc_vport  *vport = pmb->vport;
-
+	struct serv_parm *sp = &vport->fc_sparam;
+	uint32_t ed_tov;
 
 	/* Check for error */
 	if (mb->mbxStatus) {
@@ -2992,6 +2990,18 @@ lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
 	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
 	       sizeof (struct serv_parm));
+
+	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
+	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
+		ed_tov = (ed_tov + 999999) / 1000000;
+
+	phba->fc_edtov = ed_tov;
+	phba->fc_ratov = (2 * ed_tov) / 1000;
+	if (phba->fc_ratov < FF_DEF_RATOV) {
+		/* RA_TOV should be atleast 10sec for initial flogi */
+		phba->fc_ratov = FF_DEF_RATOV;
+	}
+
 	lpfc_update_vport_wwn(vport);
 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
 		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
@@ -3032,6 +3042,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
 	case LPFC_LINK_SPEED_8GHZ:
 	case LPFC_LINK_SPEED_10GHZ:
 	case LPFC_LINK_SPEED_16GHZ:
+	case LPFC_LINK_SPEED_32GHZ:
 		phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
 		break;
 	default:

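The read_sparam completion above now derives the driver's R_A_TOV from the fabric's advertised E_D_TOV instead of relying only on the compile-time default. A rough worked sketch of that arithmetic (illustrative only; field names follow the hunk above, and the millisecond interpretation when edtovResolution is clear is an assumption based on FF_DEF_EDTOV):

	/* Sketch, not driver code: E_D_TOV arrives in milliseconds unless
	 * edtovResolution is set, in which case it is nanosecond ticks
	 * rounded up to milliseconds. */
	uint32_t ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
	if (sp->cmn.edtovResolution)
		ed_tov = (ed_tov + 999999) / 1000000;	/* ns ticks -> ms */
	/* R_A_TOV is kept in whole seconds and floored at FF_DEF_RATOV (now 10s):
	 * the common 2000ms E_D_TOV gives 2 * 2000 / 1000 = 4s, raised to 10s. */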
+ 5 - 1
drivers/scsi/lpfc/lpfc_hw.h

@@ -33,7 +33,7 @@
 
 #define FF_DEF_EDTOV          2000	/* Default E_D_TOV (2000ms) */
 #define FF_DEF_ALTOV            15	/* Default AL_TIME (15ms) */
-#define FF_DEF_RATOV             2	/* Default RA_TOV (2s) */
+#define FF_DEF_RATOV            10	/* Default RA_TOV (10s) */
 #define FF_DEF_ARBTOV         1900	/* Default ARB_TOV (1900ms) */
 
 #define LPFC_BUF_RING0        64	/* Number of buffers to post to RING
@@ -1400,6 +1400,7 @@ struct lpfc_fdmi_reg_portattr {
 #define PCI_DEVICE_ID_LANCER_FC_VF  0xe208
 #define PCI_DEVICE_ID_LANCER_FCOE   0xe260
 #define PCI_DEVICE_ID_LANCER_FCOE_VF 0xe268
+#define PCI_DEVICE_ID_LANCER_G6_FC  0xe300
 #define PCI_DEVICE_ID_SAT_SMB       0xf011
 #define PCI_DEVICE_ID_SAT_MID       0xf015
 #define PCI_DEVICE_ID_RFLY          0xf095
@@ -2075,6 +2076,7 @@ typedef struct {
 #define LINK_SPEED_8G   0x8     /* 8 Gigabaud */
 #define LINK_SPEED_10G  0x10    /* 10 Gigabaud */
 #define LINK_SPEED_16G  0x11    /* 16 Gigabaud */
+#define LINK_SPEED_32G  0x14    /* 32 Gigabaud */
 
 } INIT_LINK_VAR;
 
@@ -2246,6 +2248,7 @@ typedef struct {
 #define LMT_8Gb       0x080
 #define LMT_10Gb      0x100
 #define LMT_16Gb      0x200
+#define LMT_32Gb      0x400
 	uint32_t rsvd2;
 	uint32_t rsvd3;
 	uint32_t max_xri;
@@ -2727,6 +2730,7 @@ struct lpfc_mbx_read_top {
 #define LPFC_LINK_SPEED_8GHZ	0x20
 #define LPFC_LINK_SPEED_10GHZ	0x40
 #define LPFC_LINK_SPEED_16GHZ	0x80
+#define LPFC_LINK_SPEED_32GHZ	0x90
 };
 
 /* Structure for MB Command CLEAR_LA (22) */

+ 23 - 13
drivers/scsi/lpfc/lpfc_init.c

@@ -699,7 +699,9 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
 	     !(phba->lmt & LMT_10Gb)) ||
 	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
-	     !(phba->lmt & LMT_16Gb))) {
+	     !(phba->lmt & LMT_16Gb)) ||
+	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
+	     !(phba->lmt & LMT_32Gb))) {
 		/* Reset link speed to auto */
 		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
 			"1302 Invalid speed for this board:%d "
@@ -2035,7 +2037,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		&& descp && descp[0] != '\0')
 		return;
 
-	if (phba->lmt & LMT_16Gb)
+	if (phba->lmt & LMT_32Gb)
+		max_speed = 32;
+	else if (phba->lmt & LMT_16Gb)
 		max_speed = 16;
 	else if (phba->lmt & LMT_10Gb)
 		max_speed = 10;
@@ -2229,6 +2233,9 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 		m = (typeof(m)){"OCe15100", "PCIe",
 				"Obsolete, Unsupported FCoE"};
 		break;
+	case PCI_DEVICE_ID_LANCER_G6_FC:
+		m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
+		break;
 	case PCI_DEVICE_ID_SKYHAWK:
 	case PCI_DEVICE_ID_SKYHAWK_VF:
 		oneConnect = 1;
@@ -2253,7 +2260,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 				phba->Port);
 		else if (max_speed == 0)
 			snprintf(descp, 255,
-				"Emulex %s %s %s ",
+				"Emulex %s %s %s",
 				m.name, m.bus, m.function);
 		else
 			snprintf(descp, 255,
@@ -3491,6 +3498,8 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
 				 sizeof fc_host_symbolic_name(shost));
 
 	fc_host_supported_speeds(shost) = 0;
+	if (phba->lmt & LMT_32Gb)
+		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
 	if (phba->lmt & LMT_16Gb)
 		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
 	if (phba->lmt & LMT_10Gb)
@@ -3854,6 +3863,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
 		case LPFC_FC_LA_SPEED_16G:
 			port_speed = 16000;
 			break;
+		case LPFC_FC_LA_SPEED_32G:
+			port_speed = 32000;
+			break;
 		default:
 			port_speed = 0;
 		}
@@ -4982,8 +4994,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 	}
 
 	if (!phba->sli.ring)
-		phba->sli.ring = (struct lpfc_sli_ring *)
-			kzalloc(LPFC_SLI3_MAX_RING *
+		phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING *
			sizeof(struct lpfc_sli_ring), GFP_KERNEL);
 	if (!phba->sli.ring)
 		return -ENOMEM;
@@ -4995,7 +5006,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* Initialize the host templates the configured values. */
 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
 
 	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
 	if (phba->cfg_enable_bg) {
@@ -8679,7 +8690,6 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
 #ifdef CONFIG_X86
 	struct cpuinfo_x86 *cpuinfo;
 #endif
-	struct cpumask *mask;
 	uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
 
 	/* If there is no mapping, just return */
@@ -8773,11 +8783,8 @@ found:
 			first_cpu = cpu;
 
 		/* Now affinitize to the selected CPU */
-		mask = &cpup->maskbits;
-		cpumask_clear(mask);
-		cpumask_set_cpu(cpu, mask);
 		i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
-					  vector, mask);
+					  vector, get_cpu_mask(cpu));
 
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"3330 Set Affinity: CPU %d channel %d "
@@ -10287,7 +10294,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host  *shost = NULL;
-	int error, ret;
+	int error;
 	uint32_t cfg_mode, intr_mode;
 	int adjusted_fcp_io_channel;
 
@@ -10411,7 +10418,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 
 	/* check for firmware upgrade or downgrade */
 	if (phba->cfg_request_firmware_upgrade)
-		ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
+		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
 
 	/* Check if there are static vports to be created. */
 	lpfc_create_static_vport(phba);
@@ -11354,6 +11361,8 @@ static struct pci_device_id lpfc_id_table[] = {
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_FCOE_VF,
 		PCI_ANY_ID, PCI_ANY_ID, },
+	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LANCER_G6_FC,
+		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK,
 		PCI_ANY_ID, PCI_ANY_ID, },
 	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SKYHAWK_VF,
@@ -11477,6 +11486,7 @@ lpfc_exit(void)
 		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
 	}
 	kfree(lpfc_used_cpu);
+	idr_destroy(&lpfc_hba_index);
 }
 
 module_init(lpfc_init);

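The set_affinity hunk above drops the per-vector scratch cpumask (its maskbits backing field is removed from lpfc_vector_map_info in the lpfc_sli4.h hunk further down) in favour of the kernel's prebuilt single-CPU masks. A minimal sketch of the pattern, assuming only that cpu and the MSI-X vector number are already known:

	/* get_cpu_mask(cpu) returns a constant mask with just 'cpu' set,
	 * so no private cpumask needs to be cleared and repopulated first. */
	const struct cpumask *one = get_cpu_mask(cpu);
	irq_set_affinity_hint(vector, one);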
+ 11 - 8
drivers/scsi/lpfc/lpfc_mbox.c

@@ -289,9 +289,7 @@ lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
 		   struct lpfc_dmabuf *mp)
 {
 	MAILBOX_t *mb;
-	struct lpfc_sli *psli;
 
-	psli = &phba->sli;
 	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
@@ -483,13 +481,11 @@ lpfc_init_link(struct lpfc_hba * phba,
 	       LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
 {
 	lpfc_vpd_t *vpd;
-	struct lpfc_sli *psli;
 	MAILBOX_t *mb;
 
 	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-	psli = &phba->sli;
 	switch (topology) {
 	case FLAGS_TOPOLOGY_MODE_LOOP_PT:
 		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
@@ -510,6 +506,13 @@ lpfc_init_link(struct lpfc_hba * phba,
 		break;
 	}
 
+	if (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC &&
+		mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) {
+		/* Failover is not tried for Lancer G6 */
+		mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
+		phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT;
+	}
+
 	/* Enable asynchronous ABTS responses from firmware */
 	mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
 
@@ -543,6 +546,10 @@ lpfc_init_link(struct lpfc_hba * phba,
 			mb->un.varInitLnk.link_flags |=	FLAGS_LINK_SPEED;
 			mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
 			break;
+		case LPFC_USER_LINK_SPEED_32G:
+			mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
+			mb->un.varInitLnk.link_speed = LINK_SPEED_32G;
+			break;
 		case LPFC_USER_LINK_SPEED_AUTO:
 		default:
 			mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
@@ -585,9 +592,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
 {
 	struct lpfc_dmabuf *mp;
 	MAILBOX_t *mb;
-	struct lpfc_sli *psli;
 
-	psli = &phba->sli;
 	mb = &pmb->u.mb;
 	memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
@@ -2010,7 +2015,6 @@ lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
 			   uint16_t fcf_index)
 {
 	void *virt_addr;
-	dma_addr_t phys_addr;
 	uint8_t *bytep;
 	struct lpfc_mbx_sge sge;
 	uint32_t alloc_len, req_len;
@@ -2039,7 +2043,6 @@ lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
 	 * routine only uses a single SGE.
 	 */
 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
-	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
 	virt_addr = mboxq->sge_array->addr[0];
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
 

+ 0 - 2
drivers/scsi/lpfc/lpfc_nportdisc.c

@@ -820,7 +820,6 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 	struct lpfc_hba *phba;
 	LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
-	MAILBOX_t *mb;
 	uint16_t rpi;
 
 	phba = vport->phba;
@@ -828,7 +827,6 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
 		(evt == NLP_EVT_CMPL_REG_LOGIN) &&
 		(!pmb->u.mb.mbxStatus)) {
-		mb = &pmb->u.mb;
 		rpi = pmb->u.mb.un.varWords[0];
 		lpfc_release_rpi(phba, vport, rpi);
 	}

+ 1 - 13
drivers/scsi/lpfc/lpfc_scsi.c

@@ -1293,7 +1293,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
 {
 	struct scatterlist *sgpe; /* s/g prot entry */
-	struct scatterlist *sgde; /* s/g data entry */
 	struct lpfc_scsi_buf *lpfc_cmd = NULL;
 	struct scsi_dif_tuple *src = NULL;
 	struct lpfc_nodelist *ndlp;
@@ -1309,7 +1308,6 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		return 0;
 
 	sgpe = scsi_prot_sglist(sc);
-	sgde = scsi_sglist(sc);
 	lba = scsi_get_lba(sc);
 
 	/* First check if we need to match the LBA */
@@ -1882,7 +1880,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 #endif
 	uint32_t checking = 1;
 	uint32_t reftag;
-	unsigned blksize;
 	uint8_t txop, rxop;
 
 	status  = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
@@ -1890,7 +1887,6 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		goto out;
 
 	/* extract some info from the scsi command for pde*/
-	blksize = lpfc_cmd_blksize(sc);
 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -2263,7 +2259,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	dma_addr_t physaddr;
 	int i = 0, num_sge = 0, status;
 	uint32_t reftag;
-	unsigned blksize;
 	uint8_t txop, rxop;
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
 	uint32_t rc;
@@ -2277,7 +2272,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		goto out;
 
 	/* extract some info from the scsi command for pde*/
-	blksize = lpfc_cmd_blksize(sc);
 	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -2881,7 +2875,7 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
 	struct scsi_dif_tuple *src = NULL;
 	uint8_t *data_src = NULL;
-	uint16_t guard_tag, guard_type;
+	uint16_t guard_tag;
 	uint16_t start_app_tag, app_tag;
 	uint32_t start_ref_tag, ref_tag;
 	int prot, protsegcnt;
@@ -2922,7 +2916,6 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		data_len = sgde->length;
 		if ((data_len & (blksize - 1)) == 0)
 			chk_guard = 1;
-		guard_type = scsi_host_get_guard(cmd->device->host);
 
 		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
 		start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
@@ -3908,12 +3901,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
 	struct lpfc_nodelist *pnode = rdata->pnode;
 	struct scsi_cmnd *cmd;
-	int result;
 	int depth;
 	unsigned long flags;
 	struct lpfc_fast_path_event *fast_path_evt;
 	struct Scsi_Host *shost;
-	uint32_t queue_depth, scsi_id;
 	uint32_t logit = LOG_FCP;
 
 	/* Sanity check on return of outstanding command */
@@ -4095,7 +4086,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	}
 
 	lpfc_update_stats(phba, lpfc_cmd);
-	result = cmd->result;
 	if (vport->cfg_max_scsicmpl_time &&
 	   time_after(jiffies, lpfc_cmd->start_time +
 		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
@@ -4132,8 +4122,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
 
 	/* The sdev is not guaranteed to be valid post scsi_done upcall. */
-	queue_depth = cmd->device->queue_depth;
-	scsi_id = cmd->device->id;
 	cmd->scsi_done(cmd);
 
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {

+ 1 - 9
drivers/scsi/lpfc/lpfc_sli.c

@@ -6696,7 +6696,7 @@ lpfc_mbox_timeout(unsigned long ptr)
  * This function checks if any mailbox completions are present on the mailbox
  * completion queue.
  **/
-bool
+static bool
 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
 {
 
@@ -12491,12 +12491,10 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
 	struct lpfc_eqe *eqe;
 	unsigned long iflag;
 	int ecount = 0;
-	uint32_t eqidx;
 
 	/* Get the driver's phba structure from the dev_id */
 	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
 	phba = fcp_eq_hdl->phba;
-	eqidx = fcp_eq_hdl->idx;
 
 	if (unlikely(!phba))
 		return IRQ_NONE;
@@ -12831,12 +12829,8 @@ out_fail:
 static void __iomem *
 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
 {
-	struct pci_dev *pdev;
-
 	if (!phba->pcidev)
 		return NULL;
-	else
-		pdev = phba->pcidev;
 
 	switch (pci_barset) {
 	case WQ_PCI_BAR_0_AND_1:
@@ -15920,7 +15914,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
 	LPFC_MBOXQ_t *mboxq;
 	uint8_t *bytep;
 	void *virt_addr;
-	dma_addr_t phys_addr;
 	struct lpfc_mbx_sge sge;
 	uint32_t alloc_len, req_len;
 	uint32_t fcfindex;
@@ -15953,7 +15946,6 @@ lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
 	 * routine only uses a single SGE.
 	 */
 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
-	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
 	virt_addr = mboxq->sge_array->addr[0];
 	/*
 	 * Configure the FCF record for FCFI 0.  This is the driver's

+ 0 - 1
drivers/scsi/lpfc/lpfc_sli4.h

@@ -454,7 +454,6 @@ struct lpfc_vector_map_info {
 	uint16_t	core_id;
 	uint16_t	irq;
 	uint16_t	channel_id;
-	struct cpumask	maskbits;
 };
 #define LPFC_VECTOR_MAP_EMPTY	0xffff
 

+ 1 - 1
drivers/scsi/lpfc/lpfc_version.h

@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "10.7.0.0."
+#define LPFC_DRIVER_VERSION "11.0.0.0."
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */

+ 76 - 105
drivers/scsi/scsi_devinfo.c

@@ -390,25 +390,57 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
 EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
 
 /**
- * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * scsi_dev_info_list_find - find a matching dev_info list entry.
  * @vendor:	vendor string
  * @model:	model (product) string
  * @key:	specify list to use
  *
  * Description:
- * 	Remove and destroy one dev_info entry for @vendor, @model
+ *	Finds the first dev_info entry matching @vendor, @model
  * 	in list specified by @key.
  *
- * Returns: 0 OK, -error on failure.
+ * Returns: pointer to matching entry, or ERR_PTR on failure.
  **/
-int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
+		const char *model, int key)
 {
-	struct scsi_dev_info_list *devinfo, *found = NULL;
+	struct scsi_dev_info_list *devinfo;
 	struct scsi_dev_info_list_table *devinfo_table =
 		scsi_devinfo_lookup_by_key(key);
+	size_t vmax, mmax;
+	const char *vskip, *mskip;
 
 	if (IS_ERR(devinfo_table))
-		return PTR_ERR(devinfo_table);
+		return (struct scsi_dev_info_list *) devinfo_table;
+
+	/* Prepare for "compatible" matches */
+
+	/*
+	 * XXX why skip leading spaces? If an odd INQUIRY
+	 * value, that should have been part of the
+	 * scsi_static_device_list[] entry, such as "  FOO"
+	 * rather than "FOO". Since this code is already
+	 * here, and we don't know what device it is
+	 * trying to work with, leave it as-is.
+	 */
+	vmax = 8;	/* max length of vendor */
+	vskip = vendor;
+	while (vmax > 0 && *vskip == ' ') {
+		vmax--;
+		vskip++;
+	}
+	/* Also skip trailing spaces */
+	while (vmax > 0 && vskip[vmax - 1] == ' ')
+		--vmax;
+
+	mmax = 16;	/* max length of model */
+	mskip = model;
+	while (mmax > 0 && *mskip == ' ') {
+		mmax--;
+		mskip++;
+	}
+	while (mmax > 0 && mskip[mmax - 1] == ' ')
+		--mmax;
 
 	list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
 			    dev_info_list) {
@@ -416,61 +448,48 @@ int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
 			/*
 			 * Behave like the older version of get_device_flags.
 			 */
-			size_t max;
-			/*
-			 * XXX why skip leading spaces? If an odd INQUIRY
-			 * value, that should have been part of the
-			 * scsi_static_device_list[] entry, such as "  FOO"
-			 * rather than "FOO". Since this code is already
-			 * here, and we don't know what device it is
-			 * trying to work with, leave it as-is.
-			 */
-			max = 8;	/* max length of vendor */
-			while ((max > 0) && *vendor == ' ') {
-				max--;
-				vendor++;
-			}
-			/*
-			 * XXX removing the following strlen() would be
-			 * good, using it means that for a an entry not in
-			 * the list, we scan every byte of every vendor
-			 * listed in scsi_static_device_list[], and never match
-			 * a single one (and still have to compare at
-			 * least the first byte of each vendor).
-			 */
-			if (memcmp(devinfo->vendor, vendor,
-				    min(max, strlen(devinfo->vendor))))
+			if (memcmp(devinfo->vendor, vskip, vmax) ||
+					devinfo->vendor[vmax])
 				continue;
-			/*
-			 * Skip spaces again.
-			 */
-			max = 16;	/* max length of model */
-			while ((max > 0) && *model == ' ') {
-				max--;
-				model++;
-			}
-			if (memcmp(devinfo->model, model,
-				   min(max, strlen(devinfo->model))))
+			if (memcmp(devinfo->model, mskip, mmax) ||
+					devinfo->model[mmax])
 				continue;
-			found = devinfo;
+			return devinfo;
 		} else {
 			if (!memcmp(devinfo->vendor, vendor,
 				     sizeof(devinfo->vendor)) &&
 			     !memcmp(devinfo->model, model,
 				      sizeof(devinfo->model)))
-				found = devinfo;
+				return devinfo;
 		}
-		if (found)
-			break;
 	}
 
-	if (found) {
-		list_del(&found->dev_info_list);
-		kfree(found);
-		return 0;
-	}
+	return ERR_PTR(-ENOENT);
+}
+
+/**
+ * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * @vendor:	vendor string
+ * @model:	model (product) string
+ * @key:	specify list to use
+ *
+ * Description:
+ *	Remove and destroy one dev_info entry for @vendor, @model
+ *	in list specified by @key.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+{
+	struct scsi_dev_info_list *found;
 
-	return -ENOENT;
+	found = scsi_dev_info_list_find(vendor, model, key);
+	if (IS_ERR(found))
+		return PTR_ERR(found);
+
+	list_del(&found->dev_info_list);
+	kfree(found);
+	return 0;
 }
 EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
 
@@ -565,64 +584,16 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev,
 				int key)
 {
 	struct scsi_dev_info_list *devinfo;
-	struct scsi_dev_info_list_table *devinfo_table;
+	int err;
 
-	devinfo_table = scsi_devinfo_lookup_by_key(key);
+	devinfo = scsi_dev_info_list_find(vendor, model, key);
+	if (!IS_ERR(devinfo))
+		return devinfo->flags;
 
-	if (IS_ERR(devinfo_table))
-		return PTR_ERR(devinfo_table);
+	err = PTR_ERR(devinfo);
+	if (err != -ENOENT)
+		return err;
 
-	list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
-			    dev_info_list) {
-		if (devinfo->compatible) {
-			/*
-			 * Behave like the older version of get_device_flags.
-			 */
-			size_t max;
-			/*
-			 * XXX why skip leading spaces? If an odd INQUIRY
-			 * value, that should have been part of the
-			 * scsi_static_device_list[] entry, such as "  FOO"
-			 * rather than "FOO". Since this code is already
-			 * here, and we don't know what device it is
-			 * trying to work with, leave it as-is.
-			 */
-			max = 8;	/* max length of vendor */
-			while ((max > 0) && *vendor == ' ') {
-				max--;
-				vendor++;
-			}
-			/*
-			 * XXX removing the following strlen() would be
-			 * good, using it means that for a an entry not in
-			 * the list, we scan every byte of every vendor
-			 * listed in scsi_static_device_list[], and never match
-			 * a single one (and still have to compare at
-			 * least the first byte of each vendor).
-			 */
-			if (memcmp(devinfo->vendor, vendor,
-				    min(max, strlen(devinfo->vendor))))
-				continue;
-			/*
-			 * Skip spaces again.
-			 */
-			max = 16;	/* max length of model */
-			while ((max > 0) && *model == ' ') {
-				max--;
-				model++;
-			}
-			if (memcmp(devinfo->model, model,
-				   min(max, strlen(devinfo->model))))
-				continue;
-			return devinfo->flags;
-		} else {
-			if (!memcmp(devinfo->vendor, vendor,
-				     sizeof(devinfo->vendor)) &&
-			     !memcmp(devinfo->model, model,
-				      sizeof(devinfo->model)))
-				return devinfo->flags;
-		}
-	}
 	/* nothing found, return nothing */
 	/* nothing found, return nothing */
 	if (key != SCSI_DEVINFO_GLOBAL)
 		return 0;
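The scsi_devinfo.c rework folds the duplicated "compatible" matching out of the delete and flag-lookup paths into the single scsi_dev_info_list_find() helper, which now also strips trailing spaces and requires the trimmed INQUIRY string to match the stored entry exactly rather than as a bare prefix. A standalone sketch of that comparison, with hypothetical names and outside the kernel's list walk:

	#include <string.h>

	/* Illustrative only: trimmed, exact-length compare in the spirit of
	 * scsi_dev_info_list_find(); the function name and width parameter
	 * are assumptions, not kernel API. */
	static int devinfo_match(const char *entry, const char *inq, size_t width)
	{
		size_t len = width;
		const char *p = inq;

		while (len > 0 && *p == ' ') {		/* skip leading spaces */
			len--;
			p++;
		}
		while (len > 0 && p[len - 1] == ' ')	/* and trailing spaces */
			len--;

		/* match only when the stored entry is exactly the trimmed string */
		return memcmp(entry, p, len) == 0 && entry[len] == '\0';
	}

	/* e.g. devinfo_match("FOO", "  FOO   ", 8) matches; entry "FOOBAR" does not */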