
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This update includes the usual round of major driver updates (hpsa,
  be2iscsi, hisi_sas, zfcp, cxlflash). There's a new incarnation of hpsa
  called smartpqi for which a driver is added, there's some cleanup work
  of the ibm vscsi target and updates to libfc, plus a whole host of
  minor fixes and updates and finally the removal of several ISA drivers
  which seem not to have been used for years"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (173 commits)
  scsi: mvsas: Mark symbols static where possible
  scsi: pm8001: Mark symbols static where possible
  scsi: arcmsr: Simplify user_len checking
  scsi: fcoe: fix off by one in eth2fc_speed()
  scsi: dtc: remove from tree
  scsi: t128: remove from tree
  scsi: pas16: remove from tree
  scsi: u14-34f: remove from tree
  scsi: ultrastor: remove from tree
  scsi: in2000: remove from tree
  scsi: wd7000: remove from tree
  scsi: scsi_dh_alua: Fix memory leak in alua_rtpg()
  scsi: lpfc: Mark symbols static where possible
  scsi: hpsa: correct call to hpsa_do_reset
  scsi: ufs: Get a TM service response from the correct offset
  scsi: ibmvfc: Fix I/O hang when port is not mapped
  scsi: megaraid_sas: clean function declarations in megaraid_sas_base.c up
  scsi: ipr: Remove redundant messages at adapter init time
  scsi: ipr: Don't log unnecessary 9084 error details
  scsi: smartpqi: raid bypass lba calculation fix
  ...
Linus Torvalds
commit 4dfddf5036
100 changed files with 12093 additions and 8190 deletions
  1. Documentation/powerpc/cxlflash.txt (+39, -5)
  2. Documentation/scsi/00-INDEX (+0, -2)
  3. Documentation/scsi/dtc3x80.txt (+0, -43)
  4. Documentation/scsi/in2000.txt (+0, -202)
  5. Documentation/scsi/scsi-parameters.txt (+0, -18)
  6. Documentation/scsi/smartpqi.txt (+80, -0)
  7. MAINTAINERS (+17, -21)
  8. drivers/message/fusion/mptbase.c (+4, -3)
  9. drivers/message/fusion/mptfc.c (+5, -2)
  10. drivers/s390/scsi/zfcp_dbf.c (+144, -18)
  11. drivers/s390/scsi/zfcp_dbf.h (+11, -3)
  12. drivers/s390/scsi/zfcp_erp.c (+9, -3)
  13. drivers/s390/scsi/zfcp_ext.h (+5, -3)
  14. drivers/s390/scsi/zfcp_fsf.c (+17, -5)
  15. drivers/s390/scsi/zfcp_fsf.h (+3, -1)
  16. drivers/s390/scsi/zfcp_scsi.c (+7, -1)
  17. drivers/scsi/Kconfig (+1, -135)
  18. drivers/scsi/Makefile (+1, -7)
  19. drivers/scsi/NCR5380.c (+4, -11)
  20. drivers/scsi/NCR5380.h (+9, -1)
  21. drivers/scsi/aacraid/src.c (+1, -1)
  22. drivers/scsi/aic94xx/aic94xx_hwi.c (+1, -1)
  23. drivers/scsi/arcmsr/arcmsr_hba.c (+10, -2)
  24. drivers/scsi/be2iscsi/be.h (+9, -6)
  25. drivers/scsi/be2iscsi/be_cmds.c (+756, -340)
  26. drivers/scsi/be2iscsi/be_cmds.h (+107, -35)
  27. drivers/scsi/be2iscsi/be_iscsi.c (+191, -217)
  28. drivers/scsi/be2iscsi/be_iscsi.h (+10, -15)
  29. drivers/scsi/be2iscsi/be_main.c (+1268, -1212)
  30. drivers/scsi/be2iscsi/be_main.h (+119, -101)
  31. drivers/scsi/be2iscsi/be_mgmt.c (+567, -930)
  32. drivers/scsi/be2iscsi/be_mgmt.h (+20, -31)
  33. drivers/scsi/bfa/bfa_fcs_lport.c (+2, -2)
  34. drivers/scsi/bnx2fc/bnx2fc_els.c (+2, -2)
  35. drivers/scsi/bnx2fc/bnx2fc_fcoe.c (+6, -6)
  36. drivers/scsi/bnx2fc/bnx2fc_hwi.c (+1, -1)
  37. drivers/scsi/bnx2fc/bnx2fc_io.c (+1, -1)
  38. drivers/scsi/csiostor/csio_scsi.c (+2, -3)
  39. drivers/scsi/cxlflash/main.c (+35, -46)
  40. drivers/scsi/cxlflash/superpipe.c (+78, -102)
  41. drivers/scsi/cxlflash/superpipe.h (+2, -1)
  42. drivers/scsi/cxlflash/vlun.c (+2, -11)
  43. drivers/scsi/device_handler/scsi_dh_alua.c (+1, -0)
  44. drivers/scsi/dtc.c (+0, -447)
  45. drivers/scsi/dtc.h (+0, -42)
  46. drivers/scsi/esas2r/esas2r_init.c (+0, -4)
  47. drivers/scsi/esas2r/esas2r_main.c (+1, -1)
  48. drivers/scsi/fcoe/fcoe_transport.c (+36, -17)
  49. drivers/scsi/hisi_sas/hisi_sas.h (+17, -1)
  50. drivers/scsi/hisi_sas/hisi_sas_main.c (+202, -36)
  51. drivers/scsi/hisi_sas/hisi_sas_v1_hw.c (+13, -23)
  52. drivers/scsi/hisi_sas/hisi_sas_v2_hw.c (+94, -36)
  53. drivers/scsi/hosts.c (+8, -4)
  54. drivers/scsi/hpsa.c (+112, -27)
  55. drivers/scsi/hpsa.h (+0, -1)
  56. drivers/scsi/hpsa_cmd.h (+1, -0)
  57. drivers/scsi/ibmvscsi/ibmvfc.c (+11, -1)
  58. drivers/scsi/ibmvscsi/ibmvfc.h (+1, -0)
  59. drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c (+7, -30)
  60. drivers/scsi/in2000.c (+0, -2302)
  61. drivers/scsi/in2000.h (+0, -412)
  62. drivers/scsi/ipr.c (+118, -16)
  63. drivers/scsi/ipr.h (+6, -2)
  64. drivers/scsi/libfc/fc_exch.c (+0, -1)
  65. drivers/scsi/libfc/fc_rport.c (+23, -3)
  66. drivers/scsi/lpfc/lpfc_ct.c (+41, -41)
  67. drivers/scsi/lpfc/lpfc_els.c (+17, -17)
  68. drivers/scsi/lpfc/lpfc_mbox.c (+2, -2)
  69. drivers/scsi/lpfc/lpfc_sli.c (+2, -2)
  70. drivers/scsi/megaraid/megaraid_sas_base.c (+5, -23)
  71. drivers/scsi/megaraid/megaraid_sas_fusion.h (+9, -0)
  72. drivers/scsi/mpt3sas/mpt3sas_base.c (+107, -152)
  73. drivers/scsi/mpt3sas/mpt3sas_base.h (+7, -17)
  74. drivers/scsi/mpt3sas/mpt3sas_config.c (+2, -5)
  75. drivers/scsi/mpt3sas/mpt3sas_ctl.c (+19, -30)
  76. drivers/scsi/mpt3sas/mpt3sas_scsih.c (+79, -90)
  77. drivers/scsi/mpt3sas/mpt3sas_transport.c (+8, -20)
  78. drivers/scsi/mvsas/mv_64xx.c (+10, -9)
  79. drivers/scsi/mvsas/mv_94xx.c (+21, -20)
  80. drivers/scsi/mvsas/mv_sas.c (+8, -8)
  81. drivers/scsi/pas16.c (+0, -565)
  82. drivers/scsi/pas16.h (+0, -121)
  83. drivers/scsi/pm8001/pm8001_hwi.c (+2, -2)
  84. drivers/scsi/pm8001/pm8001_sas.c (+1, -1)
  85. drivers/scsi/pmcraid.c (+4, -4)
  86. drivers/scsi/qla2xxx/qla_def.h (+0, -10)
  87. drivers/scsi/qla2xxx/qla_isr.c (+1, -1)
  88. drivers/scsi/qla2xxx/qla_os.c (+9, -9)
  89. drivers/scsi/qla4xxx/ql4_nx.c (+1, -1)
  90. drivers/scsi/scsi_debug.c (+27, -27)
  91. drivers/scsi/scsi_priv.h (+2, -0)
  92. drivers/scsi/scsi_scan.c (+0, -2)
  93. drivers/scsi/sd.c (+6, -5)
  94. drivers/scsi/sd.h (+0, -30)
  95. drivers/scsi/sd_dif.c (+5, -5)
  96. drivers/scsi/sg.c (+5, -15)
  97. drivers/scsi/smartpqi/Kconfig (+54, -0)
  98. drivers/scsi/smartpqi/Makefile (+3, -0)
  99. drivers/scsi/smartpqi/smartpqi.h (+1136, -0)
  100. drivers/scsi/smartpqi/smartpqi_init.c (+6303, -0)

+ 39 - 5
Documentation/powerpc/cxlflash.txt

@@ -121,7 +121,7 @@ Block library API
    below.

    The block library can be found on GitHub:
-    http://www.github.com/mikehollinger/ibmcapikv
+    http://github.com/open-power/capiflash


 CXL Flash Driver IOCTLs
@@ -171,11 +171,30 @@ DK_CXLFLASH_ATTACH
          destroyed, the tokens are to be considered stale and subsequent
          usage will result in errors.

+	- A valid adapter file descriptor (fd2 >= 0) is only returned on
+	  the initial attach for a context. Subsequent attaches to an
+	  existing context (DK_CXLFLASH_ATTACH_REUSE_CONTEXT flag present)
+	  do not provide the adapter file descriptor as it was previously
+	  made known to the application.
+
        - When a context is no longer needed, the user shall detach from
-          the context via the DK_CXLFLASH_DETACH ioctl.
+          the context via the DK_CXLFLASH_DETACH ioctl. When this ioctl
+	  returns with a valid adapter file descriptor and the return flag
+	  DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
+	  close the adapter file descriptor following a successful detach.
+
+	- When this ioctl returns with a valid fd2 and the return flag
+	  DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
+	  close fd2 in the following circumstances:
+
+		+ Following a successful detach of the last user of the context
+		+ Following a successful recovery on the context's original fd2
+		+ In the child process of a fork(), following a clone ioctl,
+		  on the fd2 associated with the source context

-        - A close on fd2 will invalidate the tokens. This operation is not
-          required by the user.
+        - At any time, a close on fd2 will invalidate the tokens. Applications
+	  should exercise caution to only close fd2 when appropriate (outlined
+	  in the previous bullet) to avoid premature loss of I/O.

 DK_CXLFLASH_USER_DIRECT
 -----------------------
@@ -254,6 +273,10 @@ DK_CXLFLASH_DETACH
    success, all "tokens" which had been provided to the user from the
    DK_CXLFLASH_ATTACH onward are no longer valid.

+    When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+    attach, the application _must_ close the fd2 associated with the context
+    following the detach of the final user of the context.
+
 DK_CXLFLASH_VLUN_CLONE
 ----------------------
    This ioctl is responsible for cloning a previously created
@@ -261,7 +284,7 @@ DK_CXLFLASH_VLUN_CLONE
    support maintaining user space access to storage after a process
    forks. Upon success, the child process (which invoked the ioctl)
    will have access to the same LUNs via the same resource handle(s)
-    and fd2 as the parent, but under a different context.
+    as the parent, but under a different context.

    Context sharing across processes is not supported with CXL and
    therefore each fork must be met with establishing a new context
@@ -275,6 +298,12 @@ DK_CXLFLASH_VLUN_CLONE
    translation tables are copied from the parent context to the child's
    and then synced with the AFU.

+    When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+    attach, the application _must_ close the fd2 associated with the source
+    context (still resident/accessible in the parent process) following the
+    clone. This is to avoid a stale entry in the file descriptor table of the
+    child process.
+
 DK_CXLFLASH_VERIFY
 ------------------
    This ioctl is used to detect various changes such as the capacity of
@@ -309,6 +338,11 @@ DK_CXLFLASH_RECOVER_AFU
    at which time the context/resources they held will be freed as part of
    the release fop.

+    When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
+    attach, the application _must_ unmap and close the fd2 associated with the
+    original context following this ioctl returning success and indicating that
+    the context was recovered (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET).
+
 DK_CXLFLASH_MANAGE_LUN
 ----------------------
    This ioctl is used to switch a LUN from a mode where it is available
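
For illustration only, the fd2 discipline documented above might look as
follows from user space. This is a minimal sketch, assuming the uapi header
scsi/cxlflash_ioctl.h, a struct dk_cxlflash_detach with an hdr.return_flags
field, and return flags captured earlier at DK_CXLFLASH_ATTACH time; it is
not a verified build against the real header.

#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/cxlflash_ioctl.h>	/* assumed header location */

/* Detach the final user of a context, then close fd2 only if the attach
 * told us to (DK_CXLFLASH_APP_CLOSE_ADAP_FD in its return flags). Closing
 * fd2 at any other time invalidates the tokens and can cut off in-flight
 * I/O, per the caution above.
 */
static int detach_last_user(int fd, int fd2, __u64 attach_return_flags)
{
	struct dk_cxlflash_detach detach = { 0 };	/* context id elided */

	if (ioctl(fd, DK_CXLFLASH_DETACH, &detach) < 0) {
		perror("DK_CXLFLASH_DETACH");
		return -1;
	}
	if (attach_return_flags & DK_CXLFLASH_APP_CLOSE_ADAP_FD)
		close(fd2);
	return 0;
}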

+ 0 - 2
Documentation/scsi/00-INDEX

@@ -64,8 +64,6 @@ hpsa.txt
	- HP Smart Array Controller SCSI driver.
hptiop.txt
	- HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
-in2000.txt
-	- info on in2000 driver
libsas.txt
	- Serial Attached SCSI management layer.
link_power_management_policy.txt

+ 0 - 43
Documentation/scsi/dtc3x80.txt

@@ -1,43 +0,0 @@
-README file for the Linux DTC3180/3280 scsi driver.
-by Ray Van Tassle (rayvt@comm.mot.com)  March 1996
-Based on the generic & core NCR5380 code by Drew Eckhard
-
-SCSI device driver for the DTC 3180/3280.
-Data Technology Corp---a division of Qume.
-
-The 3280 has a standard floppy interface.
-
-The 3180 does not.  Otherwise, they are identical.
-
-The DTC3x80 does not support DMA but it does have Pseudo-DMA which is
-supported by the driver.
-
-Its DTC406 scsi chip is supposedly compatible with the NCR 53C400.
-It is memory mapped, uses an IRQ, but no dma or io-port.  There is
-internal DMA, between SCSI bus and an on-chip 128-byte buffer.  Double
-buffering is done automagically by the chip.  Data is transferred
-between the on-chip buffer and CPU/RAM via memory moves.
-
-The driver detects the possible memory addresses (jumper selectable):
-	CC00, DC00, C800, and D800
-The possible IRQ's (jumper selectable) are:
-	IRQ 10, 11, 12, 15
-Parity is supported by the chip, but not by this driver.
-Information can be obtained from /proc/scsi/dtc3c80/N.
-
-Note on interrupts:
-
-The documentation says that it can be set to interrupt whenever the
-on-chip buffer needs CPU attention.  I couldn't get this to work.  So
-the driver polls for data-ready in the pseudo-DMA transfer routine.
-The interrupt support routines in the NCR3280.c core modules handle
-scsi disconnect/reconnect, and this (mostly) works.  However.....  I
-have tested it with 4 totally different hard drives (both SCSI-1 and
-SCSI-2), and one CDROM drive.  Interrupts works great for all but one
-specific hard drive.  For this one, the driver will eventually hang in
-the transfer state.  I have tested with: "dd bs=4k count=2k
-of=/dev/null if=/dev/sdb".  It reads ok for a while, then hangs.
-After beating my head against this for a couple of weeks, getting
-nowhere, I give up.  So.....This driver does NOT use interrupts, even
-if you have the card jumpered to an IRQ.  Probably nobody will ever
-care.

+ 0 - 202
Documentation/scsi/in2000.txt

@@ -1,202 +0,0 @@
-
-UPDATE NEWS: version 1.33 - 26 Aug 98
-
-   Interrupt management in this driver has become, over
-   time, increasingly odd and difficult to explain - this
-   has been mostly due to my own mental inadequacies. In
-   recent kernels, it has failed to function at all when
-   compiled for SMP. I've fixed that problem, and after
-   taking a fresh look at interrupts in general, greatly
-   reduced the number of places where they're fiddled
-   with. Done some heavy testing and it looks very good.
-   The driver now makes use of the __initfunc() and
-   __initdata macros to save about 4k of kernel memory.
-   Once again, the same code works for both 2.0.xx and
-   2.1.xx kernels.
-
-UPDATE NEWS: version 1.32 - 28 Mar 98
-
-   Removed the check for legal IN2000 hardware versions:
-   It appears that the driver works fine with serial
-   EPROMs (the 8-pin chip that defines hardware rev) as
-   old as 2.1, so we'll assume that all cards are OK.
-
-UPDATE NEWS: version 1.31 - 6 Jul 97
-
-   Fixed a bug that caused incorrect SCSI status bytes to be
-   returned from commands sent to LUNs greater than 0. This
-   means that CDROM changers work now! Fixed a bug in the
-   handling of command-line arguments when loaded as a module.
-   Also put all the header data in in2000.h where it belongs.
-   There are no longer any differences between this driver in
-   the 2.1.xx source tree and the 2.0.xx tree, as of 2.0.31
-   and 2.1.45 (or is it .46?) - this makes things much easier
-   for me...
-
-UPDATE NEWS: version 1.30 - 14 Oct 96
-
-   Fixed a bug in the code that sets the transfer direction
-   bit (DESTID_DPD in the WD_DESTINATION_ID register). There
-   are quite a few SCSI commands that do a write-to-device;
-   now we deal with all of them correctly. Thanks to Joerg
-   Dorchain for catching this one.
-
-UPDATE NEWS: version 1.29 - 24 Sep 96
-
-   The memory-mapped hardware on the card is now accessed via
-   the 'readb()' and 'readl()' macros - required by the new
-   memory management scheme in the 2.1.x kernel series.
-   As suggested by Andries Brouwer, 'bios_param()' no longer
-   forces an artificial 1023 track limit on drives. Also
-   removed some kludge-code left over from struggles with
-   older (buggy) compilers.
-
-UPDATE NEWS: version 1.28 - 07 May 96
-
-   Tightened up the "interrupts enabled/disabled" discipline
-   in 'in2000_queuecommand()' and maybe 1 or 2 other places.
-   I _think_ it may have been a little too lax, causing an
-   occasional crash during full moon. A fully functional
-   /proc interface is now in place - if you want to play
-   with it, start by doing 'cat /proc/scsi/in2000/0'. You
-   can also use it to change a few run-time parameters on
-   the fly, but it's mostly for debugging. The curious
-   should take a good look at 'in2000_proc_info()' in the
-   in2000.c file to get an understanding of what it's all
-   about; I figure that people who are really into it will
-   want to add features suited to their own needs...
-   Also, sync is now DISABLED by default.
-
-UPDATE NEWS: version 1.27 - 10 Apr 96
-
-   Fixed a well-hidden bug in the adaptive-disconnect code
-   that would show up every now and then during extreme
-   heavy loads involving 2 or more simultaneously active
-   devices. Thanks to Joe Mack for keeping my nose to the
-   grindstone on this one.
-
-UPDATE NEWS: version 1.26 - 07 Mar 96
-
-   1.25 had a nasty bug that bit people with swap partitions
-   and tape drives. Also, in my attempt to guess my way
-   through Intel assembly language, I made an error in the
-   inline code for IO writes. Made a few other changes and
-   repairs - this version (fingers crossed) should work well.
-
-UPDATE NEWS: version 1.25 - 05 Mar 96
-
-   Kernel 1.3.70 interrupt mods added; old kernels still OK.
-   Big help from Bill Earnest and David Willmore on speed
-   testing and optimizing: I think there's a real improvement
-   in this area.
-   New! User-friendly command-line interface for LILO and
-   module loading - the old method is gone, so you'll need
-   to read the comments for 'setup_strings' near the top
-   of in2000.c. For people with CDROM's or other devices
-   that have a tough time with sync negotiation, you can
-   now selectively disable sync on individual devices -
-   search for the 'nosync' keyword in the command-line
-   comments. Some of you disable the BIOS on the card, which
-   caused the auto-detect function to fail; there is now a
-   command-line option to force detection of a ROM-less card.
-
-UPDATE NEWS: version 1.24a - 24 Feb 96
-
-   There was a bug in the synchronous transfer code. Only
-   a few people downloaded before I caught it - could have
-   been worse.
-
-UPDATE NEWS: version 1.24 - 23 Feb 96
-
-   Lots of good changes. Advice from Bill Earnest resulted
-   in much better detection of cards, more efficient usage
-   of the fifo, and (hopefully) faster data transfers. The
-   jury is still out on speed - I hope it's improved some.
-   One nifty new feature is a cool way of doing disconnect/
-   reselect. The driver defaults to what I'm calling
-   'adaptive disconnect' - meaning that each command is
-   evaluated individually as to whether or not it should be
-   run with the option to disconnect/reselect (if the device
-   chooses), or as a "SCSI-bus-hog". When several devices
-   are operating simultaneously, disconnects are usually an
-   advantage. In a single device system, or if only 1 device
-   is being accessed, transfers usually go faster if disconnects
-   are not allowed.
-
-
-
-The default arguments (you get these when you don't give an 'in2000'
-command-line argument, or you give a blank argument) will cause
-the driver to do adaptive disconnect, synchronous transfers, and a
-minimum of debug messages. If you want to fool with the options,
-search for 'setup_strings' near the top of the in2000.c file and
-check the 'hostdata->args' section in in2000.h - but be warned! Not
-everything is working yet (some things will never work, probably).
-I believe that disabling disconnects (DIS_NEVER) will allow you
-to choose a LEVEL2 value higher than 'L2_BASIC', but I haven't
-spent a lot of time testing this. You might try 'ENABLE_CLUSTERING'
-to see what happens: my tests showed little difference either way.
-There's also a define called 'DEFAULT_SX_PER'; this sets the data
-transfer speed for the asynchronous mode. I've put it at 500 ns
-despite the fact that the card could handle settings of 376 or
-252, because higher speeds may be a problem with poor quality
-cables or improper termination; 500 ns is a compromise. You can
-choose your own default through the command-line with the
-'period' keyword.
-
-
-------------------------------------------------
-***********  DIP switch settings  **************
-------------------------------------------------
-
-   sw1-1 sw1-2    BIOS address (hex)
-   -----------------------------------------
-    off   off     C8000 - CBFF0
-    on    off     D8000 - DBFF0
-    off   on      D0000 - D3FF0
-    on    on      BIOS disabled
-
-   sw1-3 sw1-4    IO port address (hex)
-   ------------------------------------
-    off   off     220 - 22F
-    on    off     200 - 20F
-    off   on      110 - 11F
-    on    on      100 - 10F
-
-   sw1-5 sw1-6 sw1-7    Interrupt
-   ------------------------------
-    off   off   off     15
-    off   on    off     14
-    off   off   on      11
-    off   on    on      10
-    on    -     -       disabled
-
-   sw1-8 function depends on BIOS version. In earlier versions this
-   controlled synchronous data transfer support for MSDOS:
-      off = disabled
-      on  = enabled
-   In later ROMs (starting with 01.3 in April 1994) sw1-8 controls
-   the "greater than 2 disk drive" feature that first appeared in
-   MSDOS 5.0 (ignored by Linux):
-      off = 2 drives maximum
-      on  = 7 drives maximum
-
-   sw1-9    Floppy controller
-   --------------------------
-    off     disabled
-    on      enabled
-
-------------------------------------------------
-
-   I should mention that Drew Eckhardt's 'Generic NCR5380' sources
-   were my main inspiration, with lots of reference to the IN2000
-   driver currently distributed in the kernel source. I also owe
-   much to a driver written by Hamish Macdonald for Linux-m68k(!).
-   And to Eric Wright for being an ALPHA guinea pig. And to Bill
-   Earnest for 2 tons of great input and information. And to David
-   Willmore for extensive 'bonnie' testing. And to Joe Mack for
-   continual testing and feedback.
-
-
-            John Shifflett    jshiffle@netcom.com
-

+ 0 - 18
Documentation/scsi/scsi-parameters.txt

@@ -34,9 +34,6 @@ parameters may be changed at runtime by the command
			See drivers/scsi/BusLogic.c, comment before function
			BusLogic_ParseDriverOptions().

-	dtc3181e=	[HW,SCSI]
-			See Documentation/scsi/g_NCR5380.txt.
-
	eata=		[HW,SCSI]

	fdomain=	[HW,SCSI]
@@ -47,9 +44,6 @@ parameters may be changed at runtime by the command

	gvp11=		[HW,SCSI]

-	in2000=		[HW,SCSI]
-			See header of drivers/scsi/in2000.c.
-
	ips=		[HW,SCSI] Adaptec / IBM ServeRAID controller
			See header of drivers/scsi/ips.c.

@@ -83,9 +77,6 @@ parameters may be changed at runtime by the command
			Format: <buffer_size>,<write_threshold>
			See also Documentation/scsi/st.txt.

-	pas16=		[HW,SCSI]
-			See header of drivers/scsi/pas16.c.
-
	scsi_debug_*=	[SCSI]
			See drivers/scsi/scsi_debug.c.

@@ -119,18 +110,9 @@ parameters may be changed at runtime by the command
	sym53c416=	[HW,SCSI]
			See header of drivers/scsi/sym53c416.c.

-	t128=		[HW,SCSI]
-			See header of drivers/scsi/t128.c.
-
	tmscsim=	[HW,SCSI]
			See comment before function dc390_setup() in
			drivers/scsi/tmscsim.c.

-	u14-34f=	[HW,SCSI] UltraStor 14F/34F SCSI host adapter
-			See header of drivers/scsi/u14-34f.c.
-
	wd33c93=	[HW,SCSI]
			See header of drivers/scsi/wd33c93.c.
-
-	wd7000=		[HW,SCSI]
-			See header of drivers/scsi/wd7000.c.

+ 80 - 0
Documentation/scsi/smartpqi.txt

@@ -0,0 +1,80 @@
+
+SMARTPQI - Microsemi Smart PQI Driver
+-----------------------------------------
+
+This file describes the smartpqi SCSI driver for Microsemi
+(http://www.microsemi.com) PQI controllers. The smartpqi driver
+is the next generation SCSI driver for Microsemi Corp. The smartpqi
+driver is the first SCSI driver to implement the PQI queuing model.
+
+The smartpqi driver will replace the aacraid driver for Adaptec Series 9
+controllers. Customers running an older kernel (Pre-4.9) using an Adaptec
+Series 9 controller will have to configure the smartpqi driver or their
+volumes will not be added to the OS.
+
+For Microsemi smartpqi controller support, enable the smartpqi driver
+when configuring the kernel.
+
+For more information on the PQI Queuing Interface, please see:
+http://www.t10.org/drafts.htm
+http://www.t10.org/members/w_pqi2.htm
+
+Supported devices:
+------------------
+<Controller names to be added as they become publically available.>
+
+smartpqi specific entries in /sys
+-----------------------------
+
+  smartpqi host attributes:
+  -------------------------
+  /sys/class/scsi_host/host*/rescan
+  /sys/class/scsi_host/host*/version
+
+  The host rescan attribute is a write only attribute. Writing to this
+  attribute will trigger the driver to scan for new, changed, or removed
+  devices and notify the SCSI mid-layer of any changes detected.
+
+  The version attribute is read-only and will return the driver version
+  and the controller firmware version.
+  For example:
+              driver: 0.9.13-370
+              firmware: 0.01-522
+
+  smartpqi sas device attributes
+  ------------------------------
+  HBA devices are added to the SAS transport layer. These attributes are
+  automatically added by the SAS transport layer.
+
+  /sys/class/sas_device/end_device-X:X/sas_address
+  /sys/class/sas_device/end_device-X:X/enclosure_identifier
+  /sys/class/sas_device/end_device-X:X/scsi_target_id
+
+smartpqi specific ioctls:
+-------------------------
+
+  For compatibility with applications written for the cciss protocol.
+
+  CCISS_DEREGDISK
+  CCISS_REGNEWDISK
+  CCISS_REGNEWD
+
+  The above three ioctls all do exactly the same thing, which is to cause the driver
+  to rescan for new devices.  This does exactly the same thing as writing to the
+  smartpqi specific host "rescan" attribute.
+
+  CCISS_GETPCIINFO
+
+	Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).
+
+  CCISS_GETDRIVVER
+
+	Returns driver version in three bytes encoded as:
+	(DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | (DRIVER_RELEASE << 16) | DRIVER_REVISION;
+
+  CCISS_PASSTHRU
+
+	Allows "BMIC" and "CISS" commands to be passed through to the Smart Storage Array.
+	These are used extensively by the SSA Array Configuration Utility, SNMP storage
+	agents, etc.
+
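
The CCISS_GETDRIVVER encoding above packs four fields into a single 32-bit
value. A small self-contained decoder, with the field widths inferred from
the documented shift amounts (treat the exact widths as an assumption):

#include <stdint.h>
#include <stdio.h>

/* Decode (MAJOR << 28) | (MINOR << 24) | (RELEASE << 16) | REVISION */
static void print_driver_version(uint32_t v)
{
	printf("%u.%u.%u-%u\n",
	       (v >> 28) & 0xf,		/* major:    top 4 bits  */
	       (v >> 24) & 0xf,		/* minor:    next 4 bits */
	       (v >> 16) & 0xff,	/* release:  next 8 bits */
	       v & 0xffff);		/* revision: low 16 bits */
}

int main(void)
{
	/* example value corresponding to driver version 0.9.13-370 above */
	print_driver_version((0u << 28) | (9u << 24) | (13u << 16) | 370u);
	return 0;
}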

+ 17 - 21
MAINTAINERS

@@ -7973,6 +7973,18 @@ W:	http://www.melexis.com
S:	Supported
F:	drivers/iio/temperature/mlx90614.c

+MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
+M:	Don Brace <don.brace@microsemi.com>
+L:	esc.storagedev@microsemi.com
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/smartpqi/smartpqi*.[ch]
+F:	drivers/scsi/smartpqi/Kconfig
+F:	drivers/scsi/smartpqi/Makefile
+F:	include/linux/cciss*.h
+F:	include/uapi/linux/cciss*.h
+F:	Documentation/scsi/smartpqi.txt
+
MN88472 MEDIA DRIVER
M:	Antti Palosaari <crope@iki.fi>
L:	linux-media@vger.kernel.org
@@ -8185,20 +8197,16 @@ M:	Michael Schmitz <schmitzmic@gmail.com>
L:	linux-scsi@vger.kernel.org
S:	Maintained
F:	Documentation/scsi/g_NCR5380.txt
-F:	Documentation/scsi/dtc3x80.txt
F:	drivers/scsi/NCR5380.*
F:	drivers/scsi/arm/cumana_1.c
F:	drivers/scsi/arm/oak.c
F:	drivers/scsi/atari_scsi.*
F:	drivers/scsi/dmx3191d.c
-F:	drivers/scsi/dtc.*
F:	drivers/scsi/g_NCR5380.*
F:	drivers/scsi/g_NCR5380_mmio.c
F:	drivers/scsi/mac_scsi.*
-F:	drivers/scsi/pas16.*
F:	drivers/scsi/sun3_scsi.*
F:	drivers/scsi/sun3_scsi_vme.c
-F:	drivers/scsi/t128.*

NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
M:	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@ -10740,12 +10748,12 @@ S:	Maintained
F:	drivers/misc/phantom.c
F:	include/uapi/linux/phantom.h

-SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
-M:	Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
-M:	Ketan Mukadam <ketan.mukadam@avagotech.com>
-M:	John Soni Jose <sony.john@avagotech.com>
+Emulex 10Gbps iSCSI - OneConnect DRIVER
+M:	Subbu Seetharaman <subbu.seetharaman@broadcom.com>
+M:	Ketan Mukadam <ketan.mukadam@broadcom.com>
+M:	Jitendra Bhivare <jitendra.bhivare@broadcom.com>
L:	linux-scsi@vger.kernel.org
-W:	http://www.avagotech.com
+W:	http://www.broadcom.com
S:	Supported
F:	drivers/scsi/be2iscsi/

@@ -12143,12 +12151,6 @@ S:	Maintained
F:	drivers/tc/
F:	include/linux/tc.h

-U14-34F SCSI DRIVER
-M:	Dario Ballabio <ballabio_dario@emc.com>
-L:	linux-scsi@vger.kernel.org
-S:	Maintained
-F:	drivers/scsi/u14-34f.c
-
UBI FILE SYSTEM (UBIFS)
M:	Richard Weinberger <richard@nod.at>
M:	Artem Bityutskiy <dedekind1@gmail.com>
@@ -12876,12 +12878,6 @@ F:	drivers/watchdog/
F:	include/linux/watchdog.h
F:	include/uapi/linux/watchdog.h

-WD7000 SCSI DRIVER
-M:	Miroslav Zagorac <zaga@fly.cc.fer.hr>
-L:	linux-scsi@vger.kernel.org
-S:	Maintained
-F:	drivers/scsi/wd7000.c
-
WIIMOTE HID DRIVER
M:	David Herrmann <dh.herrmann@googlemail.com>
L:	linux-input@vger.kernel.org

+ 4 - 3
drivers/message/fusion/mptbase.c

@@ -1865,8 +1865,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)

	snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
		 "mpt_poll_%d", ioc->id);
-	ioc->reset_work_q =
-		create_singlethread_workqueue(ioc->reset_work_q_name);
+	ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
+					    WQ_MEM_RECLAIM, 0);
	if (!ioc->reset_work_q) {
		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
		    ioc->name);
@@ -1992,7 +1992,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
	INIT_LIST_HEAD(&ioc->fw_event_list);
	spin_lock_init(&ioc->fw_event_lock);
	snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
-	ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+	ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
+					  WQ_MEM_RECLAIM, 0);
	if (!ioc->fw_event_q) {
		printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
		    ioc->name);
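
The conversion above replaces the legacy create_singlethread_workqueue()
helper with an explicit alloc_workqueue() call, so the WQ_MEM_RECLAIM
guarantee (a rescuer thread for forward progress under memory pressure) is
spelled out for queues used on the I/O path. A minimal sketch of the
pattern, with "demo_wq" as a made-up queue name:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_init(void)
{
	/* old: demo_wq = create_singlethread_workqueue("demo_wq"); */
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

Where strict ordering is also required, the mptfc.c hunk below uses
alloc_ordered_workqueue(name, WQ_MEM_RECLAIM) instead.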

+ 5 - 2
drivers/message/fusion/mptfc.c

@@ -1324,9 +1324,12 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
		 "mptfc_wq_%d", sh->host_no);
	ioc->fc_rescan_work_q =
-		create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
-	if (!ioc->fc_rescan_work_q)
+		alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
+					WQ_MEM_RECLAIM);
+	if (!ioc->fc_rescan_work_q) {
+		error = -ENOMEM;
		goto out_mptfc_probe;
+	}

	/*
	 *  Pre-fetch FC port WWN and stuff...

+ 144 - 18
drivers/s390/scsi/zfcp_dbf.c

@@ -3,7 +3,7 @@
 *
 * Debug traces for zfcp.
 *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2016
 */

#define KMSG_COMPONENT "zfcp"
@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request for which a response was received
 */
-void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
+	rec->u.res.port_handle = q_head->port_handle;
+	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
				  rec->pl_len, "fsf_res", req->req_id);
	}

-	debug_event(dbf->hba, 1, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
-	}
+	} else
+		rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

+/**
+ * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
+ * @tag: identifier for event
+ * @wka_port: well known address port
+ * @req_id: request ID to correlate with potential HBA trace record
+ */
+void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
+			  u64 req_id)
+{
+	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
+	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dbf->rec_lock, flags);
+	memset(rec, 0, sizeof(*rec));
+
+	rec->id = ZFCP_DBF_REC_RUN;
+	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->port_status = wka_port->status;
+	rec->d_id = wka_port->d_id;
+	rec->lun = ZFCP_DBF_INVALID_LUN;
+
+	rec->u.run.fsf_req_id = req_id;
+	rec->u.run.rec_status = ~0;
+	rec->u.run.rec_step = ~0;
+	rec->u.run.rec_action = ~0;
+	rec->u.run.rec_count = ~0;
+
+	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
+}
+
static inline
-void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
-		  u64 req_id, u32 d_id)
+void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
+		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
+		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
+	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
+	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));
@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
-	rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
-	memcpy(rec->payload, data, rec_len);
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+	rec->pl_len = len; /* full length even if we cap pay below */
+	if (!sg)
+		goto out;
+	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
+	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
+	if (len <= rec_len)
+		goto out; /* skip pay record if full content in rec->payload */
+
+	/* if (len > rec_len):
+	 * dump data up to cap_len ignoring small duplicate in rec->payload
+	 */
+	spin_lock_irqsave(&dbf->pay_lock, flags);
+	memset(payload, 0, sizeof(*payload));
+	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
+	payload->fsf_req_id = req_id;
+	payload->counter = 0;
+	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
+		u16 pay_len, offset = 0;
+
+		while (offset < sg->length && pay_sum < cap_len) {
+			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
+				      (u16)(sg->length - offset));
+			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
+			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
+			debug_event(dbf->pay, 1, payload,
+				    zfcp_dbf_plen(pay_len));
+			payload->counter++;
+			offset += pay_len;
+			pay_sum += pay_len;
+		}
+	}
+	spin_unlock(&dbf->pay_lock);

+out:
	debug_event(dbf->san, 1, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}
@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

-	length = (u16)(ct_els->req->length + FC_CT_HDR_LEN);
-	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length,
-		     fsf->req_id, d_id);
+	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
+	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
+		     length, fsf->req_id, d_id, length);
+}
+
+static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+					      struct zfcp_fsf_req *fsf,
+					      u16 len)
+{
+	struct zfcp_fsf_ct_els *ct_els = fsf->data;
+	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+	struct scatterlist *resp_entry = ct_els->resp;
+	struct fc_gpn_ft_resp *acc;
+	int max_entries, x, last = 0;
+
+	if (!(memcmp(tag, "fsscth2", 7) == 0
+	      && ct_els->d_id == FC_FID_DIR_SERV
+	      && reqh->ct_rev == FC_CT_REV
+	      && reqh->ct_in_id[0] == 0
+	      && reqh->ct_in_id[1] == 0
+	      && reqh->ct_in_id[2] == 0
+	      && reqh->ct_fs_type == FC_FST_DIR
+	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
+	      && reqh->ct_options == 0
+	      && reqh->_ct_resvd1 == 0
+	      && reqh->ct_cmd == FC_NS_GPN_FT
+	      /* reqh->ct_mr_size can vary so do not match but read below */
+	      && reqh->_ct_resvd2 == 0
+	      && reqh->ct_reason == 0
+	      && reqh->ct_explan == 0
+	      && reqh->ct_vendor == 0
+	      && reqn->fn_resvd == 0
+	      && reqn->fn_domain_id_scope == 0
+	      && reqn->fn_area_id_scope == 0
+	      && reqn->fn_fc4_type == FC_TYPE_FCP))
+		return len; /* not GPN_FT response so do not cap */
+
+	acc = sg_virt(resp_entry);
+	max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+		     * to account for header as 1st pseudo "entry" */;
+
+	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
+	 * response, allowing us to skip special handling for it - just skip it
+	 */
+	for (x = 1; x < max_entries && !last; x++) {
+		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
+			acc++;
+		else
+			acc = sg_virt(++resp_entry);
+
+		last = acc->fp_flags & FC_NS_FID_LAST;
+	}
+	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
+	return len; /* cap after last entry */
}

/**
@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

-	length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN);
-	zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length,
-		     fsf->req_id, 0);
+	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
+	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
+		     length, fsf->req_id, ct_els->d_id,
+		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
+	struct scatterlist sg;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
-	zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length,
-		     fsf->req_id, ntoh24(srb->d_id));
+	sg_init_one(&sg, srb->payload.data, length);
+	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
+		     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
 * @sc: pointer to struct scsi_cmnd
 * @fsf: pointer to struct zfcp_fsf_req
 */
-void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+		   struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sc->device->host->hostdata[0];
@@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
		}
	}

-	debug_event(dbf->scsi, 1, rec, sizeof(*rec));
+	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
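
The zfcp_dbf_san() loop above splits any payload longer than the inline
record into fixed-size "pay" records with a running counter, stopping once
cap_len bytes have been dumped. A user-space sketch of the same chunking
arithmetic (demo_* names are hypothetical, printf stands in for
debug_event(), and unlike the loop above it truncates exactly at the cap
rather than rounding up to the end of the last record):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_PAY_MAX_REC 256	/* stand-in for ZFCP_DBF_PAY_MAX_REC */

static void demo_trace_payload(const uint8_t *data, size_t len, size_t cap)
{
	uint8_t rec[DEMO_PAY_MAX_REC];
	unsigned int counter = 0;
	size_t off = 0;

	if (len > cap)
		len = cap;	/* dump no more than the cap */
	while (off < len) {
		size_t chunk = len - off;

		if (chunk > sizeof(rec))
			chunk = sizeof(rec);
		memcpy(rec, data + off, chunk);	/* fill one trace record */
		printf("record %u: %zu bytes\n", counter, chunk);
		counter++;
		off += chunk;
	}
}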

+ 11 - 3
drivers/s390/scsi/zfcp_dbf.h

@@ -2,7 +2,7 @@
 * zfcp device driver
 * debug feature declarations
 *
- * Copyright IBM Corp. 2008, 2010
+ * Copyright IBM Corp. 2008, 2015
 */

#ifndef ZFCP_DBF_H
@@ -17,6 +17,11 @@

#define ZFCP_DBF_INVALID_LUN	0xFFFFFFFFFFFFFFFFull

+enum zfcp_dbf_pseudo_erp_act_type {
+	ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
+	ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
+};
+
/**
 * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
 * @ready: number of ready recovery actions
@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
	u32 d_id;
#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
	char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
+	u16 pl_len;
} __packed;

/**
@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
	u8  prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
	u32 fsf_status;
	u8  fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
+	u32 port_handle;
+	u32 lun_handle;
} __packed;

/**
@@ -279,7 +287,7 @@ static inline
void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
{
	if (debug_level_enabled(req->adapter->dbf->hba, level))
-		zfcp_dbf_hba_fsf_res(tag, req);
+		zfcp_dbf_hba_fsf_res(tag, level, req);
}

/**
@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
					scmd->device->host->hostdata[0];

	if (debug_level_enabled(adapter->dbf->scsi, level))
-		zfcp_dbf_scsi(tag, scmd, req);
+		zfcp_dbf_scsi(tag, level, scmd, req);
}

/**

+ 9 - 3
drivers/s390/scsi/zfcp_erp.c

@@ -3,7 +3,7 @@
 *
 * Error Recovery Procedures (ERP).
 *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
 */

#define KMSG_COMPONENT "zfcp"
@@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
		break;

	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		if (result == ZFCP_ERP_SUCCEEDED)
-			zfcp_scsi_schedule_rport_register(port);
+		/* This switch case might also happen after a forced reopen
+		 * was successfully done and thus overwritten with a new
+		 * non-forced reopen at `ersfs_2'. In this case, we must not
+		 * do the clean-up of the non-forced version.
+		 */
+		if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
+			if (result == ZFCP_ERP_SUCCEEDED)
+				zfcp_scsi_schedule_rport_register(port);
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		put_device(&port->dev);

+ 5 - 3
drivers/s390/scsi/zfcp_ext.h

@@ -3,7 +3,7 @@
 *
 * External function declarations.
 *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
 */

#ifndef ZFCP_EXT_H
@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
			      struct zfcp_port *, struct scsi_device *, u8, u8);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
+extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
-extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
+			  struct zfcp_fsf_req *);

/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);

+ 17 - 5
drivers/s390/scsi/zfcp_fsf.c

@@ -3,7 +3,7 @@
 *
 * Implementation of FSF commands.
 *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
 */

#define KMSG_COMPONENT "zfcp"
@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
		break;
	case FSF_TOPO_FABRIC:
-		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
+			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+		else
+			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
		break;
	case FSF_TOPO_AL:
		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)

	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
		fc_host_permanent_port_name(shost) = bottom->wwpn;
-		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
	} else
		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
	if (zfcp_adapter_multi_buffer_active(adapter)) {
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
			return -EIO;
+		qtcb->bottom.support.req_buf_length =
+			zfcp_qdio_real_bytes(sg_req);
		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
			return -EIO;
+		qtcb->bottom.support.resp_buf_length =
+			zfcp_qdio_real_bytes(sg_resp);

		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
					zfcp_qdio_sbale_count(sg_req));
@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,

	req->handler = zfcp_fsf_send_ct_handler;
	req->qtcb->header.port_handle = wka_port->handle;
+	ct->d_id = wka_port->d_id;
	req->data = ct;

	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,

	hton24(req->qtcb->bottom.support.d_id, d_id);
	req->handler = zfcp_fsf_send_els_handler;
+	els->d_id = d_id;
	req->data = els;

	zfcp_dbf_san_req("fssels1", req, d_id);
@@ -1575,7 +1583,7 @@ out:
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-	struct zfcp_fsf_req *req;
+	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
@@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
		zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
+	if (req && !IS_ERR(req))
+		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
	return retval;
}

@@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
-	struct zfcp_fsf_req *req;
+	struct zfcp_fsf_req *req = NULL;
	int retval = -EIO;

	spin_lock_irq(&qdio->req_q_lock);
@@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
		zfcp_fsf_req_free(req);
out:
	spin_unlock_irq(&qdio->req_q_lock);
+	if (req && !IS_ERR(req))
+		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
	return retval;
}

+ 3 - 1
drivers/s390/scsi/zfcp_fsf.h

@@ -3,7 +3,7 @@
 *
 * Interface to the FSF support functions.
 *
- * Copyright IBM Corp. 2002, 2010
+ * Copyright IBM Corp. 2002, 2015
 */

#ifndef FSF_H
@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
 * @handler_data: data passed to handler function
 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
 * @status: used to pass error status to calling function
+ * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
 */
struct zfcp_fsf_ct_els {
	struct scatterlist *req;
@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
	void *handler_data;
	struct zfcp_port *port;
	int status;
+	u32 d_id;
};

#endif				/* FSF_H */

+ 7 - 1
drivers/s390/scsi/zfcp_scsi.c

@@ -3,7 +3,7 @@
  *
  *
  * Interface to Linux SCSI midlayer.
  * Interface to Linux SCSI midlayer.
  *
  *
- * Copyright IBM Corp. 2002, 2013
+ * Copyright IBM Corp. 2002, 2015
  */
  */
 
 
 #define KMSG_COMPONENT "zfcp"
 #define KMSG_COMPONENT "zfcp"
@@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
 	ids.port_id = port->d_id;
 	ids.roles = FC_RPORT_ROLE_FCP_TARGET;
 
+	zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
+			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+			  ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
 	rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
 	if (!rport) {
 		dev_err(&port->adapter->ccw_device->dev,
@@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
 	struct fc_rport *rport = port->rport;
 
 	if (rport) {
+		zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
+				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+				  ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
 		fc_remote_port_delete(rport);
 		port->rport = NULL;
 	}

+ 1 - 135
drivers/scsi/Kconfig

@@ -396,18 +396,6 @@ config SCSI_3W_SAS
 	  Please read the comments at the top of
 	  <file:drivers/scsi/3w-sas.c>.
 
-config SCSI_7000FASST
-	tristate "7000FASST SCSI support"
-	depends on ISA && SCSI && ISA_DMA_API
-	select CHECK_SIGNATURE
-	help
-	  This driver supports the Western Digital 7000 SCSI host adapter
-	  family.  Some information is in the source:
-	  <file:drivers/scsi/wd7000.c>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called wd7000.
-
 config SCSI_ACARD
 	tristate "ACARD SCSI support"
 	depends on PCI && SCSI
@@ -512,18 +500,6 @@ config SCSI_ADVANSYS
 	  To compile this driver as a module, choose M here: the
 	  module will be called advansys.
 
-config SCSI_IN2000
-	tristate "Always IN2000 SCSI support"
-	depends on ISA && SCSI
-	help
-	  This is support for an ISA bus SCSI host adapter.  You'll find more
-	  information in <file:Documentation/scsi/in2000.txt>. If it doesn't work
-	  out of the box, you may have to change the jumpers for IRQ or
-	  address selection.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called in2000.
-
 config SCSI_ARCMSR
 	tristate "ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Adapter"
 	depends on PCI && SCSI
@@ -540,6 +516,7 @@ config SCSI_ARCMSR
 source "drivers/scsi/esas2r/Kconfig"
 source "drivers/scsi/megaraid/Kconfig.megaraid"
 source "drivers/scsi/mpt3sas/Kconfig"
+source "drivers/scsi/smartpqi/Kconfig"
 source "drivers/scsi/ufs/Kconfig"
 
 config SCSI_HPTIOP
@@ -660,20 +637,6 @@ config SCSI_DMX3191D
 	  To compile this driver as a module, choose M here: the
 	  module will be called dmx3191d.
 
-config SCSI_DTC3280
-	tristate "DTC3180/3280 SCSI support"
-	depends on ISA && SCSI
-	select SCSI_SPI_ATTRS
-	select CHECK_SIGNATURE
-	help
-	  This is support for DTC 3180/3280 SCSI Host Adapters.  Please read
-	  the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>, and the file
-	  <file:Documentation/scsi/dtc3x80.txt>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called dtc.
-
 config SCSI_EATA
 	tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
 	depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
@@ -1248,20 +1211,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
 	  not allow targets to disconnect is not reasonable if there is more
 	  than 1 device on a SCSI bus. The normal answer therefore is N.
 
-config SCSI_PAS16
-	tristate "PAS16 SCSI support"
-	depends on ISA && SCSI
-	select SCSI_SPI_ATTRS
-	---help---
-	  This is support for a SCSI host adapter.  It is explained in section
-	  3.10 of the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.  If it doesn't work out
-	  of the box, you may have to change some settings in
-	  <file:drivers/scsi/pas16.h>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called pas16.
-
 config SCSI_QLOGIC_FAS
 	tristate "Qlogic FAS SCSI support"
 	depends on ISA && SCSI
@@ -1382,89 +1331,6 @@ config SCSI_AM53C974
 	  To compile this driver as a module, choose M here: the
 	  module will be called am53c974.
 
-config SCSI_T128
-	tristate "Trantor T128/T128F/T228 SCSI support"
-	depends on ISA && SCSI
-	select SCSI_SPI_ATTRS
-	select CHECK_SIGNATURE
-	---help---
-	  This is support for a SCSI host adapter. It is explained in section
-	  3.11 of the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.  If it doesn't work out
-	  of the box, you may have to change some settings in
-	  <file:drivers/scsi/t128.h>.  Note that Trantor was purchased by
-	  Adaptec, and some former Trantor products are being sold under the
-	  Adaptec name.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called t128.
-
-config SCSI_U14_34F
-	tristate "UltraStor 14F/34F support"
-	depends on ISA && SCSI && ISA_DMA_API
-	---help---
-	  This is support for the UltraStor 14F and 34F SCSI-2 host adapters.
-	  The source at <file:drivers/scsi/u14-34f.c> contains some
-	  information about this hardware.  If the driver doesn't work out of
-	  the box, you may have to change some settings in
-	  <file: drivers/scsi/u14-34f.c>.  Read the SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.  Note that there is also
-	  another driver for the same hardware: "UltraStor SCSI support",
-	  below.  You should say Y to both only if you want 24F support as
-	  well.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called u14-34f.
-
-config SCSI_U14_34F_TAGGED_QUEUE
-	bool "enable tagged command queueing"
-	depends on SCSI_U14_34F
-	help
-	  This is a feature of SCSI-2 which improves performance: the host
-	  adapter can send several SCSI commands to a device's queue even if
-	  previous commands haven't finished yet.
-	  This is equivalent to the "u14-34f=tc:y" boot option.
-
-config SCSI_U14_34F_LINKED_COMMANDS
-	bool "enable elevator sorting"
-	depends on SCSI_U14_34F
-	help
-	  This option enables elevator sorting for all probed SCSI disks and
-	  CD-ROMs. It definitely reduces the average seek distance when doing
-	  random seeks, but this does not necessarily result in a noticeable
-	  performance improvement: your mileage may vary...
-	  This is equivalent to the "u14-34f=lc:y" boot option.
-
-config SCSI_U14_34F_MAX_TAGS
-	int "maximum number of queued commands"
-	depends on SCSI_U14_34F
-	default "8"
-	help
-	  This specifies how many SCSI commands can be maximally queued for
-	  each probed SCSI device. You should reduce the default value of 8
-	  only if you have disks with buggy or limited tagged command support.
-	  Minimum is 2 and maximum is 14. This value is also the window size
-	  used by the elevator sorting option above. The effective value used
-	  by the driver for each probed SCSI device is reported at boot time.
-	  This is equivalent to the "u14-34f=mq:8" boot option.
-
-config SCSI_ULTRASTOR
-	tristate "UltraStor SCSI support"
-	depends on X86 && ISA && SCSI && ISA_DMA_API
-	---help---
-	  This is support for the UltraStor 14F, 24F and 34F SCSI-2 host
-	  adapter family.  This driver is explained in section 3.12 of the
-	  SCSI-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.  If it doesn't work out
-	  of the box, you may have to change some settings in
-	  <file:drivers/scsi/ultrastor.h>.
-
-	  Note that there is also another driver for the same hardware:
-	  "UltraStor 14F/34F support", above.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called ultrastor.
-
 config SCSI_NSP32
 	tristate "Workbit NinjaSCSI-32Bi/UDE support"
 	depends on PCI && SCSI && !64BIT

+ 1 - 7
drivers/scsi/Makefile

@@ -61,9 +61,7 @@ obj-$(CONFIG_SCSI_SIM710)	+= 53c700.o	sim710.o
 obj-$(CONFIG_SCSI_ADVANSYS)	+= advansys.o
 obj-$(CONFIG_SCSI_BUSLOGIC)	+= BusLogic.o
 obj-$(CONFIG_SCSI_DPT_I2O)	+= dpt_i2o.o
-obj-$(CONFIG_SCSI_U14_34F)	+= u14-34f.o
 obj-$(CONFIG_SCSI_ARCMSR)	+= arcmsr/
-obj-$(CONFIG_SCSI_ULTRASTOR)	+= ultrastor.o
 obj-$(CONFIG_SCSI_AHA152X)	+= aha152x.o
 obj-$(CONFIG_SCSI_AHA1542)	+= aha1542.o
 obj-$(CONFIG_SCSI_AHA1740)	+= aha1740.o
@@ -75,7 +73,6 @@ obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
 obj-$(CONFIG_SCSI_ISCI)		+= isci/
 obj-$(CONFIG_SCSI_IPS)		+= ips.o
 obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
-obj-$(CONFIG_SCSI_IN2000)	+= in2000.o
 obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
 obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
 obj-$(CONFIG_SCSI_NCR53C406A)	+= NCR53c406a.o
@@ -90,15 +87,12 @@ obj-$(CONFIG_SCSI_QLA_ISCSI)	+= libiscsi.o qla4xxx/
 obj-$(CONFIG_SCSI_LPFC)		+= lpfc/
 obj-$(CONFIG_SCSI_BFA_FC)	+= bfa/
 obj-$(CONFIG_SCSI_CHELSIO_FCOE)	+= csiostor/
-obj-$(CONFIG_SCSI_PAS16)	+= pas16.o
-obj-$(CONFIG_SCSI_T128)		+= t128.o
 obj-$(CONFIG_SCSI_DMX3191D)	+= dmx3191d.o
 obj-$(CONFIG_SCSI_HPSA)		+= hpsa.o
-obj-$(CONFIG_SCSI_DTC3280)	+= dtc.o
+obj-$(CONFIG_SCSI_SMARTPQI)	+= smartpqi/
 obj-$(CONFIG_SCSI_SYM53C8XX_2)	+= sym53c8xx_2/
 obj-$(CONFIG_SCSI_ZALON)	+= zalon7xx.o
 obj-$(CONFIG_SCSI_EATA_PIO)	+= eata_pio.o
-obj-$(CONFIG_SCSI_7000FASST)	+= wd7000.o
 obj-$(CONFIG_SCSI_EATA)		+= eata.o
 obj-$(CONFIG_SCSI_DC395x)	+= dc395x.o
 obj-$(CONFIG_SCSI_AM53C974)	+= esp_scsi.o	am53c974.o

+ 4 - 11
drivers/scsi/NCR5380.c

@@ -230,13 +230,6 @@ static int NCR5380_poll_politely2(struct Scsi_Host *instance,
 	return -ETIMEDOUT;
 }
 
-static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
-                                        int reg, int bit, int val, int wait)
-{
-	return NCR5380_poll_politely2(instance, reg, bit, val,
-	                                        reg, bit, val, wait);
-}
-
 #if NDEBUG
 static struct {
 	unsigned char mask;
@@ -1854,11 +1847,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 						/* XXX - need to source or sink data here, as appropriate */
 					}
 				} else {
-					/* Break up transfer into 3 ms chunks,
-					 * presuming 6 accesses per handshake.
+					/* Transfer a small chunk so that the
+					 * irq mode lock is not held too long.
 					 */
-					transfersize = min((unsigned long)cmd->SCp.this_residual,
-					                   hostdata->accesses_per_ms / 2);
+					transfersize = min(cmd->SCp.this_residual,
+							   NCR5380_PIO_CHUNK_SIZE);
 					len = transfersize;
 					NCR5380_transfer_pio(instance, &phase, &len,
 					                     (unsigned char **)&cmd->SCp.ptr);
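The hunk above swaps a chunk size derived from a measured access rate for a fixed NCR5380_PIO_CHUNK_SIZE cap, so the irq-mode lock is dropped at predictable intervals. A standalone sketch of that bounded-chunk loop (memcpy stands in for the real PIO handshake; names here are illustrative):

#include <stdio.h>
#include <string.h>

#define PIO_CHUNK_SIZE 256	/* mirrors the new fixed cap */

static void transfer_all(unsigned char *dst, const unsigned char *src,
			 size_t residual)
{
	while (residual) {
		size_t len = residual < PIO_CHUNK_SIZE ?
			     residual : PIO_CHUNK_SIZE;

		/* the driver would take and release its lock per burst */
		memcpy(dst, src, len);
		dst += len;
		src += len;
		residual -= len;
	}
}

int main(void)
{
	unsigned char src[1000], dst[1000];

	memset(src, 0xa5, sizeof(src));
	transfer_all(dst, src, sizeof(src));
	printf("match=%d\n", memcmp(src, dst, sizeof(dst)) == 0);
	return 0;
}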

+ 9 - 1
drivers/scsi/NCR5380.h

@@ -250,6 +250,8 @@ struct NCR5380_cmd {
 
 #define NCR5380_CMD_SIZE		(sizeof(struct NCR5380_cmd))
 
+#define NCR5380_PIO_CHUNK_SIZE		256
+
 static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
 {
 	return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -292,8 +294,14 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
 static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
 static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
 static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
-static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int);
 static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
 
+static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
+					int reg, int bit, int val, int wait)
+{
+	return NCR5380_poll_politely2(instance, reg, bit, val,
+						reg, bit, val, wait);
+}
+
 #endif				/* __KERNEL__ */
 #endif				/* NCR5380_H */
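Making the one-condition wrapper a static inline in the header, instead of an out-of-line helper in the .c file, lets every caller of NCR5380_poll_politely() forward straight into the two-condition poller. The shape of that wrapper in a compilable mock (the poll body is stubbed; this is not the driver code):

#include <stdio.h>

/* general form: poll until either of two register/bit conditions holds */
static int poll_politely2(int reg1, int bit1, int val1,
			  int reg2, int bit2, int val2, int wait)
{
	printf("polling reg %d and reg %d for up to %d ticks\n",
	       reg1, reg2, wait);
	return 0;	/* stub: a real poller returns 0 or -ETIMEDOUT */
}

/* common case: one condition, passed in both slots */
static inline int poll_politely(int reg, int bit, int val, int wait)
{
	return poll_politely2(reg, bit, val, reg, bit, val, wait);
}

int main(void)
{
	return poll_politely(3, 0x80, 0x80, 100);
}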

+ 1 - 1
drivers/scsi/aacraid/src.c

@@ -613,7 +613,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
  *	@dev: Adapter
  *	@comm: communications method
  */
-int aac_src_select_comm(struct aac_dev *dev, int comm)
+static int aac_src_select_comm(struct aac_dev *dev, int comm)
 {
 	switch (comm) {
 	case AAC_COMM_MESSAGE:

+ 1 - 1
drivers/scsi/aic94xx/aic94xx_hwi.c

@@ -632,7 +632,7 @@ int asd_init_hw(struct asd_ha_struct *asd_ha)
 			   pci_name(asd_ha->pcidev));
 		return err;
 	}
-	pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
+	err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
 					v | SC_TMR_DIS);
 	if (err) {
 		asd_printk("couldn't disable split completion timer of %s\n",
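The point of that one-liner is that the following if (err) used to test a stale value, because the return of pci_write_config_dword() was being discarded. A trivial sketch of the corrected capture-and-check shape (mock function, not the real PCI API):

#include <stdio.h>

static int config_write(unsigned int reg, unsigned int val)
{
	(void)reg;
	(void)val;
	return 0;	/* a real config write can return a negative errno */
}

static int init_hw(void)
{
	int err = config_write(0x98, 0x1);	/* capture, don't discard */

	if (err) {
		fprintf(stderr, "couldn't disable split completion timer\n");
		return err;
	}
	return 0;
}

int main(void)
{
	return init_hw();
}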

+ 10 - 2
drivers/scsi/arcmsr/arcmsr_hba.c

@@ -2388,15 +2388,23 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 	}
 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
 		unsigned char *ver_addr;
-		int32_t user_len, cnt2end;
+		uint32_t user_len;
+		int32_t cnt2end;
 		uint8_t *pQbuffer, *ptmpuserbuffer;
+
+		user_len = pcmdmessagefld->cmdmessage.Length;
+		if (user_len > ARCMSR_API_DATA_BUFLEN) {
+			retvalue = ARCMSR_MESSAGE_FAIL;
+			goto message_out;
+		}
+
 		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
 		if (!ver_addr) {
 			retvalue = ARCMSR_MESSAGE_FAIL;
 			goto message_out;
 		}
 		ptmpuserbuffer = ver_addr;
+
 		memcpy(ptmpuserbuffer,
 			pcmdmessagefld->messagedatabuffer, user_len);
 		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
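The fix above treats the user-supplied message length as unsigned and rejects it before anything is allocated or copied, so an oversized (or formerly negative) value can never overrun the fixed-size buffer. A standalone sketch of that validate-then-copy order (the buffer length constant merely mirrors the driver's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define API_DATA_BUFLEN 1032	/* stand-in for ARCMSR_API_DATA_BUFLEN */

static int write_wqbuffer(const uint8_t *user_buf, uint32_t user_len)
{
	uint8_t *tmp;

	if (user_len > API_DATA_BUFLEN)
		return -1;	/* reject before touching any memory */

	tmp = malloc(API_DATA_BUFLEN);
	if (!tmp)
		return -1;
	memcpy(tmp, user_buf, user_len);	/* now provably in bounds */
	free(tmp);
	return 0;
}

int main(void)
{
	uint8_t buf[8] = { 0 };

	printf("small: %d\n", write_wqbuffer(buf, sizeof(buf)));	/* 0 */
	printf("huge:  %d\n", write_wqbuffer(buf, 0xffffffffu));	/* -1 */
	return 0;
}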

+ 9 - 6
drivers/scsi/be2iscsi/be.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -89,7 +89,7 @@ struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
 	u32 max_eqd;		/* in usecs */
 	u32 prev_eqd;		/* in usecs */
 	u32 et_eqd;		/* configured val when aic is off */
-	ulong jiffs;
+	ulong jiffies;
 	u64 eq_prev;		/* Used to calculate eqe */
 };
 
@@ -100,7 +100,7 @@ struct be_eq_obj {
 	struct be_queue_info q;
 	struct beiscsi_hba *phba;
 	struct be_queue_info *cq;
-	struct work_struct work_cqs; /* Work Item */
+	struct work_struct mcc_work; /* Work Item */
 	struct irq_poll	iopoll;
 };
 
@@ -111,8 +111,11 @@ struct be_mcc_obj {
 
 struct beiscsi_mcc_tag_state {
 	unsigned long tag_state;
-#define MCC_TAG_STATE_RUNNING	1
-#define MCC_TAG_STATE_TIMEOUT	2
+#define MCC_TAG_STATE_RUNNING	0
+#define MCC_TAG_STATE_TIMEOUT	1
+#define MCC_TAG_STATE_ASYNC	2
+#define MCC_TAG_STATE_IGNORE	3
+	void (*cbfn)(struct beiscsi_hba *, unsigned int);
 	struct be_dma_mem tag_mem_state;
 };
 
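The renumbering of MCC_TAG_STATE_* from 1,2 to 0..3 reflects how these constants are used: they are bit numbers handed to set_bit()/test_bit() on tag_state, not masks, so they should count from 0 (the old scheme left bit 0 unused) and the two new states simply extend the sequence. A userspace sketch with mock bit helpers, for illustration only:

#include <stdio.h>

#define MCC_TAG_STATE_RUNNING	0
#define MCC_TAG_STATE_TIMEOUT	1
#define MCC_TAG_STATE_ASYNC	2
#define MCC_TAG_STATE_IGNORE	3

/* mock versions of the kernel's bit helpers */
static void set_bit(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_bit(int nr, const unsigned long *addr)
{
	return (int)((*addr >> nr) & 1UL);
}

int main(void)
{
	unsigned long tag_state = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &tag_state);
	set_bit(MCC_TAG_STATE_ASYNC, &tag_state);
	printf("running=%d timeout=%d async=%d ignore=%d\n",
	       test_bit(MCC_TAG_STATE_RUNNING, &tag_state),
	       test_bit(MCC_TAG_STATE_TIMEOUT, &tag_state),
	       test_bit(MCC_TAG_STATE_ASYNC, &tag_state),
	       test_bit(MCC_TAG_STATE_IGNORE, &tag_state));
	return 0;
}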

+ 756 - 340
drivers/scsi/be2iscsi/be_cmds.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -21,110 +21,77 @@
 #include "be.h"
 #include "be_mgmt.h"
 
-int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
-{
-	u32 sreset;
-	u8 *pci_reset_offset = 0;
-	u8 *pci_online0_offset = 0;
-	u8 *pci_online1_offset = 0;
-	u32 pconline0 = 0;
-	u32 pconline1 = 0;
-	u32 i;
-
-	pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
-	pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
-	pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
-	sreset = readl((void *)pci_reset_offset);
-	sreset |= BE2_SET_RESET;
-	writel(sreset, (void *)pci_reset_offset);
-
-	i = 0;
-	while (sreset & BE2_SET_RESET) {
-		if (i > 64)
-			break;
-		msleep(100);
-		sreset = readl((void *)pci_reset_offset);
-		i++;
-	}
-
-	if (sreset & BE2_SET_RESET) {
-		printk(KERN_ERR DRV_NAME
-		       " Soft Reset  did not deassert\n");
-		return -EIO;
-	}
-	pconline1 = BE2_MPU_IRAM_ONLINE;
-	writel(pconline0, (void *)pci_online0_offset);
-	writel(pconline1, (void *)pci_online1_offset);
-
-	sreset |= BE2_SET_RESET;
-	writel(sreset, (void *)pci_reset_offset);
-
-	i = 0;
-	while (sreset & BE2_SET_RESET) {
-		if (i > 64)
-			break;
-		msleep(1);
-		sreset = readl((void *)pci_reset_offset);
-		i++;
-	}
-	if (sreset & BE2_SET_RESET) {
-		printk(KERN_ERR DRV_NAME
-		       " MPU Online Soft Reset did not deassert\n");
-		return -EIO;
-	}
-	return 0;
-}
-
-int be_chk_reset_complete(struct beiscsi_hba *phba)
-{
-	unsigned int num_loop;
-	u8 *mpu_sem = 0;
-	u32 status;
-
-	num_loop = 1000;
-	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
-	msleep(5000);
-
-	while (num_loop) {
-		status = readl((void *)mpu_sem);
-
-		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
-			break;
-		msleep(60);
-		num_loop--;
-	}
-
-	if ((status & 0x80000000) || (!num_loop)) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BC_%d : Failed in be_chk_reset_complete"
-			    "status = 0x%x\n", status);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
-{
-	unsigned int tag = 0;
+/* UE Status Low CSR */
+static const char * const desc_ue_status_low[] = {
+	"CEV",
+	"CTX",
+	"DBUF",
+	"ERX",
+	"Host",
+	"MPU",
+	"NDMA",
+	"PTC ",
+	"RDMA ",
+	"RXF ",
+	"RXIPS ",
+	"RXULP0 ",
+	"RXULP1 ",
+	"RXULP2 ",
+	"TIM ",
+	"TPOST ",
+	"TPRE ",
+	"TXIPS ",
+	"TXULP0 ",
+	"TXULP1 ",
+	"UC ",
+	"WDMA ",
+	"TXULP2 ",
+	"HOST1 ",
+	"P0_OB_LINK ",
+	"P1_OB_LINK ",
+	"HOST_GPIO ",
+	"MBOX ",
+	"AXGMAC0",
+	"AXGMAC1",
+	"JTAG",
+	"MPU_INTPEND"
+};
 
-	spin_lock(&phba->ctrl.mcc_lock);
-	if (phba->ctrl.mcc_tag_available) {
-		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
-		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
-		phba->ctrl.mcc_tag_status[tag] = 0;
-		phba->ctrl.ptag_state[tag].tag_state = 0;
-	}
-	if (tag) {
-		phba->ctrl.mcc_tag_available--;
-		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
-			phba->ctrl.mcc_alloc_index = 0;
-		else
-			phba->ctrl.mcc_alloc_index++;
-	}
-	spin_unlock(&phba->ctrl.mcc_lock);
-	return tag;
-}
+/* UE Status High CSR */
+static const char * const desc_ue_status_hi[] = {
+	"LPCMEMHOST",
+	"MGMT_MAC",
+	"PCS0ONLINE",
+	"MPU_IRAM",
+	"PCS1ONLINE",
+	"PCTL0",
+	"PCTL1",
+	"PMEM",
+	"RR",
+	"TXPB",
+	"RXPP",
+	"XAUI",
+	"TXP",
+	"ARM",
+	"IPC",
+	"HOST2",
+	"HOST3",
+	"HOST4",
+	"HOST5",
+	"HOST6",
+	"HOST7",
+	"HOST8",
+	"HOST9",
+	"NETC",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown",
+	"Unknown"
+};
 
 struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
 				 unsigned int *ref_tag)
@@ -133,7 +100,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
 	struct be_mcc_wrb *wrb = NULL;
 	unsigned int tag;
 
-	spin_lock_bh(&phba->ctrl.mcc_lock);
+	spin_lock(&phba->ctrl.mcc_lock);
 	if (mccq->used == mccq->len) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -160,6 +127,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
 	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
 	phba->ctrl.mcc_tag_status[tag] = 0;
 	phba->ctrl.ptag_state[tag].tag_state = 0;
+	phba->ctrl.ptag_state[tag].cbfn = NULL;
 	phba->ctrl.mcc_tag_available--;
 	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
 		phba->ctrl.mcc_alloc_index = 0;
@@ -174,7 +142,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
 	mccq->used++;
 
 alloc_failed:
-	spin_unlock_bh(&phba->ctrl.mcc_lock);
+	spin_unlock(&phba->ctrl.mcc_lock);
 	return wrb;
 }
 
@@ -182,7 +150,7 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
 {
 	struct be_queue_info *mccq = &ctrl->mcc_obj.q;
 
-	spin_lock_bh(&ctrl->mcc_lock);
+	spin_lock(&ctrl->mcc_lock);
 	tag = tag & MCC_Q_CMD_TAG_MASK;
 	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
 	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
@@ -191,16 +159,71 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
 		ctrl->mcc_free_index++;
 	ctrl->mcc_tag_available++;
 	mccq->used--;
-	spin_unlock_bh(&ctrl->mcc_lock);
+	spin_unlock(&ctrl->mcc_lock);
 }
 
-/**
- * beiscsi_fail_session(): Closing session with appropriate error
- * @cls_session: ptr to session
- **/
-void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
+/*
+ * beiscsi_mcc_compl_status - Return the status of MCC completion
+ * @phba: Driver private structure
+ * @tag: Tag for the MBX Command
+ * @wrb: the WRB used for the MBX Command
+ * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ */
+int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
+			       unsigned int tag,
+			       struct be_mcc_wrb **wrb,
+			       struct be_dma_mem *mbx_cmd_mem)
 {
-	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+	uint16_t status = 0, addl_status = 0, wrb_num = 0;
+	struct be_cmd_resp_hdr *mbx_resp_hdr;
+	struct be_cmd_req_hdr *mbx_hdr;
+	struct be_mcc_wrb *temp_wrb;
+	uint32_t mcc_tag_status;
+	int rc = 0;
+
+	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
+	status = (mcc_tag_status & CQE_STATUS_MASK);
+	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
+			CQE_STATUS_ADDL_SHIFT);
+
+	if (mbx_cmd_mem) {
+		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
+	} else {
+		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
+			  CQE_STATUS_WRB_SHIFT;
+		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
+		mbx_hdr = embedded_payload(temp_wrb);
+
+		if (wrb)
+			*wrb = temp_wrb;
+	}
+
+	if (status || addl_status) {
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+			    BEISCSI_LOG_CONFIG,
+			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
+			    mbx_hdr->subsystem, mbx_hdr->opcode,
+			    status, addl_status);
+		rc = -EIO;
+		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
+			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
+			beiscsi_log(phba, KERN_WARNING,
+				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+				    BEISCSI_LOG_CONFIG,
+				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
+				    mbx_resp_hdr->response_length,
+				    mbx_resp_hdr->actual_resp_len);
+			rc = -EAGAIN;
+		}
+	}
+
+	return rc;
 }
 
 /*
@@ -217,26 +240,34 @@ void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
  * Failure: Non-Zero
  **/
 int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
-			    uint32_t tag, struct be_mcc_wrb **wrb,
+			    unsigned int tag,
+			    struct be_mcc_wrb **wrb,
 			    struct be_dma_mem *mbx_cmd_mem)
 {
 	int rc = 0;
-	uint32_t mcc_tag_status;
-	uint16_t status = 0, addl_status = 0, wrb_num = 0;
-	struct be_mcc_wrb *temp_wrb;
-	struct be_cmd_req_hdr *mbx_hdr;
-	struct be_cmd_resp_hdr *mbx_resp_hdr;
-	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
 
 
-		return -EPERM;
+	if (beiscsi_hba_in_error(phba)) {
+		clear_bit(MCC_TAG_STATE_RUNNING,
+			  &phba->ctrl.ptag_state[tag].tag_state);
+		return -EIO;
+	}
 
 	/* wait for the mccq completion */
-	rc = wait_event_interruptible_timeout(
-				phba->ctrl.mcc_wait[tag],
-				phba->ctrl.mcc_tag_status[tag],
-				msecs_to_jiffies(
-				BEISCSI_HOST_MBX_TIMEOUT));
+	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
+					      phba->ctrl.mcc_tag_status[tag],
+					      msecs_to_jiffies(
+						BEISCSI_HOST_MBX_TIMEOUT));
+	/**
+	 * Return EIO if port is being disabled. Associated DMA memory, if any,
+	 * is freed by the caller. When port goes offline, MCCQ is cleaned up
+	 * so does WRB.
+	 */
+	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+		clear_bit(MCC_TAG_STATE_RUNNING,
+			  &phba->ctrl.ptag_state[tag].tag_state);
+		return -EIO;
+	}
+
 	/**
 	 * If MBOX cmd timeout expired, tag and resource allocated
 	 * for cmd is not freed until FW returns completion.
@@ -270,47 +301,7 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
 		return -EBUSY;
 	}
 
-	rc = 0;
-	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
-	status = (mcc_tag_status & CQE_STATUS_MASK);
-	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
-			CQE_STATUS_ADDL_SHIFT);
-
-	if (mbx_cmd_mem) {
-		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
-	} else {
-		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
-			   CQE_STATUS_WRB_SHIFT;
-		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
-		mbx_hdr = embedded_payload(temp_wrb);
-
-		if (wrb)
-			*wrb = temp_wrb;
-	}
-
-	if (status || addl_status) {
-		beiscsi_log(phba, KERN_WARNING,
-			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
-			    BEISCSI_LOG_CONFIG,
-			    "BC_%d : MBX Cmd Failed for "
-			    "Subsys : %d Opcode : %d with "
-			    "Status : %d and Extd_Status : %d\n",
-			    mbx_hdr->subsystem,
-			    mbx_hdr->opcode,
-			    status, addl_status);
-		rc = -EIO;
-		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-			mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
-			beiscsi_log(phba, KERN_WARNING,
-				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
-				    BEISCSI_LOG_CONFIG,
-				    "BC_%d : Insufficient Buffer Error "
-				    "Resp_Len : %d Actual_Resp_Len : %d\n",
-				    mbx_resp_hdr->response_length,
-				    mbx_resp_hdr->actual_resp_len);
-			rc = -EAGAIN;
-		}
-	}
+	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);
 
 	free_mcc_wrb(&phba->ctrl, tag);
 	return rc;
@@ -330,11 +321,10 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
 static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
 				      struct be_mcc_compl *compl)
 {
-	u16 compl_status, extd_status;
 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
 	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
-	struct be_cmd_resp_hdr *resp_hdr;
+	u16 compl_status, extd_status;
 
 	/**
 	 * To check if valid bit is set, check the entire word as we don't know
@@ -368,14 +358,7 @@ static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
 		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
-
-	if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
-		/* if status is insufficient buffer, check the length */
-		resp_hdr = (struct be_cmd_resp_hdr *) hdr;
-		if (resp_hdr->response_length)
-			return 0;
-	}
-	return -EINVAL;
+	return compl_status;
 }
 
 static void beiscsi_process_async_link(struct beiscsi_hba *phba,
@@ -391,18 +374,19 @@ static void beiscsi_process_async_link(struct beiscsi_hba *phba,
 	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
 	 **/
 	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
-		phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
-		phba->get_boot = BE_GET_BOOT_RETRIES;
+		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
+		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
+			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
 		__beiscsi_log(phba, KERN_ERR,
 			      "BC_%d : Link Up on Port %d tag 0x%x\n",
 			      evt->physical_port, evt->event_tag);
 	} else {
-		phba->state = BE_ADAPTER_LINK_DOWN;
+		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
 		__beiscsi_log(phba, KERN_ERR,
 			      "BC_%d : Link Down on Port %d tag 0x%x\n",
 			      evt->physical_port, evt->event_tag);
 		iscsi_host_for_each_session(phba->shost,
-					    beiscsi_fail_session);
+					    beiscsi_session_fail);
 	}
 }
 
@@ -482,8 +466,8 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
 		beiscsi_process_async_link(phba, compl);
 		break;
 	case ASYNC_EVENT_CODE_ISCSI:
-		phba->state |= BE_ADAPTER_CHECK_BOOT;
-		phba->get_boot = BE_GET_BOOT_RETRIES;
+		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
+			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
 		sev = KERN_ERR;
 		break;
 	case ASYNC_EVENT_CODE_SLI:
@@ -519,6 +503,9 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
 		return 0;
 	}
 
+	/* end MCC with this tag */
+	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
+
 	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
 		beiscsi_log(phba, KERN_WARNING,
 			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
@@ -529,9 +516,11 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
 		 * Only for non-embedded cmd, PCI resource is allocated.
 		 **/
 		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
-		if (tag_mem->size)
+		if (tag_mem->size) {
 			pci_free_consistent(ctrl->pdev, tag_mem->size,
 					tag_mem->va, tag_mem->dma);
+			tag_mem->size = 0;
+		}
 		free_mcc_wrb(ctrl, tag);
 		return 0;
 	}
@@ -550,57 +539,25 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
 				     CQE_STATUS_ADDL_MASK;
 	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
 
-	/* write ordering forced in wake_up_interruptible */
-	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
-	wake_up_interruptible(&ctrl->mcc_wait[tag]);
-	return 0;
-}
-
-/*
- * be_mcc_compl_poll()- Wait for MBX completion
- * @phba: driver private structure
- *
- * Wait till no more pending mcc requests are present
- *
- * return
- * Success: 0
- * Failure: Non-Zero
- *
- **/
-int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	int i;
-
-	if (!test_bit(MCC_TAG_STATE_RUNNING,
-		      &ctrl->ptag_state[tag].tag_state)) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-			    "BC_%d: tag %u state not running\n", tag);
+	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
+		if (ctrl->ptag_state[tag].cbfn)
+			ctrl->ptag_state[tag].cbfn(phba, tag);
+		else
+			__beiscsi_log(phba, KERN_ERR,
+				      "BC_%d : MBX ASYNC command with no callback\n");
+		free_mcc_wrb(ctrl, tag);
 		return 0;
 	}
-	for (i = 0; i < mcc_timeout; i++) {
-		if (beiscsi_error(phba))
-			return -EIO;
 
-		beiscsi_process_mcc_cq(phba);
-		/* after polling, wrb and tag need to be released */
-		if (!test_bit(MCC_TAG_STATE_RUNNING,
-			      &ctrl->ptag_state[tag].tag_state)) {
-			free_mcc_wrb(ctrl, tag);
-			break;
-		}
-		udelay(100);
-	}
-
-	if (i < mcc_timeout)
+	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
+		/* just check completion status and free wrb */
+		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+		free_mcc_wrb(ctrl, tag);
 		return 0;
+	}
 
-	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-		    "BC_%d : FW Timed Out\n");
-	phba->fw_timeout = true;
-	beiscsi_ue_detect(phba);
-	return -EBUSY;
+	wake_up_interruptible(&ctrl->mcc_wait[tag]);
+	return 0;
 }
 
 void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
@@ -642,7 +599,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
 	 */
 	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
 	do {
-		if (beiscsi_error(phba))
+		if (beiscsi_hba_in_error(phba))
 			return -EIO;
 
 		ready = ioread32(db);
@@ -655,16 +612,14 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
 
 		if (time_after(jiffies, timeout))
 			break;
-		msleep(20);
+		/* 1ms sleep is enough in most cases */
+		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
 	} while (!ready);
 
 	beiscsi_log(phba, KERN_ERR,
 			BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
 			"BC_%d : FW Timed Out\n");
-
-	phba->fw_timeout = true;
-	beiscsi_ue_detect(phba);
-
+	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
 	return -EBUSY;
 }
 
@@ -679,7 +634,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
  * Success: 0
  * Failure: Non-Zero
  **/
-int be_mbox_notify(struct be_ctrl_info *ctrl)
+static int be_mbox_notify(struct be_ctrl_info *ctrl)
 {
 	int status;
 	u32 val = 0;
@@ -819,87 +774,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 	return status;
 }
 
-/**
- * be_cmd_fw_initialize()- Initialize FW
- * @ctrl: Pointer to function control structure
- *
- * Send FW initialize pattern for the function.
- *
- * return
- * Success: 0
- * Failure: Non-Zero value
- **/
-int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
-{
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-	int status;
-	u8 *endian_check;
-
-	mutex_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
-
-	endian_check = (u8 *) wrb;
-	*endian_check++ = 0xFF;
-	*endian_check++ = 0x12;
-	*endian_check++ = 0x34;
-	*endian_check++ = 0xFF;
-	*endian_check++ = 0xFF;
-	*endian_check++ = 0x56;
-	*endian_check++ = 0x78;
-	*endian_check++ = 0xFF;
-	be_dws_cpu_to_le(wrb, sizeof(*wrb));
-
-	status = be_mbox_notify(ctrl);
-	if (status)
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BC_%d : be_cmd_fw_initialize Failed\n");
-
-	mutex_unlock(&ctrl->mbox_lock);
-	return status;
-}
-
-/**
- * be_cmd_fw_uninit()- Uinitialize FW
- * @ctrl: Pointer to function control structure
- *
- * Send FW uninitialize pattern for the function
- *
- * return
- * Success: 0
- * Failure: Non-Zero value
- **/
-int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
-{
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
-	int status;
-	u8 *endian_check;
-
-	mutex_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
-
-	endian_check = (u8 *) wrb;
-	*endian_check++ = 0xFF;
-	*endian_check++ = 0xAA;
-	*endian_check++ = 0xBB;
-	*endian_check++ = 0xFF;
-	*endian_check++ = 0xFF;
-	*endian_check++ = 0xCC;
-	*endian_check++ = 0xDD;
-	*endian_check = 0xFF;
-
-	be_dws_cpu_to_le(wrb, sizeof(*wrb));
-
-	status = be_mbox_notify(ctrl);
-	if (status)
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BC_%d : be_cmd_fw_uninit Failed\n");
-
-	mutex_unlock(&ctrl->mbox_lock);
-	return status;
-}
-
 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
 			  struct be_queue_info *cq, struct be_queue_info *eq,
 			  bool sol_evts, bool no_delay, int coalesce_wm)
@@ -1343,25 +1217,6 @@ error:
 	return status;
 }
 
-int beiscsi_cmd_reset_function(struct beiscsi_hba  *phba)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
-	int status;
-
-	mutex_lock(&ctrl->mbox_lock);
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
-	status = be_mbox_notify(ctrl);
-
-	mutex_unlock(&ctrl->mbox_lock);
-	return status;
-}
-
 /**
 /**
  * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
  * @phba: device priv structure instance
@@ -1402,3 +1257,564 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba,
 
 	return tag;
 }
+
+int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
+			       struct beiscsi_hba *phba)
+{
+	struct be_dma_mem nonemb_cmd;
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_mgmt_controller_attributes *req;
+	struct be_sge *sge = nonembedded_sgl(wrb);
+	int status = 0;
+
+	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+				sizeof(struct be_mgmt_controller_attributes),
+				&nonemb_cmd.dma);
+	if (nonemb_cmd.va == NULL) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BG_%d : pci_alloc_consistent failed in %s\n",
+			    __func__);
+		return -ENOMEM;
+	}
+	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
+	req = nonemb_cmd.va;
+	memset(req, 0, sizeof(*req));
+	mutex_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
+	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
+	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(nonemb_cmd.size);
+	status = be_mbox_notify(ctrl);
+	if (!status) {
+		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
+
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+			    "BG_%d : Firmware Version of CMD : %s\n"
+			    "Firmware Version is : %s\n"
+			    "Developer Build, not performing version check...\n",
+			    resp->params.hba_attribs
+			    .flashrom_version_string,
+			    resp->params.hba_attribs.
+			    firmware_version_string);
+
+		phba->fw_config.iscsi_features =
+				resp->params.hba_attribs.iscsi_features;
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+			    "BM_%d : phba->fw_config.iscsi_features = %d\n",
+			    phba->fw_config.iscsi_features);
+		memcpy(phba->fw_ver_str, resp->params.hba_attribs.
+		       firmware_version_string, BEISCSI_VER_STRLEN);
+	} else
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BG_%d :  Failed in beiscsi_check_supported_fw\n");
+	mutex_unlock(&ctrl->mbox_lock);
+	if (nonemb_cmd.va)
+		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+				    nonemb_cmd.va, nonemb_cmd.dma);
+
+	return status;
+}
+
+/**
+ * beiscsi_get_fw_config()- Get the FW config for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the FW config and resources available for the function.
+ * The resources are created based on the count received here.
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ **/
+int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
+			  struct beiscsi_hba *phba)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
+	uint32_t cid_count, icd_count;
+	int status = -EINVAL;
+	uint8_t ulp_num = 0;
+
+	mutex_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
+
+	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+			   EMBED_MBX_MAX_PAYLOAD_SIZE);
+
+	if (be_mbox_notify(ctrl)) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BG_%d : Failed in beiscsi_get_fw_config\n");
+		goto fail_init;
+	}
+
+	/* FW response formats depend on port id */
+	phba->fw_config.phys_port = pfw_cfg->phys_port;
+	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BG_%d : invalid physical port id %d\n",
+			    phba->fw_config.phys_port);
+		goto fail_init;
+	}
+
+	/* populate and check FW config against min and max values */
+	if (!is_chip_be2_be3r(phba)) {
+		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
+		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
+		if (phba->fw_config.eqid_count == 0 ||
+		    phba->fw_config.eqid_count > 2048) {
+			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+				    "BG_%d : invalid EQ count %d\n",
+				    phba->fw_config.eqid_count);
+			goto fail_init;
+		}
+		if (phba->fw_config.cqid_count == 0 ||
+		    phba->fw_config.cqid_count > 4096) {
+			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+				    "BG_%d : invalid CQ count %d\n",
+				    phba->fw_config.cqid_count);
+			goto fail_init;
+		}
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
+			    phba->fw_config.eqid_count,
+			    phba->fw_config.cqid_count);
+	}
+
+	/**
+	 * Check on which all ULP iSCSI Protocol is loaded.
+	 * Set the Bit for those ULP. This set flag is used
+	 * at all places in the code to check on which ULP
+	 * iSCSi Protocol is loaded
+	 **/
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (pfw_cfg->ulp[ulp_num].ulp_mode &
+		    BEISCSI_ULP_ISCSI_INI_MODE) {
+			set_bit(ulp_num, &phba->fw_config.ulp_supported);
+
+			/* Get the CID, ICD and Chain count for each ULP */
+			phba->fw_config.iscsi_cid_start[ulp_num] =
+				pfw_cfg->ulp[ulp_num].sq_base;
+			phba->fw_config.iscsi_cid_count[ulp_num] =
+				pfw_cfg->ulp[ulp_num].sq_count;
+
+			phba->fw_config.iscsi_icd_start[ulp_num] =
+				pfw_cfg->ulp[ulp_num].icd_base;
+			phba->fw_config.iscsi_icd_count[ulp_num] =
+				pfw_cfg->ulp[ulp_num].icd_count;
+
+			phba->fw_config.iscsi_chain_start[ulp_num] =
+				pfw_cfg->chain_icd[ulp_num].chain_base;
+			phba->fw_config.iscsi_chain_count[ulp_num] =
+				pfw_cfg->chain_icd[ulp_num].chain_count;
+
+			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+				    "BG_%d : Function loaded on ULP : %d\n"
+				    "\tiscsi_cid_count : %d\n"
+				    "\tiscsi_cid_start : %d\n"
+				    "\t iscsi_icd_count : %d\n"
+				    "\t iscsi_icd_start : %d\n",
+				    ulp_num,
+				    phba->fw_config.
+				    iscsi_cid_count[ulp_num],
+				    phba->fw_config.
+				    iscsi_cid_start[ulp_num],
+				    phba->fw_config.
+				    iscsi_icd_count[ulp_num],
+				    phba->fw_config.
+				    iscsi_icd_start[ulp_num]);
+		}
+	}
+
+	if (phba->fw_config.ulp_supported == 0) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
+			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
+			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
+		goto fail_init;
+	}
+
+	/**
+	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
+	 **/
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+			break;
+	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
+	if (icd_count == 0 || icd_count > 65536) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BG_%d: invalid ICD count %d\n", icd_count);
+		goto fail_init;
+	}
+
+	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
+		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
+	if (cid_count == 0 || cid_count > 4096) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BG_%d: invalid CID count %d\n", cid_count);
+		goto fail_init;
+	}
+
+	/**
+	 * Check FW is dual ULP aware i.e. can handle either
+	 * of the protocols.
+	 */
+	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
+					  BEISCSI_FUNC_DUA_MODE);
+
+	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+		    "BG_%d : DUA Mode : 0x%x\n",
+		    phba->fw_config.dual_ulp_aware);
+
+	/* all set, continue using this FW config */
+	status = 0;
+fail_init:
+	mutex_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
+/**
+ * beiscsi_get_port_name()- Get port name for the function
+ * @ctrl: ptr to Ctrl Info
+ * @phba: ptr to the dev priv structure
+ *
+ * Get the alphanumeric character for port
+ *
+ **/
+int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
+{
+	int ret = 0;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_get_port_name *ioctl;
+
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	memset(wrb, 0, sizeof(*wrb));
+	ioctl = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_GET_PORT_NAME,
+			   EMBED_MBX_MAX_PAYLOAD_SIZE);
+	ret = be_mbox_notify(ctrl);
+	phba->port_name = 0;
+	if (!ret) {
+		phba->port_name = ioctl->p.resp.port_names >>
+				  (phba->fw_config.phys_port * 8) & 0xff;
+	} else {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
+			    ret, ioctl->h.resp_hdr.status);
+	}
+
+	if (phba->port_name == 0)
+		phba->port_name = '?';
+
+	mutex_unlock(&ctrl->mbox_lock);
+	return ret;
+}
+
+int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_cmd_set_features *ioctl;
+	struct be_mcc_wrb *wrb;
+	int ret = 0;
+
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	memset(wrb, 0, sizeof(*wrb));
+	ioctl = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
+	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_SET_FEATURES,
+			   EMBED_MBX_MAX_PAYLOAD_SIZE);
+	ioctl->feature = BE_CMD_SET_FEATURE_UER;
+	ioctl->param_len = sizeof(ioctl->param.req);
+	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
+	ret = be_mbox_notify(ctrl);
+	if (!ret) {
+		phba->ue2rp = ioctl->param.resp.ue2rp;
+		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+			    "BG_%d : HBA error recovery supported\n");
+	} else {
+		/**
+		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
+		 * Older FW versions return this error.
+		 */
+		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
+		    ret == MCC_STATUS_INVALID_LENGTH)
+			__beiscsi_log(phba, KERN_INFO,
+				      "BG_%d : HBA error recovery not supported\n");
+	}
+
+	mutex_unlock(&ctrl->mbox_lock);
+	return ret;
+}
+
+static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
+{
+	u32 sem;
+
+	if (is_chip_be2_be3r(phba))
+		sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
+	else
+		pci_read_config_dword(phba->pcidev,
+				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+	return sem;
+}
+
+int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
+{
+	u32 loop, post, rdy = 0;
+
+	loop = 1000;
+	while (loop--) {
+		post = beiscsi_get_post_stage(phba);
+		if (post & POST_ERROR_BIT)
+			break;
+		if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
+			rdy = 1;
+			break;
+		}
+		msleep(60);
+	}
+
+	if (!rdy) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BC_%d : FW not ready 0x%x\n", post);
+	}
+
+	return rdy;
+}
+
+int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
+	int status;
+
+	mutex_lock(&ctrl->mbox_lock);
+
+	req = embedded_payload(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
+	status = be_mbox_notify(ctrl);
+
+	mutex_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
+int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
+{
+	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+	u8 *endian_check;
+	int status;
+
+	mutex_lock(&ctrl->mbox_lock);
+	memset(wrb, 0, sizeof(*wrb));
+
+	endian_check = (u8 *) wrb;
+	if (load) {
+		/* to start communicating */
+		*endian_check++ = 0xFF;
+		*endian_check++ = 0x12;
+		*endian_check++ = 0x34;
+		*endian_check++ = 0xFF;
+		*endian_check++ = 0xFF;
+		*endian_check++ = 0x56;
+		*endian_check++ = 0x78;
+		*endian_check++ = 0xFF;
+	} else {
+		/* to stop communicating */
+		*endian_check++ = 0xFF;
+		*endian_check++ = 0xAA;
+		*endian_check++ = 0xBB;
+		*endian_check++ = 0xFF;
+		*endian_check++ = 0xFF;
+		*endian_check++ = 0xCC;
+		*endian_check++ = 0xDD;
+		*endian_check = 0xFF;
+	}
+	be_dws_cpu_to_le(wrb, sizeof(*wrb));
+
+	status = be_mbox_notify(ctrl);
+	if (status)
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
+			    "BC_%d : special WRB message failed\n");
+	mutex_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
+int beiscsi_init_sliport(struct beiscsi_hba *phba)
+{
+	int status;
+
+	/* check POST stage before talking to FW */
+	status = beiscsi_check_fw_rdy(phba);
+	if (!status)
+		return -EIO;
+
+	/* clear all error states after checking FW rdy */
+	phba->state &= ~BEISCSI_HBA_IN_ERR;
+
+	/* check again UER support */
+	phba->state &= ~BEISCSI_HBA_UER_SUPP;
+
+	/*
+	 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
+	 * It should clean up any stale info in FW for this fn.
+	 */
+	status = beiscsi_cmd_function_reset(phba);
+	if (status) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BC_%d : SLI Function Reset failed\n");
+		return status;
+	}
+
+	/* indicate driver is loading */
+	return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
+}
+
+/**
+ * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
+ * @phba: pointer to dev priv structure
+ * @ulp: ULP number.
+ *
+ * return
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ **/
+int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct iscsi_cleanup_req_v1 *req_v1;
+	struct iscsi_cleanup_req *req;
+	struct be_mcc_wrb *wrb;
+	int status;
+
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = wrb_from_mbox(&ctrl->mbox_mem);
+	req = embedded_payload(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+			   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
+
+       /**
+	* TODO: Check with FW folks the chute value to be set.
+	* For now, use the ULP_MASK as the chute value.
+	*/
+	if (is_chip_be2_be3r(phba)) {
+		req->chute = (1 << ulp);
+		req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
+		req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
+	} else {
+		req_v1 = (struct iscsi_cleanup_req_v1 *)req;
+		req_v1->hdr.version = 1;
+		req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba,
+								      ulp));
+		req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba,
+								       ulp));
+	}
+
+	status = be_mbox_notify(ctrl);
+	if (status)
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
+			    "BC_%d : %s failed on ULP %d\n", __func__, ulp);
+	mutex_unlock(&ctrl->mbox_lock);
+	return status;
+}
+
+/**
+ * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read registers linked to UE and check for the UE status
+ **/
+int beiscsi_detect_ue(struct beiscsi_hba *phba)
+{
+	uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
+	uint32_t ue_hi = 0, ue_lo = 0;
+	uint8_t i = 0;
+	int ret = 0;
+
+	pci_read_config_dword(phba->pcidev,
+			      PCICFG_UE_STATUS_LOW, &ue_lo);
+	pci_read_config_dword(phba->pcidev,
+			      PCICFG_UE_STATUS_MASK_LOW,
+			      &ue_mask_lo);
+	pci_read_config_dword(phba->pcidev,
+			      PCICFG_UE_STATUS_HIGH,
+			      &ue_hi);
+	pci_read_config_dword(phba->pcidev,
+			      PCICFG_UE_STATUS_MASK_HI,
+			      &ue_mask_hi);
+
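+	/* consider only the error bits FW has left unmasked */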
+	ue_lo = (ue_lo & ~ue_mask_lo);
+	ue_hi = (ue_hi & ~ue_mask_hi);
+
+	if (ue_lo || ue_hi) {
+		set_bit(BEISCSI_HBA_IN_UE, &phba->state);
+		__beiscsi_log(phba, KERN_ERR,
+			      "BC_%d : HBA error detected\n");
+		ret = 1;
+	}
+
+	if (ue_lo) {
+		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+			if (ue_lo & 1)
+				__beiscsi_log(phba, KERN_ERR,
+					      "BC_%d : UE_LOW %s bit set\n",
+					      desc_ue_status_low[i]);
+		}
+	}
+
+	if (ue_hi) {
+		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+			if (ue_hi & 1)
+				__beiscsi_log(phba, KERN_ERR,
+					      "BC_%d : UE_HIGH %s bit set\n",
+					      desc_ue_status_hi[i]);
+		}
+	}
+	return ret;
+}
+
+/**
+ * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read SLIPORT SEMAPHORE register to check for UER
+ **/
+int beiscsi_detect_tpe(struct beiscsi_hba *phba)
+{
+	u32 post, status;
+	int ret = 0;
+
+	post = beiscsi_get_post_stage(phba);
+	status = post & POST_STAGE_MASK;
+	if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
+	    POST_STAGE_RECOVERABLE_ERR) {
+		set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
+		__beiscsi_log(phba, KERN_INFO,
+			      "BC_%d : HBA error recoverable: 0x%x\n", post);
+		ret = 1;
+	} else {
+		__beiscsi_log(phba, KERN_INFO,
+			      "BC_%d : HBA in UE: 0x%x\n", post);
+	}
+
+	return ret;
+}

+ 107 - 35
drivers/scsi/be2iscsi/be_cmds.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -57,6 +57,7 @@ struct be_mcc_wrb {
 #define MCC_STATUS_ILLEGAL_REQUEST 0x2
 #define MCC_STATUS_ILLEGAL_FIELD 0x3
 #define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
+#define MCC_STATUS_INVALID_LENGTH 0x74
 
 #define CQE_STATUS_COMPL_MASK	0xFFFF
 #define CQE_STATUS_COMPL_SHIFT	0		/* bits 0 - 15 */
@@ -97,11 +98,23 @@ struct be_mcc_compl {
 #define MPU_MAILBOX_DB_RDY_MASK	0x1	/* bit 0 */
 #define MPU_MAILBOX_DB_HI_MASK	0x2	/* bit 1 */
 
-/********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK 0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT 31
+/********** MPU semaphore: used for SH & BE ******************/
+#define SLIPORT_SOFTRESET_OFFSET		0x5c	/* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_BEx		0xac	/* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH		0x94	/* PCI-CFG offset */
+#define POST_STAGE_MASK				0x0000FFFF
+#define POST_ERROR_BIT				0x80000000
+#define POST_ERR_RECOVERY_CODE_MASK		0xF000
+
+/* Soft Reset register masks */
+#define SLIPORT_SOFTRESET_SR_MASK		0x00000080	/* SR bit */
+
+/* MPU semaphore POST stage values */
+#define POST_STAGE_AWAITING_HOST_RDY	0x1 /* FW awaiting go-ahead from host */
+#define POST_STAGE_HOST_RDY		0x2 /* Host has given go-ahead to FW */
+#define POST_STAGE_BE_RESET		0x3 /* Host wants to reset chip */
+#define POST_STAGE_ARMFW_RDY		0xC000 /* FW is done with POST */
+#define POST_STAGE_RECOVERABLE_ERR	0xE000 /* Recoverable err detected */
 
 /********** MCC door bell ************/
 #define DB_MCCQ_OFFSET 0x140
@@ -109,9 +122,6 @@ struct be_mcc_compl {
 /* Number of entries posted */
 #define DB_MCCQ_NUM_POSTED_SHIFT 16		/* bits 16 - 29 */
 
-/* MPU semphore POST stage values */
-#define POST_STAGE_ARMFW_RDY		0xc000	/* FW is done with POST */
-
 /**
  * When the async bit of mcc_compl is set, the last 4 bytes of
  * mcc_compl is interpreted as follows:
@@ -217,6 +227,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG		58
 #define OPCODE_COMMON_FUNCTION_RESET			61
 #define OPCODE_COMMON_GET_PORT_NAME			77
+#define OPCODE_COMMON_SET_FEATURES			191
 
 /**
  * LIST of opcodes that are common between Initiator and Target
@@ -345,8 +356,8 @@ struct be_cmd_req_logout_fw_sess {
 
 struct be_cmd_resp_logout_fw_sess {
 	struct be_cmd_resp_hdr hdr;	/* dw[4] */
-#define BEISCSI_MGMT_SESSION_CLOSE 0x20
 	uint32_t session_status;
+#define BE_SESS_STATUS_CLOSE		0x20
 } __packed;
 
 struct mgmt_conn_login_options {
@@ -365,6 +376,14 @@ struct ip_addr_format {
 	u16 size_of_structure;
 	u8 reserved;
 	u8 ip_type;
+#define BEISCSI_IP_TYPE_V4		0x1
+#define BEISCSI_IP_TYPE_STATIC_V4	0x3
+#define BEISCSI_IP_TYPE_DHCP_V4		0x5
+/* type v4 values < type v6 values */
+#define BEISCSI_IP_TYPE_V6		0x10
+#define BEISCSI_IP_TYPE_ROUTABLE_V6	0x30
+#define BEISCSI_IP_TYPE_LINK_LOCAL_V6	0x50
+#define BEISCSI_IP_TYPE_AUTO_V6		0x90
 	u8 addr[16];
 	u32 rsvd0;
 } __packed;
@@ -430,8 +449,13 @@ struct be_cmd_get_boot_target_req {
 
 struct be_cmd_get_boot_target_resp {
 	struct be_cmd_resp_hdr hdr;
-	u32  boot_session_count;
-	int  boot_session_handle;
+	u32 boot_session_count;
+	u32 boot_session_handle;
+/**
+ * FW returns 0xffffffff if it couldn't establish connection with
+ * configured boot target.
+ */
+#define BE_BOOT_INVALID_SHANDLE	0xffffffff
 };
 
 struct be_cmd_reopen_session_req {
@@ -699,16 +723,59 @@ struct be_cmd_get_nic_conf_resp {
 	u8 mac_address[ETH_ALEN];
 } __packed;
 
-#define BEISCSI_ALIAS_LEN 32
+/******************** Get HBA NAME *******************/
 
 struct be_cmd_hba_name {
 	struct be_cmd_req_hdr hdr;
 	u16 flags;
 	u16 rsvd0;
 	u8 initiator_name[ISCSI_NAME_LEN];
-	u8 initiator_alias[BEISCSI_ALIAS_LEN];
+#define BE_INI_ALIAS_LEN 32
+	u8 initiator_alias[BE_INI_ALIAS_LEN];
 } __packed;
 
+/******************** COMMON SET Features *******************/
+#define BE_CMD_SET_FEATURE_UER	0x10
+#define BE_CMD_UER_SUPP_BIT	0x1
+struct be_uer_req {
+	u32 uer;
+	u32 rsvd;
+};
+
+struct be_uer_resp {
+	u32 uer;
+	u16 ue2rp;
+	u16 ue2sr;
+};
+
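+/* COMMON_SET_FEATURES ioctl: 'feature' selects it, 'param' carries req/resp */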
+struct be_cmd_set_features {
+	union {
+		struct be_cmd_req_hdr req_hdr;
+		struct be_cmd_resp_hdr resp_hdr;
+	} h;
+	u32 feature;
+	u32 param_len;
+	union {
+		struct be_uer_req req;
+		struct be_uer_resp resp;
+		u32 rsvd[2];
+	} param;
+} __packed;
+
+int beiscsi_cmd_function_reset(struct beiscsi_hba *phba);
+
+int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load);
+
+int beiscsi_check_fw_rdy(struct beiscsi_hba *phba);
+
+int beiscsi_init_sliport(struct beiscsi_hba *phba);
+
+int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num);
+
+int beiscsi_detect_ue(struct beiscsi_hba *phba);
+
+int beiscsi_detect_tpe(struct beiscsi_hba *phba);
+
 int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
 			  struct be_queue_info *eq, int eq_delay);
 
@@ -723,24 +790,21 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
 			struct be_queue_info *mccq,
 			struct be_queue_info *cq);
 
-int be_poll_mcc(struct be_ctrl_info *ctrl);
-int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
-				      struct beiscsi_hba *phba);
 unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
 
 void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
 
-int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
 			    int num);
 int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
-			    uint32_t tag, struct be_mcc_wrb **wrb,
+			    unsigned int tag,
+			    struct be_mcc_wrb **wrb,
 			    struct be_dma_mem *mbx_cmd_mem);
-/*ISCSI Functuions */
-int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
-int be_cmd_fw_uninit(struct be_ctrl_info *ctrl);
-
+int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
+			       unsigned int tag,
+			       struct be_mcc_wrb **wrb,
+			       struct be_dma_mem *mbx_cmd_mem);
 struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
-int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag);
 void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag);
 struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
 				 unsigned int *ref_tag);
@@ -749,9 +813,6 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
 int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
 			      struct be_mcc_compl *compl);
 
-
-int be_mbox_notify(struct be_ctrl_info *ctrl);
-
 int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
 				    struct be_queue_info *cq,
 				    struct be_queue_info *dq, int length,
@@ -767,8 +828,6 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
 				struct be_dma_mem *q_mem, u32 page_offset,
 				u32 num_pages);
 
-int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
-
 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 		       struct be_queue_info *wrbq,
 		       struct hwi_wrb_context *pwrb_context,
@@ -777,6 +836,15 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
 /* Configuration Functions */
 int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 
+int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
+			       struct beiscsi_hba *phba);
+
+int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
+
+int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
+
+int beiscsi_set_uer_feature(struct beiscsi_hba *phba);
+
 struct be_default_pdu_context {
 	u32 dw[4];
 } __packed;
@@ -999,7 +1067,16 @@ struct iscsi_cleanup_req {
 	u16 chute;
 	u8 hdr_ring_id;
 	u8 data_ring_id;
+} __packed;
 
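+/* v1 of the cleanup request (non-BE2/BE3R): 16-bit little-endian ring ids */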
+struct iscsi_cleanup_req_v1 {
+	struct be_cmd_req_hdr hdr;
+	u16 chute;
+	u16 rsvd1;
+	u16 hdr_ring_id;
+	u16 rsvd2;
+	u16 data_ring_id;
+	u16 rsvd3;
 } __packed;
 } __packed;
 
 struct eq_delay {
 						 * the cxn
 						 * the cxn
 						 */
 
-int be_chk_reset_complete(struct beiscsi_hba *phba);
-
 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
 			bool embedded, u8 sge_cnt);
 
 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
 			u8 subsystem, u8 opcode, int cmd_len);
-void beiscsi_fail_session(struct iscsi_cls_session *cls_session);
 #endif /* !BEISCSI_CMDS_H */
 #endif /* !BEISCSI_CMDS_H */

+ 191 - 217
drivers/scsi/be2iscsi/be_iscsi.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -52,22 +52,20 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 
 
 	if (!ep) {
-		printk(KERN_ERR
-		       "beiscsi_session_create: invalid ep\n");
+		pr_err("beiscsi_session_create: invalid ep\n");
 		return NULL;
 	}
 	beiscsi_ep = ep->dd_data;
 	phba = beiscsi_ep->phba;
 
-	if (phba->state & BE_ADAPTER_PCI_ERR) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : PCI_ERROR Recovery\n");
-		return NULL;
-	} else {
+	if (!beiscsi_hba_is_online(phba)) {
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-			    "BS_%d : In beiscsi_session_create\n");
+			    "BS_%d : HBA in error 0x%lx\n", phba->state);
+		return NULL;
 	}
 
+	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+		    "BS_%d : In beiscsi_session_create\n");
 	if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 			    "BS_%d : Cannot handle %d cmds."
@@ -119,6 +117,16 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
 	iscsi_session_teardown(cls_session);
 }
 
+/**
+ * beiscsi_session_fail(): Closing session with appropriate error
+ * @cls_session: ptr to session
+ **/
+void beiscsi_session_fail(struct iscsi_cls_session *cls_session)
+{
+	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+
 /**
  * beiscsi_conn_create - create an instance of iscsi connection
  * @cls_session: ptr to iscsi_cls_session
@@ -237,7 +245,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
 	return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
 }
 
-static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
+static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
 {
 	if (phba->ipv4_iface)
 		return 0;
@@ -256,7 +264,7 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
 	return 0;
 }
 
-static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
+static int beiscsi_iface_create_ipv6(struct beiscsi_hba *phba)
 {
 	if (phba->ipv6_iface)
 		return 0;
@@ -275,79 +283,31 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
 	return 0;
 }
 
-void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)
+void beiscsi_iface_create_default(struct beiscsi_hba *phba)
 {
 	struct be_cmd_get_if_info_resp *if_info;
 
-	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {
-		beiscsi_create_ipv4_iface(phba);
+	if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V4, &if_info)) {
+		beiscsi_iface_create_ipv4(phba);
 		kfree(if_info);
 	}
 
-	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {
-		beiscsi_create_ipv6_iface(phba);
+	if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V6, &if_info)) {
+		beiscsi_iface_create_ipv6(phba);
 		kfree(if_info);
 	}
 }
 
-void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba)
+void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
 {
-	if (phba->ipv6_iface)
+	if (phba->ipv6_iface) {
 		iscsi_destroy_iface(phba->ipv6_iface);
-	if (phba->ipv4_iface)
-		iscsi_destroy_iface(phba->ipv4_iface);
-}
-
-static int
-beiscsi_set_static_ip(struct Scsi_Host *shost,
-		struct iscsi_iface_param_info *iface_param,
-		void *data, uint32_t dt_len)
-{
-	struct beiscsi_hba *phba = iscsi_host_priv(shost);
-	struct iscsi_iface_param_info *iface_ip = NULL;
-	struct iscsi_iface_param_info *iface_subnet = NULL;
-	struct nlattr *nla;
-	int ret;
-
-
-	switch (iface_param->param) {
-	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
-		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
-		if (nla)
-			iface_ip = nla_data(nla);
-
-		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
-		if (nla)
-			iface_subnet = nla_data(nla);
-		break;
-	case ISCSI_NET_PARAM_IPV4_ADDR:
-		iface_ip = iface_param;
-		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
-		if (nla)
-			iface_subnet = nla_data(nla);
-		break;
-	case ISCSI_NET_PARAM_IPV4_SUBNET:
-		iface_subnet = iface_param;
-		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
-		if (nla)
-			iface_ip = nla_data(nla);
-		break;
-	default:
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : Unsupported param %d\n",
-			    iface_param->param);
+		phba->ipv6_iface = NULL;
 	}
-
-	if (!iface_ip || !iface_subnet) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : IP and Subnet Mask required\n");
-		return -EINVAL;
+	if (phba->ipv4_iface) {
+		iscsi_destroy_iface(phba->ipv4_iface);
+		phba->ipv4_iface = NULL;
 	}
-
-	ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
-			ISCSI_BOOTPROTO_STATIC);
-
-	return ret;
 }
 
 /**
@@ -363,137 +323,141 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
  *	Failure: Non-Zero Value
  **/
 static int
-beiscsi_set_vlan_tag(struct Scsi_Host *shost,
-		      struct iscsi_iface_param_info *iface_param)
+beiscsi_iface_config_vlan(struct Scsi_Host *shost,
+			  struct iscsi_iface_param_info *iface_param)
 {
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
-	int ret;
-
-	/* Get the Interface Handle */
-	ret = mgmt_get_all_if_id(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : Getting Interface Handle Failed\n");
-		return ret;
-	}
+	int ret = -EPERM;
 
 	switch (iface_param->param) {
 	case ISCSI_NET_PARAM_VLAN_ENABLED:
+		ret = 0;
 		if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
-			ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE);
+			ret = beiscsi_if_set_vlan(phba, BEISCSI_VLAN_DISABLE);
 		break;
 	case ISCSI_NET_PARAM_VLAN_TAG:
-		ret = mgmt_set_vlan(phba,
-				    *((uint16_t *)iface_param->value));
+		ret = beiscsi_if_set_vlan(phba,
+					  *((uint16_t *)iface_param->value));
 		break;
-	default:
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-			    "BS_%d : Unknown Param Type : %d\n",
-			    iface_param->param);
-		return -ENOSYS;
 	}
 	return ret;
 }
 
 
 static int
-beiscsi_set_ipv4(struct Scsi_Host *shost,
-		struct iscsi_iface_param_info *iface_param,
-		void *data, uint32_t dt_len)
+beiscsi_iface_config_ipv4(struct Scsi_Host *shost,
+			  struct iscsi_iface_param_info *info,
+			  void *data, uint32_t dt_len)
 {
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
-	int ret = 0;
+	u8 *ip = NULL, *subnet = NULL, *gw;
+	struct nlattr *nla;
+	int ret = -EPERM;
 
 	/* Check the param */
-	switch (iface_param->param) {
+	switch (info->param) {
+	case ISCSI_NET_PARAM_IFACE_ENABLE:
+		if (info->value[0] == ISCSI_IFACE_ENABLE)
+			ret = beiscsi_iface_create_ipv4(phba);
+		else {
+			iscsi_destroy_iface(phba->ipv4_iface);
+			phba->ipv4_iface = NULL;
+		}
+		break;
 	case ISCSI_NET_PARAM_IPV4_GW:
-		ret = mgmt_set_gateway(phba, iface_param);
+		gw = info->value;
+		ret = beiscsi_if_set_gw(phba, BEISCSI_IP_TYPE_V4, gw);
 		break;
 	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
-		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
-			ret = mgmt_set_ip(phba, iface_param,
-					NULL, ISCSI_BOOTPROTO_DHCP);
-		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
-			ret = beiscsi_set_static_ip(shost, iface_param,
-						    data, dt_len);
+		if (info->value[0] == ISCSI_BOOTPROTO_DHCP)
+			ret = beiscsi_if_en_dhcp(phba, BEISCSI_IP_TYPE_V4);
+		else if (info->value[0] == ISCSI_BOOTPROTO_STATIC)
+			/* release DHCP IP address */
+			ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+						   NULL, NULL);
 		else
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
 				    "BS_%d : Invalid BOOTPROTO: %d\n",
-				    iface_param->value[0]);
+				    info->value[0]);
 		break;
-	case ISCSI_NET_PARAM_IFACE_ENABLE:
-		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
-			ret = beiscsi_create_ipv4_iface(phba);
-		else
-			iscsi_destroy_iface(phba->ipv4_iface);
-		break;
-	case ISCSI_NET_PARAM_IPV4_SUBNET:
 	case ISCSI_NET_PARAM_IPV4_ADDR:
-		ret = beiscsi_set_static_ip(shost, iface_param,
-					    data, dt_len);
+		ip = info->value;
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
+		if (nla) {
+			info = nla_data(nla);
+			subnet = info->value;
+		}
+		ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+					   ip, subnet);
 		break;
-	case ISCSI_NET_PARAM_VLAN_ENABLED:
-	case ISCSI_NET_PARAM_VLAN_TAG:
-		ret = beiscsi_set_vlan_tag(shost, iface_param);
+	case ISCSI_NET_PARAM_IPV4_SUBNET:
+		/*
+		 * OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR ioctl needs IP
+		 * and subnet both. Find IP to be applied for this subnet.
+		 */
+		subnet = info->value;
+		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
+		if (nla) {
+			info = nla_data(nla);
+			ip = info->value;
+		}
+		ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
+					   ip, subnet);
 		break;
-	default:
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : Param %d not supported\n",
-			    iface_param->param);
 	}
 
 	return ret;
 }
 
 static int
-beiscsi_set_ipv6(struct Scsi_Host *shost,
-		struct iscsi_iface_param_info *iface_param,
-		void *data, uint32_t dt_len)
+beiscsi_iface_config_ipv6(struct Scsi_Host *shost,
+			  struct iscsi_iface_param_info *iface_param,
+			  void *data, uint32_t dt_len)
 {
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
-	int ret = 0;
+	int ret = -EPERM;
 
 	switch (iface_param->param) {
 	case ISCSI_NET_PARAM_IFACE_ENABLE:
 		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
-			ret = beiscsi_create_ipv6_iface(phba);
+			ret = beiscsi_iface_create_ipv6(phba);
 		else {
 			iscsi_destroy_iface(phba->ipv6_iface);
-			ret = 0;
+			phba->ipv6_iface = NULL;
 		}
 		break;
 	case ISCSI_NET_PARAM_IPV6_ADDR:
-		ret = mgmt_set_ip(phba, iface_param, NULL,
-				  ISCSI_BOOTPROTO_STATIC);
+		ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V6,
+					   iface_param->value, NULL);
 		break;
-	case ISCSI_NET_PARAM_VLAN_ENABLED:
-	case ISCSI_NET_PARAM_VLAN_TAG:
-		ret = beiscsi_set_vlan_tag(shost, iface_param);
-		break;
-	default:
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : Param %d not supported\n",
-			    iface_param->param);
 	}
 
 	return ret;
 }
 
-int be2iscsi_iface_set_param(struct Scsi_Host *shost,
-		void *data, uint32_t dt_len)
+int beiscsi_iface_set_param(struct Scsi_Host *shost,
+			    void *data, uint32_t dt_len)
 {
 	struct iscsi_iface_param_info *iface_param = NULL;
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	struct nlattr *attrib;
 	uint32_t rm_len = dt_len;
-	int ret = 0 ;
+	int ret;
 
-	if (phba->state & BE_ADAPTER_PCI_ERR) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : In PCI_ERROR Recovery\n");
+	if (!beiscsi_hba_is_online(phba)) {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : HBA in error 0x%lx\n", phba->state);
 		return -EBUSY;
 	}
 
+	/* update interface_handle */
+	ret = beiscsi_if_get_handle(phba);
+	if (ret) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
+			    "BS_%d : Getting Interface Handle Failed\n");
+		return ret;
+	}
+
 	nla_for_each_attr(attrib, data, dt_len, rm_len) {
 		iface_param = nla_data(attrib);
 
@@ -512,40 +476,58 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
 			return -EINVAL;
 		}
 
-		switch (iface_param->iface_type) {
-		case ISCSI_IFACE_TYPE_IPV4:
-			ret = beiscsi_set_ipv4(shost, iface_param,
-					       data, dt_len);
-			break;
-		case ISCSI_IFACE_TYPE_IPV6:
-			ret = beiscsi_set_ipv6(shost, iface_param,
-					       data, dt_len);
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : %s.0 set param %d",
+			    (iface_param->iface_type == ISCSI_IFACE_TYPE_IPV4) ?
+			    "ipv4" : "ipv6", iface_param->param);
+
+		ret = -EPERM;
+		switch (iface_param->param) {
+		case ISCSI_NET_PARAM_VLAN_ENABLED:
+		case ISCSI_NET_PARAM_VLAN_TAG:
+			ret = beiscsi_iface_config_vlan(shost, iface_param);
 			break;
 		default:
-			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-				    "BS_%d : Invalid iface type :%d passed\n",
-				    iface_param->iface_type);
-			break;
+			switch (iface_param->iface_type) {
+			case ISCSI_IFACE_TYPE_IPV4:
+				ret = beiscsi_iface_config_ipv4(shost,
+								iface_param,
+								data, dt_len);
+				break;
+			case ISCSI_IFACE_TYPE_IPV6:
+				ret = beiscsi_iface_config_ipv6(shost,
+								iface_param,
+								data, dt_len);
+				break;
+			}
 		}
 
+		if (ret == -EPERM) {
+			__beiscsi_log(phba, KERN_ERR,
+				      "BS_%d : %s.0 set param %d not permitted",
+				      (iface_param->iface_type ==
+				       ISCSI_IFACE_TYPE_IPV4) ? "ipv4" : "ipv6",
+				      iface_param->param);
+			ret = 0;
+		}
 		if (ret)
-			return ret;
+			break;
 	}
 
 	return ret;
 }
 
-static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
-		struct iscsi_iface *iface, int param,
-		char *buf)
+static int __beiscsi_iface_get_param(struct beiscsi_hba *phba,
+				     struct iscsi_iface *iface,
+				     int param, char *buf)
 {
 	struct be_cmd_get_if_info_resp *if_info;
-	int len, ip_type = BE2_IPV4;
+	int len, ip_type = BEISCSI_IP_TYPE_V4;
 
 	if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
-		ip_type = BE2_IPV6;
+		ip_type = BEISCSI_IP_TYPE_V6;
 
-	len = mgmt_get_if_info(phba, ip_type, &if_info);
+	len = beiscsi_if_get_info(phba, ip_type, &if_info);
 	if (len)
 		return len;
 
@@ -567,24 +549,24 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
 		break;
 	case ISCSI_NET_PARAM_VLAN_ENABLED:
 		len = sprintf(buf, "%s\n",
-			     (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
-			     ? "Disabled\n" : "Enabled\n");
+			      (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) ?
+			      "disable" : "enable");
 		break;
 	case ISCSI_NET_PARAM_VLAN_ID:
 		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			len = -EINVAL;
 		else
 			len = sprintf(buf, "%d\n",
-				     (if_info->vlan_priority &
-				     ISCSI_MAX_VLAN_ID));
+				      (if_info->vlan_priority &
+				       ISCSI_MAX_VLAN_ID));
 		break;
 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
 		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
 			len = -EINVAL;
 		else
 			len = sprintf(buf, "%d\n",
-				     ((if_info->vlan_priority >> 13) &
-				     ISCSI_MAX_VLAN_PRIORITY));
+				      ((if_info->vlan_priority >> 13) &
+				       ISCSI_MAX_VLAN_PRIORITY));
 		break;
 	default:
 		WARN_ON(1);
@@ -594,18 +576,20 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
 	return len;
 }
 
-int be2iscsi_iface_get_param(struct iscsi_iface *iface,
-		enum iscsi_param_type param_type,
-		int param, char *buf)
+int beiscsi_iface_get_param(struct iscsi_iface *iface,
+			    enum iscsi_param_type param_type,
+			    int param, char *buf)
 {
 	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	struct be_cmd_get_def_gateway_resp gateway;
-	int len = -ENOSYS;
+	int len = -EPERM;
 
-	if (phba->state & BE_ADAPTER_PCI_ERR) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : In PCI_ERROR Recovery\n");
+	if (param_type != ISCSI_NET_PARAM)
+		return 0;
+	if (!beiscsi_hba_is_online(phba)) {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : HBA in error 0x%lx\n", phba->state);
 		return -EBUSY;
 	}
 
@@ -617,19 +601,22 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
 	case ISCSI_NET_PARAM_VLAN_ENABLED:
 	case ISCSI_NET_PARAM_VLAN_ID:
 	case ISCSI_NET_PARAM_VLAN_PRIORITY:
-		len = be2iscsi_get_if_param(phba, iface, param, buf);
+		len = __beiscsi_iface_get_param(phba, iface, param, buf);
 		break;
 	case ISCSI_NET_PARAM_IFACE_ENABLE:
-		len = sprintf(buf, "enabled\n");
+		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
+			len = sprintf(buf, "%s\n",
+				      phba->ipv4_iface ? "enable" : "disable");
+		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
+			len = sprintf(buf, "%s\n",
+				      phba->ipv6_iface ? "enable" : "disable");
 		break;
 	case ISCSI_NET_PARAM_IPV4_GW:
 		memset(&gateway, 0, sizeof(gateway));
-		len = mgmt_get_gateway(phba, BE2_IPV4, &gateway);
+		len = beiscsi_if_get_gw(phba, BEISCSI_IP_TYPE_V4, &gateway);
 		if (!len)
 			len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
 		break;
-	default:
-		len = -ENOSYS;
 	}
 
 	return len;
@@ -647,7 +634,7 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
 			   enum iscsi_param param, char *buf)
 {
 	struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
-	int len = 0;
+	int len;
 
 	beiscsi_log(beiscsi_ep->phba, KERN_INFO,
 		    BEISCSI_LOG_CONFIG,
@@ -659,13 +646,13 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
 		len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
 		break;
 	case ISCSI_PARAM_CONN_ADDRESS:
-		if (beiscsi_ep->ip_type == BE2_IPV4)
+		if (beiscsi_ep->ip_type == BEISCSI_IP_TYPE_V4)
 			len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
 		else
 			len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
 		break;
 	default:
-		return -ENOSYS;
+		len = -EPERM;
 	}
 	return len;
 }
@@ -758,7 +745,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	struct iscsi_cls_host *ihost = shost->shost_data;
 
-	ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ?
+	ihost->port_state = test_bit(BEISCSI_HBA_LINK_UP, &phba->state) ?
 		ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
 }
 
@@ -810,16 +797,13 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
 	int status = 0;
 
-
-	if (phba->state & BE_ADAPTER_PCI_ERR) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : In PCI_ERROR Recovery\n");
-		return -EBUSY;
-	} else {
+	if (!beiscsi_hba_is_online(phba)) {
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-			    "BS_%d : In beiscsi_get_host_param,"
-			    " param = %d\n", param);
+			    "BS_%d : HBA in error 0x%lx\n", phba->state);
+		return -EBUSY;
 	}
+	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+		    "BS_%d : In beiscsi_get_host_param, param = %d\n", param);
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_HWADDRESS:
@@ -961,15 +945,13 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 
 	phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
 
-	if (phba->state & BE_ADAPTER_PCI_ERR) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : In PCI_ERROR Recovery\n");
+	if (!beiscsi_hba_is_online(phba)) {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : HBA in error 0x%lx\n", phba->state);
 		return -EBUSY;
-	} else {
-		beiscsi_log(beiscsi_conn->phba, KERN_INFO,
-			    BEISCSI_LOG_CONFIG,
-			    "BS_%d : In beiscsi_conn_start\n");
 	}
+	beiscsi_log(beiscsi_conn->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+		    "BS_%d : In beiscsi_conn_start\n");
 
 	memset(&params, 0, sizeof(struct beiscsi_offload_params));
 	beiscsi_ep = beiscsi_conn->ep;
@@ -1186,28 +1168,20 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
 	struct iscsi_endpoint *ep;
 	int ret;
 
-	if (shost)
-		phba = iscsi_host_priv(shost);
-	else {
+	if (!shost) {
 		ret = -ENXIO;
-		printk(KERN_ERR
-		       "beiscsi_ep_connect shost is NULL\n");
+		pr_err("beiscsi_ep_connect shost is NULL\n");
 		return ERR_PTR(ret);
 	}
 
-	if (beiscsi_error(phba)) {
+	phba = iscsi_host_priv(shost);
+	if (!beiscsi_hba_is_online(phba)) {
 		ret = -EIO;
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-			    "BS_%d : The FW state Not Stable!!!\n");
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : HBA in error 0x%lx\n", phba->state);
 		return ERR_PTR(ret);
 	}
-
-	if (phba->state & BE_ADAPTER_PCI_ERR) {
-		ret = -EBUSY;
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : In PCI_ERROR Recovery\n");
-		return ERR_PTR(ret);
-	} else if (phba->state & BE_ADAPTER_LINK_DOWN) {
+	if (!test_bit(BEISCSI_HBA_LINK_UP, &phba->state)) {
 		ret = -EBUSY;
 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
 			    "BS_%d : The Adapter Port state is Down!!!\n");
@@ -1361,9 +1335,9 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 		tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
 	}
 
-	if (phba->state & BE_ADAPTER_PCI_ERR) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : PCI_ERROR Recovery\n");
+	if (!beiscsi_hba_is_online(phba)) {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BS_%d : HBA in error 0x%lx\n", phba->state);
 		goto free_ep;
 	}
 
@@ -1386,7 +1360,7 @@ free_ep:
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
 }
 
-umode_t be2iscsi_attr_is_visible(int param_type, int param)
+umode_t beiscsi_attr_is_visible(int param_type, int param)
 {
 	switch (param_type) {
 	case ISCSI_NET_PARAM:

+ 10 - 15
drivers/scsi/be2iscsi/be_iscsi.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Avago Technologies
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Avago Technologies
  * 3333 Susan Street
@@ -23,25 +23,18 @@
 #include "be_main.h"
 #include "be_main.h"
 #include "be_mgmt.h"
 #include "be_mgmt.h"
 
 
-#define BE2_IPV4  0x1
-#define BE2_IPV6  0x10
-#define BE2_DHCP_V4 0x05
+void beiscsi_iface_create_default(struct beiscsi_hba *phba);
 
 
-#define NON_BLOCKING 0x0
-#define BLOCKING 0x1
+void beiscsi_iface_destroy_default(struct beiscsi_hba *phba);
 
 
-void beiscsi_create_def_ifaces(struct beiscsi_hba *phba);
-
-void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
-
-int be2iscsi_iface_get_param(struct iscsi_iface *iface,
+int beiscsi_iface_get_param(struct iscsi_iface *iface,
 			     enum iscsi_param_type param_type,
 			     int param, char *buf);
 
-int be2iscsi_iface_set_param(struct Scsi_Host *shost,
+int beiscsi_iface_set_param(struct Scsi_Host *shost,
 			     void *data, uint32_t count);
 
-umode_t be2iscsi_attr_is_visible(int param_type, int param);
+umode_t beiscsi_attr_is_visible(int param_type, int param);
 
 void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 				struct beiscsi_offload_params *params);
@@ -57,6 +50,8 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
 
 void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
 
+void beiscsi_session_fail(struct iscsi_cls_session *cls_session);
+
 struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
 					   *cls_session, uint32_t cid);
 

+ 1268 - 1212
drivers/scsi/be2iscsi/be_main.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -374,170 +374,6 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 	return iscsi_eh_device_reset(sc);
 }
 
-static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
-{
-	struct beiscsi_hba *phba = data;
-	struct mgmt_session_info *boot_sess = &phba->boot_sess;
-	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
-	char *str = buf;
-	int rc;
-
-	switch (type) {
-	case ISCSI_BOOT_TGT_NAME:
-		rc = sprintf(buf, "%.*s\n",
-			    (int)strlen(boot_sess->target_name),
-			    (char *)&boot_sess->target_name);
-		break;
-	case ISCSI_BOOT_TGT_IP_ADDR:
-		if (boot_conn->dest_ipaddr.ip_type == 0x1)
-			rc = sprintf(buf, "%pI4\n",
-				(char *)&boot_conn->dest_ipaddr.addr);
-		else
-			rc = sprintf(str, "%pI6\n",
-				(char *)&boot_conn->dest_ipaddr.addr);
-		break;
-	case ISCSI_BOOT_TGT_PORT:
-		rc = sprintf(str, "%d\n", boot_conn->dest_port);
-		break;
-
-	case ISCSI_BOOT_TGT_CHAP_NAME:
-		rc = sprintf(str,  "%.*s\n",
-			     boot_conn->negotiated_login_options.auth_data.chap.
-			     target_chap_name_length,
-			     (char *)&boot_conn->negotiated_login_options.
-			     auth_data.chap.target_chap_name);
-		break;
-	case ISCSI_BOOT_TGT_CHAP_SECRET:
-		rc = sprintf(str,  "%.*s\n",
-			     boot_conn->negotiated_login_options.auth_data.chap.
-			     target_secret_length,
-			     (char *)&boot_conn->negotiated_login_options.
-			     auth_data.chap.target_secret);
-		break;
-	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
-		rc = sprintf(str,  "%.*s\n",
-			     boot_conn->negotiated_login_options.auth_data.chap.
-			     intr_chap_name_length,
-			     (char *)&boot_conn->negotiated_login_options.
-			     auth_data.chap.intr_chap_name);
-		break;
-	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-		rc = sprintf(str,  "%.*s\n",
-			     boot_conn->negotiated_login_options.auth_data.chap.
-			     intr_secret_length,
-			     (char *)&boot_conn->negotiated_login_options.
-			     auth_data.chap.intr_secret);
-		break;
-	case ISCSI_BOOT_TGT_FLAGS:
-		rc = sprintf(str, "2\n");
-		break;
-	case ISCSI_BOOT_TGT_NIC_ASSOC:
-		rc = sprintf(str, "0\n");
-		break;
-	default:
-		rc = -ENOSYS;
-		break;
-	}
-	return rc;
-}
-
-static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
-{
-	struct beiscsi_hba *phba = data;
-	char *str = buf;
-	int rc;
-
-	switch (type) {
-	case ISCSI_BOOT_INI_INITIATOR_NAME:
-		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
-		break;
-	default:
-		rc = -ENOSYS;
-		break;
-	}
-	return rc;
-}
-
-static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
-{
-	struct beiscsi_hba *phba = data;
-	char *str = buf;
-	int rc;
-
-	switch (type) {
-	case ISCSI_BOOT_ETH_FLAGS:
-		rc = sprintf(str, "2\n");
-		break;
-	case ISCSI_BOOT_ETH_INDEX:
-		rc = sprintf(str, "0\n");
-		break;
-	case ISCSI_BOOT_ETH_MAC:
-		rc  = beiscsi_get_macaddr(str, phba);
-		break;
-	default:
-		rc = -ENOSYS;
-		break;
-	}
-	return rc;
-}
-
-
-static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
-{
-	umode_t rc;
-
-	switch (type) {
-	case ISCSI_BOOT_TGT_NAME:
-	case ISCSI_BOOT_TGT_IP_ADDR:
-	case ISCSI_BOOT_TGT_PORT:
-	case ISCSI_BOOT_TGT_CHAP_NAME:
-	case ISCSI_BOOT_TGT_CHAP_SECRET:
-	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
-	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-	case ISCSI_BOOT_TGT_NIC_ASSOC:
-	case ISCSI_BOOT_TGT_FLAGS:
-		rc = S_IRUGO;
-		break;
-	default:
-		rc = 0;
-		break;
-	}
-	return rc;
-}
-
-static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
-{
-	umode_t rc;
-
-	switch (type) {
-	case ISCSI_BOOT_INI_INITIATOR_NAME:
-		rc = S_IRUGO;
-		break;
-	default:
-		rc = 0;
-		break;
-	}
-	return rc;
-}
-
-
-static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
-{
-	umode_t rc;
-
-	switch (type) {
-	case ISCSI_BOOT_ETH_FLAGS:
-	case ISCSI_BOOT_ETH_MAC:
-	case ISCSI_BOOT_ETH_INDEX:
-		rc = S_IRUGO;
-		break;
-	default:
-		rc = 0;
-		break;
-	}
-	return rc;
-}
-
 /*------------------- PCI Driver operations and data ----------------- */
 static const struct pci_device_id beiscsi_pci_id_table[] = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -850,12 +686,11 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
 {
 	struct beiscsi_hba *phba;
-	struct be_eq_entry *eqe = NULL;
+	struct be_eq_entry *eqe;
 	struct be_queue_info *eq;
 	struct be_queue_info *mcc;
-	unsigned int num_eq_processed;
+	unsigned int mcc_events;
 	struct be_eq_obj *pbe_eq;
-	unsigned long flags;
 
 	pbe_eq = dev_id;
 	eq = &pbe_eq->q;
@@ -863,27 +698,23 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
 	mcc = &phba->ctrl.mcc_obj.cq;
 	eqe = queue_tail_node(eq);
 
-	num_eq_processed = 0;
-
+	mcc_events = 0;
 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 				& EQE_VALID_MASK) {
 		if (((eqe->dw[offsetof(struct amap_eq_entry,
 		     resource_id) / 32] &
 		     EQE_RESID_MASK) >> 16) == mcc->id) {
-			spin_lock_irqsave(&phba->isr_lock, flags);
-			pbe_eq->todo_mcc_cq = true;
-			spin_unlock_irqrestore(&phba->isr_lock, flags);
+			mcc_events++;
 		}
 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 		queue_tail_inc(eq);
 		eqe = queue_tail_node(eq);
-		num_eq_processed++;
 	}
-	if (pbe_eq->todo_mcc_cq)
-		queue_work(phba->wq, &pbe_eq->work_cqs);
-	if (num_eq_processed)
-		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);
 
+	if (mcc_events) {
+		queue_work(phba->wq, &pbe_eq->mcc_work);
+		hwi_ring_eq_db(phba, eq->id, 1,	mcc_events, 1, 1);
+	}
 	return IRQ_HANDLED;
 }
 
@@ -902,7 +733,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 	eq = &pbe_eq->q;
 
 	phba = pbe_eq->phba;
-
 	/* disable interrupt till iopoll completes */
 	hwi_ring_eq_db(phba, eq->id, 1,	0, 0, 1);
 	irq_poll_sched(&pbe_eq->iopoll);
@@ -920,14 +750,13 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 	struct beiscsi_hba *phba;
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
-	struct be_eq_entry *eqe = NULL;
+	struct be_eq_entry *eqe;
 	struct be_queue_info *eq;
 	struct be_queue_info *mcc;
-	unsigned long flags, index;
-	unsigned int num_mcceq_processed, num_ioeq_processed;
+	unsigned int mcc_events, io_events;
 	struct be_ctrl_info *ctrl;
 	struct be_eq_obj *pbe_eq;
-	int isr;
+	int isr, rearm;
 
 	phba = dev_id;
 	ctrl = &phba->ctrl;
@@ -942,44 +771,35 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 
 	eq = &phwi_context->be_eq[0].q;
 	mcc = &phba->ctrl.mcc_obj.cq;
-	index = 0;
 	eqe = queue_tail_node(eq);
 
-	num_ioeq_processed = 0;
-	num_mcceq_processed = 0;
+	io_events = 0;
+	mcc_events = 0;
 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 				& EQE_VALID_MASK) {
 		if (((eqe->dw[offsetof(struct amap_eq_entry,
-		     resource_id) / 32] &
-		     EQE_RESID_MASK) >> 16) == mcc->id) {
-			spin_lock_irqsave(&phba->isr_lock, flags);
-			pbe_eq->todo_mcc_cq = true;
-			spin_unlock_irqrestore(&phba->isr_lock, flags);
-			num_mcceq_processed++;
-		} else {
-			irq_poll_sched(&pbe_eq->iopoll);
-			num_ioeq_processed++;
-		}
+		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
+			mcc_events++;
+		else
+			io_events++;
 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 		queue_tail_inc(eq);
 		eqe = queue_tail_node(eq);
 	}
-	if (num_ioeq_processed || num_mcceq_processed) {
-		if (pbe_eq->todo_mcc_cq)
-			queue_work(phba->wq, &pbe_eq->work_cqs);
-
-		if ((num_mcceq_processed) && (!num_ioeq_processed))
-			hwi_ring_eq_db(phba, eq->id, 0,
-				      (num_ioeq_processed +
-				       num_mcceq_processed) , 1, 1);
-		else
-			hwi_ring_eq_db(phba, eq->id, 0,
-				       (num_ioeq_processed +
-					num_mcceq_processed), 0, 1);
-
-		return IRQ_HANDLED;
-	} else
+	if (!io_events && !mcc_events)
 		return IRQ_NONE;
+
+	/* no need to rearm if interrupt is only for IOs */
+	rearm = 0;
+	if (mcc_events) {
+		queue_work(phba->wq, &pbe_eq->mcc_work);
+		/* rearm for MCCQ */
+		rearm = 1;
+	}
+	if (io_events)
+		irq_poll_sched(&pbe_eq->iopoll);
+	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
+	return IRQ_HANDLED;
 }
 
 
@@ -1077,57 +897,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
 }
 
-static unsigned int
-beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
-			  struct beiscsi_hba *phba,
-			  struct pdu_base *ppdu,
-			  unsigned long pdu_len,
-			  void *pbuffer, unsigned long buf_len)
-{
-	struct iscsi_conn *conn = beiscsi_conn->conn;
-	struct iscsi_session *session = conn->session;
-	struct iscsi_task *task;
-	struct beiscsi_io_task *io_task;
-	struct iscsi_hdr *login_hdr;
-
-	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
-						PDUBASE_OPCODE_MASK) {
-	case ISCSI_OP_NOOP_IN:
-		pbuffer = NULL;
-		buf_len = 0;
-		break;
-	case ISCSI_OP_ASYNC_EVENT:
-		break;
-	case ISCSI_OP_REJECT:
-		WARN_ON(!pbuffer);
-		WARN_ON(!(buf_len == 48));
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
-			    "BM_%d : In ISCSI_OP_REJECT\n");
-		break;
-	case ISCSI_OP_LOGIN_RSP:
-	case ISCSI_OP_TEXT_RSP:
-		task = conn->login_task;
-		io_task = task->dd_data;
-		login_hdr = (struct iscsi_hdr *)ppdu;
-		login_hdr->itt = io_task->libiscsi_itt;
-		break;
-	default:
-		beiscsi_log(phba, KERN_WARNING,
-			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
-			    "BM_%d : Unrecognized opcode 0x%x in async msg\n",
-			    (ppdu->
-			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
-			     & PDUBASE_OPCODE_MASK));
-		return 1;
-	}
-
-	spin_lock_bh(&session->back_lock);
-	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
-	spin_unlock_bh(&session->back_lock);
-	return 0;
-}
-
 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
 {
 	struct sgl_handle *psgl_handle;
@@ -1199,6 +968,9 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
 		pwrb_context->alloc_index++;
 	spin_unlock_bh(&pwrb_context->wrb_lock);
 
+	if (pwrb_handle)
+		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
+
 	return pwrb_handle;
 }
 
@@ -1440,11 +1212,10 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
 		       struct beiscsi_hba *phba, struct sol_cqe *psol)
 		       struct beiscsi_hba *phba, struct sol_cqe *psol)
 {
 {
 	struct hwi_wrb_context *pwrb_context;
 	struct hwi_wrb_context *pwrb_context;
-	struct wrb_handle *pwrb_handle = NULL;
+	uint16_t wrb_index, cid, cri_index;
 	struct hwi_controller *phwi_ctrlr;
+	struct wrb_handle *pwrb_handle;
 	struct iscsi_task *task;
-	struct beiscsi_io_task *io_task;
-	uint16_t wrb_index, cid, cri_index;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	if (is_chip_be2_be3r(phba)) {
@@ -1463,9 +1234,6 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
 	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
 	task = pwrb_handle->pio_handle;
-
-	io_task = task->dd_data;
-	memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
 	iscsi_put_task(task);
 }
 
@@ -1614,431 +1382,428 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
 	spin_unlock_bh(&session->back_lock);
 }
 
-static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
-					  *pasync_ctx, unsigned int is_header,
-					  unsigned int host_write_ptr)
+/**
+ * ASYNC PDUs include
+ * a. Unsolicited NOP-In (target initiated NOP-In)
+ * b. ASYNC Messages
+ * c. Reject PDU
+ * d. Login response
+ * These headers arrive unprocessed by the EP firmware.
+ * The iSCSI layer processes them.
+ */
+static unsigned int
+beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
+		struct pdu_base *phdr, void *pdata, unsigned int dlen)
 {
-	if (is_header)
-		return &pasync_ctx->async_entry[host_write_ptr].
-		    header_busy_list;
-	else
-		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct iscsi_conn *conn = beiscsi_conn->conn;
+	struct beiscsi_io_task *io_task;
+	struct iscsi_hdr *login_hdr;
+	struct iscsi_task *task;
+	u8 code;
+
+	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
+	switch (code) {
+	case ISCSI_OP_NOOP_IN:
+		pdata = NULL;
+		dlen = 0;
+		break;
+	case ISCSI_OP_ASYNC_EVENT:
+		break;
+	case ISCSI_OP_REJECT:
+		WARN_ON(!pdata);
+		WARN_ON(!(dlen == 48));
+		beiscsi_log(phba, KERN_ERR,
+			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
+			    "BM_%d : In ISCSI_OP_REJECT\n");
+		break;
+	case ISCSI_OP_LOGIN_RSP:
+	case ISCSI_OP_TEXT_RSP:
+		task = conn->login_task;
+		io_task = task->dd_data;
+		login_hdr = (struct iscsi_hdr *)phdr;
+		login_hdr->itt = io_task->libiscsi_itt;
+		break;
+	default:
+		beiscsi_log(phba, KERN_WARNING,
+			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
+			    code);
+		return 1;
+	}
+	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
+	return 0;
+}
+
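+/* return a DEF PDU async handle to its header or data free list */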
+static inline void
+beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
+			 struct hd_async_handle *pasync_handle)
+{
+	if (pasync_handle->is_header) {
+		list_add_tail(&pasync_handle->link,
+				&pasync_ctx->async_header.free_list);
+		pasync_ctx->async_header.free_entries++;
+	} else {
+		list_add_tail(&pasync_handle->link,
+				&pasync_ctx->async_data.free_list);
+		pasync_ctx->async_data.free_entries++;
+	}
 }
 
-static struct async_pdu_handle *
-hwi_get_async_handle(struct beiscsi_hba *phba,
-		     struct beiscsi_conn *beiscsi_conn,
-		     struct hwi_async_pdu_context *pasync_ctx,
-		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
+static struct hd_async_handle *
+beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
+		       struct hd_async_context *pasync_ctx,
+		       struct i_t_dpdu_cqe *pdpdu_cqe)
 {
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct hd_async_handle *pasync_handle;
 	struct be_bus_address phys_addr;
-	struct list_head *pbusy_list;
-	struct async_pdu_handle *pasync_handle = NULL;
-	unsigned char is_header = 0;
-	unsigned int index, dpl;
+	u8 final, error = 0;
+	u16 cid, code, ci;
+	u32 dpl;
 
 
+	cid = beiscsi_conn->beiscsi_conn_cid;
+	/**
+	 * This function is invoked to get the right async_handle structure
+	 * from a given DEF PDU CQ entry.
+	 *
+	 * - index in CQ entry gives the vertical index
+	 * - address in CQ entry is the offset where the DMA last ended
+	 * - final - no more notifications for this PDU
+	 */
 	if (is_chip_be2_be3r(phba)) {
 		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
 				    dpl, pdpdu_cqe);
-		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
 				      index, pdpdu_cqe);
+		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+				      final, pdpdu_cqe);
 	} else {
 		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
 				    dpl, pdpdu_cqe);
-		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
 				      index, pdpdu_cqe);
+		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+				      final, pdpdu_cqe);
 	}
 
-	phys_addr.u.a32.address_lo =
-		(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
-					db_addr_lo) / 32] - dpl);
-	phys_addr.u.a32.address_hi =
-		pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
-				       db_addr_hi) / 32];
-
-	phys_addr.u.a64.address =
-			*((unsigned long long *)(&phys_addr.u.a64.address));
-
-	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
-			& PDUCQE_CODE_MASK) {
+	/**
+	 * DB addr Hi/Lo is same for BE and SKH.
+	 * Subtract the dataplacementlength to get to the base.
+	 */
+	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+						   db_addr_lo, pdpdu_cqe);
+	phys_addr.u.a32.address_lo -= dpl;
+	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+						   db_addr_hi, pdpdu_cqe);
+
+	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
+	switch (code) {
 	case UNSOL_HDR_NOTIFY:
-		is_header = 1;
-
-		 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
-						      is_header, index);
+		pasync_handle = pasync_ctx->async_entry[ci].header;
 		break;
+	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
+		error = 1;
 	case UNSOL_DATA_NOTIFY:
-		 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
-						      is_header, index);
+		pasync_handle = pasync_ctx->async_entry[ci].data;
 		break;
+	/* called only for above codes */
 	default:
-		pbusy_list = NULL;
-		beiscsi_log(phba, KERN_WARNING,
-			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
-			    "BM_%d : Unexpected code=%d\n",
-			    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
-			    code) / 32] & PDUCQE_CODE_MASK);
-		return NULL;
+		pasync_handle = NULL;
+		break;
 	}
 
-	WARN_ON(list_empty(pbusy_list));
-	list_for_each_entry(pasync_handle, pbusy_list, link) {
-		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
-			break;
+	if (!pasync_handle) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+			    "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
+			    cid, code, ci, phys_addr.u.a64.address);
+		return pasync_handle;
 	}
 
-	WARN_ON(!pasync_handle);
+	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
+	    pasync_handle->index != ci) {
+		/* driver bug - if ci does not match async handle index */
+		error = 1;
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
+			    cid, pasync_handle->is_header ? 'H' : 'D',
+			    pasync_handle->pa.u.a64.address,
+			    pasync_handle->index,
+			    phys_addr.u.a64.address, ci);
+		/* FW has stale address - attempt continuing by dropping */
+	}
 
 
-	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
-			     beiscsi_conn->beiscsi_conn_cid);
-	pasync_handle->is_header = is_header;
+	/**
+	 * Each CID is associated with unique CRI.
+	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
+	 */
+	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
+	pasync_handle->is_final = final;
 	pasync_handle->buffer_len = dpl;
-	*pcq_index = index;
+	/* empty the slot */
+	if (pasync_handle->is_header)
+		pasync_ctx->async_entry[ci].header = NULL;
+	else
+		pasync_ctx->async_entry[ci].data = NULL;
 
 
+	/**
+	 * DEF PDU header and data buffers with errors should be simply
+	 * dropped as there are no consumers for it.
+	 */
+	if (error) {
+		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+		pasync_handle = NULL;
+	}
 	return pasync_handle;
 }
 
-static unsigned int
-hwi_update_async_writables(struct beiscsi_hba *phba,
-			    struct hwi_async_pdu_context *pasync_ctx,
-			    unsigned int is_header, unsigned int cq_index)
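+/* release every handle queued on this CRI's wait queue back to the free lists */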
+static void
+beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
+			  struct hd_async_context *pasync_ctx,
+			  u16 cri)
 {
-	struct list_head *pbusy_list;
-	struct async_pdu_handle *pasync_handle;
-	unsigned int num_entries, writables = 0;
-	unsigned int *pep_read_ptr, *pwritables;
-
-	num_entries = pasync_ctx->num_entries;
-	if (is_header) {
-		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
-		pwritables = &pasync_ctx->async_header.writables;
-	} else {
-		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
-		pwritables = &pasync_ctx->async_data.writables;
-	}
-
-	while ((*pep_read_ptr) != cq_index) {
-		(*pep_read_ptr)++;
-		*pep_read_ptr = (*pep_read_ptr) % num_entries;
-
-		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
-						     *pep_read_ptr);
-		if (writables == 0)
-			WARN_ON(list_empty(pbusy_list));
-
-		if (!list_empty(pbusy_list)) {
-			pasync_handle = list_entry(pbusy_list->next,
-						   struct async_pdu_handle,
-						   link);
-			WARN_ON(!pasync_handle);
-			pasync_handle->consumed = 1;
-		}
-
-		writables++;
-	}
+	struct hd_async_handle *pasync_handle, *tmp_handle;
+	struct list_head *plist;
 
 
-	if (!writables) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
-			    "BM_%d : Duplicate notification received - index 0x%x!!\n",
-			    cq_index);
-		WARN_ON(1);
+	plist  = &pasync_ctx->async_entry[cri].wq.list;
+	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
+		list_del(&pasync_handle->link);
+		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
 	}
 
-	*pwritables = *pwritables + writables;
-	return 0;
+	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
+	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
+	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
+	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
 }
 
-static void hwi_free_async_msg(struct beiscsi_hba *phba,
-			       struct hwi_async_pdu_context *pasync_ctx,
-			       unsigned int cri)
+static unsigned int
+beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
+		    struct hd_async_context *pasync_ctx,
+		    u16 cri)
 {
-	struct async_pdu_handle *pasync_handle, *tmp_handle;
+	struct iscsi_session *session = beiscsi_conn->conn->session;
+	struct hd_async_handle *pasync_handle, *plast_handle;
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	void *phdr = NULL, *pdata = NULL;
+	u32 dlen = 0, status = 0;
 	struct list_head *plist;
 
-	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
-	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
-		list_del(&pasync_handle->link);
-
-		if (pasync_handle->is_header) {
-			list_add_tail(&pasync_handle->link,
-				      &pasync_ctx->async_header.free_list);
-			pasync_ctx->async_header.free_entries++;
-		} else {
-			list_add_tail(&pasync_handle->link,
-				      &pasync_ctx->async_data.free_list);
-			pasync_ctx->async_data.free_entries++;
+	plist = &pasync_ctx->async_entry[cri].wq.list;
+	plast_handle = NULL;
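+	/* first handle carries the header; the data is gathered into the first data buffer */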
+	list_for_each_entry(pasync_handle, plist, link) {
+		plast_handle = pasync_handle;
+		/* get the header, the first entry */
+		if (!phdr) {
+			phdr = pasync_handle->pbuffer;
+			continue;
 		}
+		/* use first buffer to collect all the data */
+		if (!pdata) {
+			pdata = pasync_handle->pbuffer;
+			dlen = pasync_handle->buffer_len;
+			continue;
+		}
+		memcpy(pdata + dlen, pasync_handle->pbuffer,
+		       pasync_handle->buffer_len);
+		dlen += pasync_handle->buffer_len;
 	}
 
-	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
-	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
-	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
+	if (!plast_handle->is_final) {
+		/* last handle should have final PDU notification from FW */
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+			    "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
+			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
+			    pasync_ctx->async_entry[cri].wq.hdr_len,
+			    pasync_ctx->async_entry[cri].wq.bytes_needed,
+			    pasync_ctx->async_entry[cri].wq.bytes_received);
+	}
+	spin_lock_bh(&session->back_lock);
+	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
+	spin_unlock_bh(&session->back_lock);
+	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
+	return status;
 }
 
-static struct phys_addr *
-hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
-		     unsigned int is_header, unsigned int host_write_ptr)
+static unsigned int
+beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
+		       struct hd_async_context *pasync_ctx,
+		       struct hd_async_handle *pasync_handle)
 {
-	struct phys_addr *pasync_sge = NULL;
+	unsigned int bytes_needed = 0, status = 0;
+	u16 cri = pasync_handle->cri;
+	struct cri_wait_queue *wq;
+	struct beiscsi_hba *phba;
+	struct pdu_base *ppdu;
+	char *err = "";
 
 
-	if (is_header)
-		pasync_sge = pasync_ctx->async_header.ring_base;
-	else
-		pasync_sge = pasync_ctx->async_data.ring_base;
+	phba = beiscsi_conn->phba;
+	wq = &pasync_ctx->async_entry[cri].wq;
+	if (pasync_handle->is_header) {
+		/* check if PDU hdr is rcv'd when old hdr not completed */
+		if (wq->hdr_len) {
+			err = "incomplete";
+			goto drop_pdu;
+		}
+		ppdu = pasync_handle->pbuffer;
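+		/* DataSegmentLength is split across hi/lo fields; reassemble it */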
+		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
+					     data_len_hi, ppdu);
+		bytes_needed <<= 16;
+		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
+							  data_len_lo, ppdu));
+		wq->hdr_len = pasync_handle->buffer_len;
+		wq->bytes_received = 0;
+		wq->bytes_needed = bytes_needed;
+		list_add_tail(&pasync_handle->link, &wq->list);
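+		/* a PDU with no data segment completes on the header alone */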
+		if (!bytes_needed)
+			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
+						     pasync_ctx, cri);
+	} else {
+		/* check if data received has header and is needed */
+		if (!wq->hdr_len || !wq->bytes_needed) {
+			err = "header less";
+			goto drop_pdu;
+		}
+		wq->bytes_received += pasync_handle->buffer_len;
+		/* Something got overwritten? Better catch it here. */
+		if (wq->bytes_received > wq->bytes_needed) {
+			err = "overflow";
+			goto drop_pdu;
+		}
+		list_add_tail(&pasync_handle->link, &wq->list);
+		if (wq->bytes_received == wq->bytes_needed)
+			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
+						     pasync_ctx, cri);
+	}
+	return status;
 
 
-	return pasync_sge + host_write_ptr;
+drop_pdu:
+	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
+		    beiscsi_conn->beiscsi_conn_cid, err,
+		    pasync_handle->is_header ? 'H' : 'D',
+		    wq->hdr_len, wq->bytes_needed,
+		    pasync_handle->buffer_len);
+	/* discard this handle */
+	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+	/* free all the other handles in cri_wait_queue */
+	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
+	/* try continuing */
+	return status;
 }
 
-static void hwi_post_async_buffers(struct beiscsi_hba *phba,
-				    unsigned int is_header, uint8_t ulp_num)
+static void
+beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
+			 u8 header, u8 ulp_num)
 {
+	struct hd_async_handle *pasync_handle, *tmp, **slot;
+	struct hd_async_context *pasync_ctx;
 	struct hwi_controller *phwi_ctrlr;
-	struct hwi_async_pdu_context *pasync_ctx;
-	struct async_pdu_handle *pasync_handle;
-	struct list_head *pfree_link, *pbusy_list;
+	struct list_head *hfree_list;
 	struct phys_addr *pasync_sge;
-	unsigned int ring_id, num_entries;
-	unsigned int host_write_num, doorbell_offset;
-	unsigned int writables;
-	unsigned int i = 0;
-	u32 doorbell = 0;
+	u32 ring_id, doorbell = 0;
+	u16 index, num_entries;
+	u32 doorbell_offset;
+	u16 prod = 0, cons;
 
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
 	num_entries = pasync_ctx->num_entries;
-
-	if (is_header) {
-		writables = min(pasync_ctx->async_header.writables,
-				pasync_ctx->async_header.free_entries);
-		pfree_link = pasync_ctx->async_header.free_list.next;
-		host_write_num = pasync_ctx->async_header.host_write_ptr;
+	if (header) {
+		cons = pasync_ctx->async_header.free_entries;
+		hfree_list = &pasync_ctx->async_header.free_list;
 		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
 		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
-				  doorbell_offset;
+					doorbell_offset;
 	} else {
-		writables = min(pasync_ctx->async_data.writables,
-				pasync_ctx->async_data.free_entries);
-		pfree_link = pasync_ctx->async_data.free_list.next;
-		host_write_num = pasync_ctx->async_data.host_write_ptr;
+		cons = pasync_ctx->async_data.free_entries;
+		hfree_list = &pasync_ctx->async_data.free_list;
 		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
 		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
-				  doorbell_offset;
+					doorbell_offset;
 	}
+	/* number of entries posted must be in multiples of 8 */
+	if (cons % 8)
+		return;
 
 
-	writables = (writables / 8) * 8;
-	if (writables) {
-		for (i = 0; i < writables; i++) {
-			pbusy_list =
-			    hwi_get_async_busy_list(pasync_ctx, is_header,
-						    host_write_num);
-			pasync_handle =
-			    list_entry(pfree_link, struct async_pdu_handle,
-								link);
-			WARN_ON(!pasync_handle);
-			pasync_handle->consumed = 0;
-
-			pfree_link = pfree_link->next;
-
-			pasync_sge = hwi_get_ring_address(pasync_ctx,
-						is_header, host_write_num);
-
-			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
-			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
-
-			list_move(&pasync_handle->link, pbusy_list);
-
-			host_write_num++;
-			host_write_num = host_write_num % num_entries;
-		}
-
-		if (is_header) {
-			pasync_ctx->async_header.host_write_ptr =
-							host_write_num;
-			pasync_ctx->async_header.free_entries -= writables;
-			pasync_ctx->async_header.writables -= writables;
-			pasync_ctx->async_header.busy_entries += writables;
-		} else {
-			pasync_ctx->async_data.host_write_ptr = host_write_num;
-			pasync_ctx->async_data.free_entries -= writables;
-			pasync_ctx->async_data.writables -= writables;
-			pasync_ctx->async_data.busy_entries += writables;
-		}
-
-		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
-		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
-		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
-		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
-					<< DB_DEF_PDU_CQPROC_SHIFT;
-
-		iowrite32(doorbell, phba->db_va + doorbell_offset);
-	}
-}
-
-static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
-					 struct beiscsi_conn *beiscsi_conn,
-					 struct i_t_dpdu_cqe *pdpdu_cqe)
-{
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_async_pdu_context *pasync_ctx;
-	struct async_pdu_handle *pasync_handle = NULL;
-	unsigned int cq_index = -1;
-	uint16_t cri_index = BE_GET_CRI_FROM_CID(
-			     beiscsi_conn->beiscsi_conn_cid);
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
-		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-		     cri_index));
-
-	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
-					     pdpdu_cqe, &cq_index);
-	BUG_ON(pasync_handle->is_header != 0);
-	if (pasync_handle->consumed == 0)
-		hwi_update_async_writables(phba, pasync_ctx,
-					   pasync_handle->is_header, cq_index);
-
-	hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
-	hwi_post_async_buffers(phba, pasync_handle->is_header,
-			       BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-			       cri_index));
-}
-
-static unsigned int
-hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
-		  struct beiscsi_hba *phba,
-		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
-{
-	struct list_head *plist;
-	struct async_pdu_handle *pasync_handle;
-	void *phdr = NULL;
-	unsigned int hdr_len = 0, buf_len = 0;
-	unsigned int status, index = 0, offset = 0;
-	void *pfirst_buffer = NULL;
-	unsigned int num_buf = 0;
-
-	plist = &pasync_ctx->async_entry[cri].wait_queue.list;
+	list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
+		list_del_init(&pasync_handle->link);
+		pasync_handle->is_final = 0;
+		pasync_handle->buffer_len = 0;
 
 
-	list_for_each_entry(pasync_handle, plist, link) {
-		if (index == 0) {
-			phdr = pasync_handle->pbuffer;
-			hdr_len = pasync_handle->buffer_len;
-		} else {
-			buf_len = pasync_handle->buffer_len;
-			if (!num_buf) {
-				pfirst_buffer = pasync_handle->pbuffer;
-				num_buf++;
-			}
-			memcpy(pfirst_buffer + offset,
-			       pasync_handle->pbuffer, buf_len);
-			offset += buf_len;
+		/* handles can be consumed out of order, use index in handle */
+		index = pasync_handle->index;
+		WARN_ON(pasync_handle->is_header != header);
+		if (header)
+			slot = &pasync_ctx->async_entry[index].header;
+		else
+			slot = &pasync_ctx->async_entry[index].data;
+		/**
+		 * The slot just tracks handle's hold and release, so
+		 * overwriting at the same index won't do any harm but
+		 * needs to be caught.
+		 */
+		if (*slot != NULL) {
+			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+				    "BM_%d : async PDU %s slot at %u not empty\n",
+				    header ? "header" : "data", index);
 		}
-		index++;
+		/**
+		 * We use same freed index as in completion to post so this
+		 * operation is not required for refills. It's required only
+		 * for ring creation.
+		 */
+		if (header)
+			pasync_sge = pasync_ctx->async_header.ring_base;
+		else
+			pasync_sge = pasync_ctx->async_data.ring_base;
+		pasync_sge += index;
+		/* if it's a refill then the address is the same; hi is lo */
+		WARN_ON(pasync_sge->hi &&
+			pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
+		WARN_ON(pasync_sge->lo &&
+			pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
+		pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
+		pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
+
+		*slot = pasync_handle;
+		if (++prod == cons)
+			break;
 	}
+	if (header)
+		pasync_ctx->async_header.free_entries -= prod;
+	else
+		pasync_ctx->async_data.free_entries -= prod;
 
 
-	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
-					    phdr, hdr_len, pfirst_buffer,
-					    offset);
-
-	hwi_free_async_msg(phba, pasync_ctx, cri);
-	return 0;
-}
-
-static unsigned int
-hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
-		     struct beiscsi_hba *phba,
-		     struct async_pdu_handle *pasync_handle)
-{
-	struct hwi_async_pdu_context *pasync_ctx;
-	struct hwi_controller *phwi_ctrlr;
-	unsigned int bytes_needed = 0, status = 0;
-	unsigned short cri = pasync_handle->cri;
-	struct pdu_base *ppdu;
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
-		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-		     BE_GET_CRI_FROM_CID(beiscsi_conn->
-				 beiscsi_conn_cid)));
-
-	list_del(&pasync_handle->link);
-	if (pasync_handle->is_header) {
-		pasync_ctx->async_header.busy_entries--;
-		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
-			hwi_free_async_msg(phba, pasync_ctx, cri);
-			BUG();
-		}
-
-		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
-		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
-		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
-				(unsigned short)pasync_handle->buffer_len;
-		list_add_tail(&pasync_handle->link,
-			      &pasync_ctx->async_entry[cri].wait_queue.list);
-
-		ppdu = pasync_handle->pbuffer;
-		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
-			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
-			0xFFFF0000) | ((be16_to_cpu((ppdu->
-			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
-			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
-
-		if (status == 0) {
-			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
-			    bytes_needed;
-
-			if (bytes_needed == 0)
-				status = hwi_fwd_async_msg(beiscsi_conn, phba,
-							   pasync_ctx, cri);
-		}
-	} else {
-		pasync_ctx->async_data.busy_entries--;
-		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
-			list_add_tail(&pasync_handle->link,
-				      &pasync_ctx->async_entry[cri].wait_queue.
-				      list);
-			pasync_ctx->async_entry[cri].wait_queue.
-				bytes_received +=
-				(unsigned short)pasync_handle->buffer_len;
-
-			if (pasync_ctx->async_entry[cri].wait_queue.
-			    bytes_received >=
-			    pasync_ctx->async_entry[cri].wait_queue.
-			    bytes_needed)
-				status = hwi_fwd_async_msg(beiscsi_conn, phba,
-							   pasync_ctx, cri);
-		}
-	}
-	return status;
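+	/* report the number of posted handles in the doorbell and rearm the ring */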
+	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
+	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
+	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
+	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
+	iowrite32(doorbell, phba->db_va + doorbell_offset);
 }
 
-static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
-					 struct beiscsi_hba *phba,
-					 struct i_t_dpdu_cqe *pdpdu_cqe)
+static void
+beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
+			  struct i_t_dpdu_cqe *pdpdu_cqe)
 {
+	struct beiscsi_hba *phba = beiscsi_conn->phba;
+	struct hd_async_handle *pasync_handle = NULL;
+	struct hd_async_context *pasync_ctx;
 	struct hwi_controller *phwi_ctrlr;
-	struct hwi_async_pdu_context *pasync_ctx;
-	struct async_pdu_handle *pasync_handle = NULL;
-	unsigned int cq_index = -1;
-	uint16_t cri_index = BE_GET_CRI_FROM_CID(
-			     beiscsi_conn->beiscsi_conn_cid);
+	u16 cid_cri;
+	u8 ulp_num;
 
 
 	phwi_ctrlr = phba->phwi_ctrlr;
-	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
-		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
-		     cri_index));
-
-	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
-					     pdpdu_cqe, &cq_index);
-
-	if (pasync_handle->consumed == 0)
-		hwi_update_async_writables(phba, pasync_ctx,
-					   pasync_handle->is_header, cq_index);
+	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
+	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
+	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
+	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
+					       pdpdu_cqe);
+	if (!pasync_handle)
+		return;
 
 
-	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
-	hwi_post_async_buffers(phba, pasync_handle->is_header,
-			       BEISCSI_GET_ULP_FROM_CRI(
-			       phwi_ctrlr, cri_index));
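+	/* queue the handle; a completed PDU is forwarded and its slots reposted */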
+	beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
+	beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
 }
 
 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
@@ -2051,6 +1816,9 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
 	mcc_compl = queue_tail_node(mcc_cq);
 	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
 	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
+		if (beiscsi_hba_in_error(phba))
+			return;
+
 		if (num_processed >= 32) {
 			hwi_ring_cq_db(phba, mcc_cq->id,
 					num_processed, 0);
@@ -2073,6 +1841,19 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
 		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
 }
 
+static void beiscsi_mcc_work(struct work_struct *work)
+{
+	struct be_eq_obj *pbe_eq;
+	struct beiscsi_hba *phba;
+
+	pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
+	phba = pbe_eq->phba;
+	beiscsi_process_mcc_cq(phba);
+	/* rearm EQ for further interrupts */
+	if (!beiscsi_hba_in_error(phba))
+		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+}
+
 /**
  * beiscsi_process_cq()- Process the Completion Queue
  * @pbe_eq: Event Q on which the Completion has come
@@ -2101,6 +1882,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
 
 
 	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
 	       CQE_VALID_MASK) {
+		if (beiscsi_hba_in_error(phba))
+			return 0;
+
 		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
 
 		 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
@@ -2165,8 +1949,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
 				    cqe_desc[code], code, cid);
 
 			spin_lock_bh(&phba->async_pdu_lock);
-			hwi_process_default_pdu_ring(beiscsi_conn, phba,
-					     (struct i_t_dpdu_cqe *)sol);
+			beiscsi_hdq_process_compl(beiscsi_conn,
+						  (struct i_t_dpdu_cqe *)sol);
 			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case UNSOL_DATA_NOTIFY:
@@ -2176,8 +1960,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
 				    cqe_desc[code], code, cid);
 
 			spin_lock_bh(&phba->async_pdu_lock);
-			hwi_process_default_pdu_ring(beiscsi_conn, phba,
-					     (struct i_t_dpdu_cqe *)sol);
+			beiscsi_hdq_process_compl(beiscsi_conn,
+						  (struct i_t_dpdu_cqe *)sol);
 			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case CXN_INVALIDATE_INDEX_NOTIFY:
@@ -2213,8 +1997,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
 				    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
 				    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",
 				    cqe_desc[code], code, cid);
 				    cqe_desc[code], code, cid);
 			spin_lock_bh(&phba->async_pdu_lock);
 			spin_lock_bh(&phba->async_pdu_lock);
-			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
-					     (struct i_t_dpdu_cqe *) sol);
+			/* driver consumes the entry and drops the contents */
+			beiscsi_hdq_process_compl(beiscsi_conn,
+						  (struct i_t_dpdu_cqe *)sol);
 			spin_unlock_bh(&phba->async_pdu_lock);
 			break;
 		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
@@ -2262,60 +2047,32 @@ proc_next_cqe:
 	return total;
 }
 
-void beiscsi_process_all_cqs(struct work_struct *work)
-{
-	unsigned long flags;
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_context_memory *phwi_context;
-	struct beiscsi_hba *phba;
-	struct be_eq_obj *pbe_eq =
-	    container_of(work, struct be_eq_obj, work_cqs);
-
-	phba = pbe_eq->phba;
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_context = phwi_ctrlr->phwi_ctxt;
-
-	if (pbe_eq->todo_mcc_cq) {
-		spin_lock_irqsave(&phba->isr_lock, flags);
-		pbe_eq->todo_mcc_cq = false;
-		spin_unlock_irqrestore(&phba->isr_lock, flags);
-		beiscsi_process_mcc_cq(phba);
-	}
-
-	if (pbe_eq->todo_cq) {
-		spin_lock_irqsave(&phba->isr_lock, flags);
-		pbe_eq->todo_cq = false;
-		spin_unlock_irqrestore(&phba->isr_lock, flags);
-		beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
-	}
-
-	/* rearm EQ for further interrupts */
-	hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
-}
-
 static int be_iopoll(struct irq_poll *iop, int budget)
 {
-	unsigned int ret, num_eq_processed;
+	unsigned int ret, io_events;
 	struct beiscsi_hba *phba;
 	struct be_eq_obj *pbe_eq;
 	struct be_eq_entry *eqe = NULL;
 	struct be_queue_info *eq;
 
-	num_eq_processed = 0;
 	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
 	phba = pbe_eq->phba;
+	if (beiscsi_hba_in_error(phba)) {
+		irq_poll_complete(iop);
+		return 0;
+	}
+
+	io_events = 0;
 	eq = &pbe_eq->q;
 	eqe = queue_tail_node(eq);
-
 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
 			EQE_VALID_MASK) {
 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 		queue_tail_inc(eq);
 		eqe = queue_tail_node(eq);
-		num_eq_processed++;
+		io_events++;
 	}
-
-	hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+	hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);
 
 
 	ret = beiscsi_process_cq(pbe_eq, budget);
 	pbe_eq->cq_count += ret;
@@ -2325,7 +2082,8 @@ static int be_iopoll(struct irq_poll *iop, int budget)
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
 			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
 			    pbe_eq->q.id, ret);
-		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
+		if (!beiscsi_hba_in_error(phba))
+			hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
 	}
 	return ret;
 }
@@ -2691,20 +2449,20 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 					  (ulp_num * MEM_DESCR_OFFSET));
 			phba->mem_req[mem_descr_index] =
 					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-					  sizeof(struct async_pdu_handle);
+					  sizeof(struct hd_async_handle);
 
 
 			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
 					  (ulp_num * MEM_DESCR_OFFSET));
 			phba->mem_req[mem_descr_index] =
 					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-					  sizeof(struct async_pdu_handle);
+					  sizeof(struct hd_async_handle);
 
 
 			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
 					  (ulp_num * MEM_DESCR_OFFSET));
 			phba->mem_req[mem_descr_index] =
-					  sizeof(struct hwi_async_pdu_context) +
+					  sizeof(struct hd_async_context) +
 					 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-					  sizeof(struct hwi_async_entry));
+					  sizeof(struct hd_async_entry));
 		}
 	}
 }
@@ -2963,35 +2721,34 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 	uint8_t ulp_num;
 	struct hwi_controller *phwi_ctrlr;
 	struct hba_parameters *p = &phba->params;
-	struct hwi_async_pdu_context *pasync_ctx;
-	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
+	struct hd_async_context *pasync_ctx;
+	struct hd_async_handle *pasync_header_h, *pasync_data_h;
 	unsigned int index, idx, num_per_mem, num_async_data;
 	struct be_mem_descriptor *mem_descr;
 
 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
+			 /* get async_ctx for each ULP */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
 				     (ulp_num * MEM_DESCR_OFFSET));
 
 			phwi_ctrlr = phba->phwi_ctrlr;
 			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
-				(struct hwi_async_pdu_context *)
+				(struct hd_async_context *)
 				 mem_descr->mem_array[0].virtual_address;
 
 			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
 			memset(pasync_ctx, 0, sizeof(*pasync_ctx));
 
 			pasync_ctx->async_entry =
-					(struct hwi_async_entry *)
+					(struct hd_async_entry *)
 					((long unsigned int)pasync_ctx +
-					sizeof(struct hwi_async_pdu_context));
+					sizeof(struct hd_async_context));
 
 
 			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
 						  ulp_num);
-			pasync_ctx->buffer_size = p->defpdu_hdr_sz;
-
+			/* setup header buffers */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
 				(ulp_num * MEM_DESCR_OFFSET);
@@ -3008,6 +2765,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					    "BM_%d : No Virtual address for ULP : %d\n",
 					    "BM_%d : No Virtual address for ULP : %d\n",
 					    ulp_num);
 					    ulp_num);
 
 
+			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
 			pasync_ctx->async_header.va_base =
 				mem_descr->mem_array[0].virtual_address;
 
@@ -3015,6 +2773,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 				mem_descr->mem_array[0].
 				bus_address.u.a64.address;
 
+			/* setup header buffer sgls */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
 				     (ulp_num * MEM_DESCR_OFFSET);
@@ -3034,6 +2793,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 			pasync_ctx->async_header.ring_base =
 				mem_descr->mem_array[0].virtual_address;
 
+			/* setup header buffer handles */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
 				     (ulp_num * MEM_DESCR_OFFSET);
@@ -3052,9 +2812,9 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
 
 			pasync_ctx->async_header.handle_base =
 				mem_descr->mem_array[0].virtual_address;
-			pasync_ctx->async_header.writables = 0;
 			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
 
+			/* setup data buffer sgls */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
 				     (ulp_num * MEM_DESCR_OFFSET);
@@ -3074,6 +2834,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 			pasync_ctx->async_data.ring_base =
 				mem_descr->mem_array[0].virtual_address;
 
+			/* setup data buffer handles */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
 				     (ulp_num * MEM_DESCR_OFFSET);
@@ -3085,16 +2846,16 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
 
 			pasync_ctx->async_data.handle_base =
 				mem_descr->mem_array[0].virtual_address;
-			pasync_ctx->async_data.writables = 0;
 			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
 
 			pasync_header_h =
-				(struct async_pdu_handle *)
+				(struct hd_async_handle *)
 				pasync_ctx->async_header.handle_base;
 			pasync_data_h =
-				(struct async_pdu_handle *)
+				(struct hd_async_handle *)
 				pasync_ctx->async_data.handle_base;
 
+			/* setup data buffers */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
 			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
 				     (ulp_num * MEM_DESCR_OFFSET);
@@ -3112,6 +2873,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					    ulp_num);
 
 			idx = 0;
+			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
 			pasync_ctx->async_data.va_base =
 				mem_descr->mem_array[idx].virtual_address;
 			pasync_ctx->async_data.pa_base.u.a64.address =
@@ -3125,7 +2887,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 			for (index = 0;	index < BEISCSI_GET_CID_COUNT
 					(phba, ulp_num); index++) {
 				pasync_header_h->cri = -1;
-				pasync_header_h->index = (char)index;
+				pasync_header_h->is_header = 1;
+				pasync_header_h->index = index;
 				INIT_LIST_HEAD(&pasync_header_h->link);
 				pasync_header_h->pbuffer =
 					(void *)((unsigned long)
@@ -3142,14 +2905,13 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					      free_list);
 				pasync_header_h++;
 				pasync_ctx->async_header.free_entries++;
-				pasync_ctx->async_header.writables++;
-
-				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-					       wait_queue.list);
 				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-					       header_busy_list);
+						wq.list);
+				pasync_ctx->async_entry[index].header = NULL;
+
 				pasync_data_h->cri = -1;
-				pasync_data_h->index = (char)index;
+				pasync_data_h->is_header = 0;
+				pasync_data_h->index = index;
 				INIT_LIST_HEAD(&pasync_data_h->link);
 
 				if (!num_async_data) {
@@ -3184,16 +2946,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					      free_list);
 				pasync_data_h++;
 				pasync_ctx->async_data.free_entries++;
-				pasync_ctx->async_data.writables++;
-
-				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
-					       data_busy_list);
+				pasync_ctx->async_entry[index].data = NULL;
 			}
-
-			pasync_ctx->async_header.host_write_ptr = 0;
-			pasync_ctx->async_header.ep_read_ptr = -1;
-			pasync_ctx->async_data.host_write_ptr = 0;
-			pasync_ctx->async_data.ep_read_ptr = -1;
 		}
 	}
 
@@ -3265,8 +3019,8 @@ static int be_fill_queue(struct be_queue_info *q,
 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 			     struct hwi_context_memory *phwi_context)
 {
+	int ret = -ENOMEM, eq_for_mcc;
 	unsigned int i, num_eq_pages;
-	int ret = 0, eq_for_mcc;
 	struct be_queue_info *eq;
 	struct be_dma_mem *mem;
 	void *eq_vaddress;
@@ -3284,8 +3038,8 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 		mem = &eq->dma_mem;
 		phwi_context->be_eq[i].phba = phba;
 		eq_vaddress = pci_alloc_consistent(phba->pcidev,
-						     num_eq_pages * PAGE_SIZE,
-						     &paddr);
+						   num_eq_pages * PAGE_SIZE,
+						   &paddr);
 		if (!eq_vaddress)
 			goto create_eq_error;
 
@@ -3313,6 +3067,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 			    phwi_context->be_eq[i].q.id);
 	}
 	return 0;
+
 create_eq_error:
 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
 		eq = &phwi_context->be_eq[i].q;
@@ -3329,11 +3084,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 			     struct hwi_context_memory *phwi_context)
 {
 	unsigned int i, num_cq_pages;
-	int ret = 0;
 	struct be_queue_info *cq, *eq;
 	struct be_dma_mem *mem;
 	struct be_eq_obj *pbe_eq;
 	void *cq_vaddress;
+	int ret = -ENOMEM;
 	dma_addr_t paddr;
 
 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
@@ -3347,10 +3102,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 		pbe_eq->phba = phba;
 		mem = &cq->dma_mem;
 		cq_vaddress = pci_alloc_consistent(phba->pcidev,
-						     num_cq_pages * PAGE_SIZE,
-						     &paddr);
+						   num_cq_pages * PAGE_SIZE,
+						   &paddr);
 		if (!cq_vaddress)
 			goto create_cq_error;
+
 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
 				    sizeof(struct sol_cqe), cq_vaddress);
 		if (ret) {
@@ -3385,7 +3141,6 @@ create_cq_error:
 					    mem->va, mem->dma);
 	}
 	return ret;
-
 }
 
 static int
@@ -3437,7 +3192,6 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
 		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
 		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
 		    ulp_num,
 		    ulp_num,
 		    phwi_context->be_def_hdrq[ulp_num].id);
 		    phwi_context->be_def_hdrq[ulp_num].id);
-	hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
 	return 0;
 }
 
@@ -3492,11 +3246,9 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
 		    ulp_num,
 		    phwi_context->be_def_dataq[ulp_num].id);
 
-	hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 		    "BM_%d : DEFAULT PDU DATA RING CREATED"
 		    "on ULP : %d\n", ulp_num);
-
 	return 0;
 }
 
@@ -3716,10 +3468,53 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
 
 
 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
 {
-	struct be_queue_info *q;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_dma_mem *ptag_mem;
+	struct be_queue_info *q;
+	int i, tag;
 
 
 	q = &phba->ctrl.mcc_obj.q;
+	for (i = 0; i < MAX_MCC_CMD; i++) {
+		tag = i + 1;
+		if (!test_bit(MCC_TAG_STATE_RUNNING,
+			      &ctrl->ptag_state[tag].tag_state))
+			continue;
+
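+		/* a command that previously timed out still owns its DMA memory; free it */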
+		if (test_bit(MCC_TAG_STATE_TIMEOUT,
+			     &ctrl->ptag_state[tag].tag_state)) {
+			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
+			if (ptag_mem->size) {
+				pci_free_consistent(ctrl->pdev,
+						    ptag_mem->size,
+						    ptag_mem->va,
+						    ptag_mem->dma);
+				ptag_mem->size = 0;
+			}
+			continue;
+		}
+		/**
+		 * If MCC is still active and waiting then wake up the process.
+		 * We are here only because port is going offline. The process
+		 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is
+		 * returned for the operation and allocated memory cleaned up.
+		 */
+		if (waitqueue_active(&ctrl->mcc_wait[tag])) {
+			ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
+			ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
+			wake_up_interruptible(&ctrl->mcc_wait[tag]);
+			/*
+			 * Control tag info gets reinitialized in enable
+			 * so wait for the process to clear running state.
+			 */
+			while (test_bit(MCC_TAG_STATE_RUNNING,
+					&ctrl->ptag_state[tag].tag_state))
+				schedule_timeout_uninterruptible(HZ);
+		}
+		/**
+		 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
+		 * MCC_TAG_STATE_IGNORE nothing needs to be done.
+		 */
+	}
 	if (q->created) {
 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
 		be_queue_free(phba, q);
 		be_queue_free(phba, q);
@@ -3732,68 +3527,6 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
 	}
 	}
 }
 }
 
 
-static void hwi_cleanup(struct beiscsi_hba *phba)
-{
-	struct be_queue_info *q;
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_context_memory *phwi_context;
-	struct hwi_async_pdu_context *pasync_ctx;
-	int i, eq_for_mcc, ulp_num;
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_context = phwi_ctrlr->phwi_ctxt;
-
-	be_cmd_iscsi_remove_template_hdr(ctrl);
-
-	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
-		q = &phwi_context->be_wrbq[i];
-		if (q->created)
-			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
-	}
-	kfree(phwi_context->be_wrbq);
-	free_wrb_handles(phba);
-
-	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
-		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
-			q = &phwi_context->be_def_hdrq[ulp_num];
-			if (q->created)
-				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
-			q = &phwi_context->be_def_dataq[ulp_num];
-			if (q->created)
-				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
-
-			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
-		}
-	}
-
-	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
-
-	for (i = 0; i < (phba->num_cpus); i++) {
-		q = &phwi_context->be_cq[i];
-		if (q->created) {
-			be_queue_free(phba, q);
-			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
-		}
-	}
-
-	be_mcc_queues_destroy(phba);
-	if (phba->msix_enabled)
-		eq_for_mcc = 1;
-	else
-		eq_for_mcc = 0;
-	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
-		q = &phwi_context->be_eq[i].q;
-		if (q->created) {
-			be_queue_free(phba, q);
-			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
-		}
-	}
-	be_cmd_fw_uninit(ctrl);
-}
-
 static int be_mcc_queues_create(struct beiscsi_hba *phba,
 				struct hwi_context_memory *phwi_context)
 {
@@ -3875,7 +3608,119 @@ static void find_num_cpus(struct beiscsi_hba *phba)
 	}
 }
 
-static int hwi_init_port(struct beiscsi_hba *phba)
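+/* drain any valid EQ entries left on all EQs and acknowledge them */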
+static void hwi_purge_eq(struct beiscsi_hba *phba)
+{
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+	struct be_queue_info *eq;
+	struct be_eq_entry *eqe = NULL;
+	int i, eq_msix;
+	unsigned int num_processed;
+
+	if (beiscsi_hba_in_error(phba))
+		return;
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+	if (phba->msix_enabled)
+		eq_msix = 1;
+	else
+		eq_msix = 0;
+
+	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
+		eq = &phwi_context->be_eq[i].q;
+		eqe = queue_tail_node(eq);
+		num_processed = 0;
+		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+					& EQE_VALID_MASK) {
+			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+			queue_tail_inc(eq);
+			eqe = queue_tail_node(eq);
+			num_processed++;
+		}
+
+		if (num_processed)
+			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
+	}
+}
+
+static void hwi_cleanup_port(struct beiscsi_hba *phba)
+{
+	struct be_queue_info *q;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+	struct hd_async_context *pasync_ctx;
+	int i, eq_for_mcc, ulp_num;
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+			beiscsi_cmd_iscsi_cleanup(phba, ulp_num);
+
+	/**
+	 * Purge all EQ entries that may have been left out. This is to
+	 * work around a problem we've seen occasionally where the driver gets an
+	 * interrupt with EQ entry bit set after stopping the controller.
+	 */
+	hwi_purge_eq(phba);
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+
+	be_cmd_iscsi_remove_template_hdr(ctrl);
+
+	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
+		q = &phwi_context->be_wrbq[i];
+		if (q->created)
+			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
+	}
+	kfree(phwi_context->be_wrbq);
+	free_wrb_handles(phba);
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
+
+			q = &phwi_context->be_def_hdrq[ulp_num];
+			if (q->created)
+				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+			q = &phwi_context->be_def_dataq[ulp_num];
+			if (q->created)
+				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
+
+			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
+		}
+	}
+
+	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
+
+	for (i = 0; i < (phba->num_cpus); i++) {
+		q = &phwi_context->be_cq[i];
+		if (q->created) {
+			be_queue_free(phba, q);
+			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+		}
+	}
+
+	be_mcc_queues_destroy(phba);
+	if (phba->msix_enabled)
+		eq_for_mcc = 1;
+	else
+		eq_for_mcc = 0;
+	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
+		q = &phwi_context->be_eq[i].q;
+		if (q->created) {
+			be_queue_free(phba, q);
+			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
+		}
+	}
+	/* this ensures complete FW cleanup */
+	beiscsi_cmd_function_reset(phba);
+	/* last communication, indicate driver is unloading */
+	beiscsi_cmd_special_wrb(&phba->ctrl, 0);
+}
+
+static int hwi_init_port(struct beiscsi_hba *phba)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
@@ -3887,9 +3732,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 	phwi_context->max_eqd = 128;
 	phwi_context->min_eqd = 0;
-	phwi_context->cur_eqd = 0;
-	be_cmd_fw_initialize(&phba->ctrl);
-	/* set optic state to unknown */
+	phwi_context->cur_eqd = 32;
+	/* set port optic state to unknown */
 	phba->optic_state = 0xff;
 
 	status = beiscsi_create_eqs(phba, phwi_context);
@@ -3903,7 +3747,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 	if (status != 0)
 		goto error;
 
-	status = mgmt_check_supported_fw(ctrl, phba);
+	status = beiscsi_check_supported_fw(ctrl, phba);
 	if (status != 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : Unsupported fw version\n");
@@ -3919,7 +3763,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 
 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-
 			def_pdu_ring_sz =
 				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
 				sizeof(struct phys_addr);
@@ -3945,6 +3788,15 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 					    ulp_num);
 				goto error;
 			}
+			/**
+			 * Now that the default PDU rings have been created,
+			 * let EP know about it.
+			 * Call beiscsi_cmd_iscsi_cleanup before posting?
+			 */
+			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
+						 ulp_num);
+			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
+						 ulp_num);
 		}
 	}
 
@@ -3973,7 +3825,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 
 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
 			uint16_t cri = 0;
-			struct hwi_async_pdu_context *pasync_ctx;
+			struct hd_async_context *pasync_ctx;
 
 			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
 				     phwi_ctrlr, ulp_num);
@@ -3985,6 +3837,14 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 					phwi_ctrlr->wrb_context[cri].cid] =
 					async_arr_idx++;
 			}
+			/**
+			 * Now that the default PDU rings have been created,
+			 * let EP know about it.
+			 */
+			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
+						 ulp_num);
+			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
+						 ulp_num);
 		}
 	}
 
@@ -3995,7 +3855,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 error:
 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 		    "BM_%d : hwi_init_port failed");
-	hwi_cleanup(phba);
+	hwi_cleanup_port(phba);
 	return status;
 }
 
@@ -4354,149 +4214,6 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
 			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
 }
 
-/**
- * beiscsi_get_boot_info()- Get the boot session info
- * @phba: The device priv structure instance
- *
- * Get the boot target info and store in driver priv structure
- *
- * return values
- *	Success: 0
- *	Failure: Non-Zero Value
- **/
-static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
-{
-	struct be_cmd_get_session_resp *session_resp;
-	struct be_dma_mem nonemb_cmd;
-	unsigned int tag;
-	unsigned int s_handle;
-	int ret = -ENOMEM;
-
-	/* Get the session handle of the boot target */
-	ret = be_mgmt_get_boot_shandle(phba, &s_handle);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-			    "BM_%d : No boot session\n");
-
-		if (ret == -ENXIO)
-			phba->get_boot = 0;
-
-
-		return ret;
-	}
-	phba->get_boot = 0;
-	nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
-					      sizeof(*session_resp),
-					      &nonemb_cmd.dma);
-	if (nonemb_cmd.va == NULL) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-			    "BM_%d : Failed to allocate memory for"
-			    "beiscsi_get_session_info\n");
-
-		return -ENOMEM;
-	}
-
-	tag = mgmt_get_session_info(phba, s_handle,
-				    &nonemb_cmd);
-	if (!tag) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-			    "BM_%d : beiscsi_get_session_info"
-			    " Failed\n");
-
-		goto boot_freemem;
-	}
-
-	ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-			    "BM_%d : beiscsi_get_session_info Failed");
-
-		if (ret != -EBUSY)
-			goto boot_freemem;
-		else
-			return ret;
-	}
-
-	session_resp = nonemb_cmd.va ;
-
-	memcpy(&phba->boot_sess, &session_resp->session_info,
-	       sizeof(struct mgmt_session_info));
-
-	 beiscsi_logout_fw_sess(phba,
-				phba->boot_sess.session_handle);
-	ret = 0;
-
-boot_freemem:
-	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
-		    nonemb_cmd.va, nonemb_cmd.dma);
-	return ret;
-}
-
-static void beiscsi_boot_release(void *data)
-{
-	struct beiscsi_hba *phba = data;
-
-	scsi_host_put(phba->shost);
-}
-
-static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
-{
-	struct iscsi_boot_kobj *boot_kobj;
-
-	/* it has been created previously */
-	if (phba->boot_kset)
-		return 0;
-
-	/* get boot info using mgmt cmd */
-	if (beiscsi_get_boot_info(phba))
-		/* Try to see if we can carry on without this */
-		return 0;
-
-	phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
-	if (!phba->boot_kset)
-		return -ENOMEM;
-
-	/* get a ref because the show function will ref the phba */
-	if (!scsi_host_get(phba->shost))
-		goto free_kset;
-	boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
-					     beiscsi_show_boot_tgt_info,
-					     beiscsi_tgt_get_attr_visibility,
-					     beiscsi_boot_release);
-	if (!boot_kobj)
-		goto put_shost;
-
-	if (!scsi_host_get(phba->shost))
-		goto free_kset;
-	boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
-						beiscsi_show_boot_ini_info,
-						beiscsi_ini_get_attr_visibility,
-						beiscsi_boot_release);
-	if (!boot_kobj)
-		goto put_shost;
-
-	if (!scsi_host_get(phba->shost))
-		goto free_kset;
-	boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
-					       beiscsi_show_boot_eth_info,
-					       beiscsi_eth_get_attr_visibility,
-					       beiscsi_boot_release);
-	if (!boot_kobj)
-		goto put_shost;
-	return 0;
-
-put_shost:
-	scsi_host_put(phba->shost);
-free_kset:
-	iscsi_boot_destroy_kset(phba->boot_kset);
-	phba->boot_kset = NULL;
-	return -ENOMEM;
-}
-
 static int beiscsi_init_port(struct beiscsi_hba *phba)
 {
 	int ret;
@@ -4516,7 +4233,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
 		goto do_cleanup_ctrlr;
 	}
 
-	if (hba_setup_cid_tbls(phba)) {
+	ret = hba_setup_cid_tbls(phba);
+	if (ret < 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : Failed in hba_setup_cid_tbls\n");
 		kfree(phba->io_sgl_hndl_base);
@@ -4527,61 +4245,15 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
 	return ret;
 
 do_cleanup_ctrlr:
-	hwi_cleanup(phba);
+	hwi_cleanup_port(phba);
 	return ret;
 }
 
-static void hwi_purge_eq(struct beiscsi_hba *phba)
-{
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_context_memory *phwi_context;
-	struct be_queue_info *eq;
-	struct be_eq_entry *eqe = NULL;
-	int i, eq_msix;
-	unsigned int num_processed;
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_context = phwi_ctrlr->phwi_ctxt;
-	if (phba->msix_enabled)
-		eq_msix = 1;
-	else
-		eq_msix = 0;
-
-	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
-		eq = &phwi_context->be_eq[i].q;
-		eqe = queue_tail_node(eq);
-		num_processed = 0;
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-					& EQE_VALID_MASK) {
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-			num_processed++;
-		}
-
-		if (num_processed)
-			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
-	}
-}
-
-static void beiscsi_clean_port(struct beiscsi_hba *phba)
+static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
 {
-	int mgmt_status, ulp_num;
 	struct ulp_cid_info *ptr_cid_info = NULL;
+	int ulp_num;
 
-	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
-		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
-			mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
-			if (mgmt_status)
-				beiscsi_log(phba, KERN_WARNING,
-					    BEISCSI_LOG_INIT,
-					    "BM_%d : mgmt_epfw_cleanup FAILED"
-					    " for ULP_%d\n", ulp_num);
-		}
-	}
-
-	hwi_purge_eq(phba);
-	hwi_cleanup(phba);
 	kfree(phba->io_sgl_hndl_base);
 	kfree(phba->eh_sgl_hndl_base);
 	kfree(phba->ep_array);
@@ -4598,7 +4270,6 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
 			}
 		}
 	}
-
 }
 
 /**
@@ -4625,16 +4296,12 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
 	io_task = task->dd_data;
 
 	if (io_task->pwrb_handle) {
-		memset(io_task->pwrb_handle->pwrb, 0,
-		       sizeof(struct iscsi_wrb));
-		free_wrb_handle(phba, pwrb_context,
-				io_task->pwrb_handle);
+		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
 		io_task->pwrb_handle = NULL;
 	}
 
 	if (io_task->psgl_handle) {
-		free_mgmt_sgl_handle(phba,
-				     io_task->psgl_handle);
+		free_mgmt_sgl_handle(phba, io_task->psgl_handle);
 		io_task->psgl_handle = NULL;
 	}
 
@@ -4671,6 +4338,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
 			      io_task->bhs_pa.u.a64.address);
 		io_task->cmd_bhs = NULL;
+		task->hdr = NULL;
 	}
 
 	if (task->sc) {
@@ -4686,7 +4354,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
 		}
 
 		if (io_task->scsi_cmnd) {
-			scsi_dma_unmap(io_task->scsi_cmnd);
+			if (io_task->num_sg)
+				scsi_dma_unmap(io_task->scsi_cmnd);
 			io_task->scsi_cmnd = NULL;
 		}
 	} else {
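The new num_sg guard closes a double/spurious-unmap hole: scsi_dma_unmap() should be called only when scsi_dma_map() actually mapped data for the command. A hedged sketch of the pairing rule (xmit_map and cleanup_unmap are hypothetical wrappers; scsi_dma_map/scsi_dma_unmap are the real SCSI midlayer helpers):

    #include <scsi/scsi_cmnd.h>

    /* Sketch: record the map result so cleanup can tell "mapped" from "not". */
    static int xmit_map(struct scsi_cmnd *sc, int *num_sg)
    {
    	int n;

    	*num_sg = 0;		/* stays 0 until mapping succeeds */
    	n = scsi_dma_map(sc);
    	if (n < 0)
    		return n;	/* failed: nothing to unmap later */
    	*num_sg = n;		/* may legitimately be 0 (no data) */
    	return 0;
    }

    static void cleanup_unmap(struct scsi_cmnd *sc, int num_sg)
    {
    	if (num_sg)		/* unmap only what was mapped */
    		scsi_dma_unmap(sc);
    }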
@@ -5051,7 +4720,6 @@ static int beiscsi_mtask(struct iscsi_task *task)
 
 	cid = beiscsi_conn->beiscsi_conn_cid;
 	pwrb = io_task->pwrb_handle->pwrb;
-	memset(pwrb, 0, sizeof(*pwrb));
 
 	if (is_chip_be2_be3r(phba)) {
 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
@@ -5165,6 +4833,15 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 	int num_sg;
 	unsigned int  writedir = 0, xferlen = 0;
 
+	phba = io_task->conn->phba;
+	/**
+	 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
+	 * operational if FW still gets heartbeat from EP FW. Is management
+	 * path really needed to continue further?
+	 */
+	if (!beiscsi_hba_is_online(phba))
+		return -EIO;
+
 	if (!io_task->conn->login_in_progress)
 		task->hdr->exp_statsn = 0;
 
@@ -5172,8 +4849,8 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 		return beiscsi_mtask(task);
 
 	io_task->scsi_cmnd = sc;
+	io_task->num_sg = 0;
 	num_sg = scsi_dma_map(sc);
-	phba = io_task->conn->phba;
 	if (num_sg < 0) {
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
@@ -5184,6 +4861,11 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 
 		return num_sg;
 	}
+	/**
+	 * For scsi cmd task, check num_sg before unmapping in cleanup_task.
+	 * For management task, cleanup_task checks mtask_addr before unmapping.
+	 */
+	io_task->num_sg = num_sg;
 	xferlen = scsi_bufflen(sc);
 	sg = scsi_sglist(sc);
 	if (sc->sc_data_direction == DMA_TO_DEVICE)
@@ -5213,6 +4895,12 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 	shost = iscsi_job_to_shost(job);
 	phba = iscsi_host_priv(shost);
 
+	if (!beiscsi_hba_is_online(phba)) {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BM_%d : HBA in error 0x%lx\n", phba->state);
+		return -ENXIO;
+	}
+
 	switch (bsg_req->msgcode) {
 	case ISCSI_BSG_HST_VENDOR:
 		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -5240,6 +4928,14 @@ static int beiscsi_bsg_request(struct bsg_job *job)
 					phba->ctrl.mcc_tag_status[tag],
 					msecs_to_jiffies(
 					BEISCSI_HOST_MBX_TIMEOUT));
+
+		if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+			clear_bit(MCC_TAG_STATE_RUNNING,
+				  &phba->ctrl.ptag_state[tag].tag_state);
+			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+					    nonemb_cmd.va, nonemb_cmd.dma);
+			return -EIO;
+		}
 		extd_status = (phba->ctrl.mcc_tag_status[tag] &
 			       CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
 		status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
@@ -5283,106 +4979,294 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
 	beiscsi_log_enable_init(phba, beiscsi_log_enable);
 }
 
-/*
- * beiscsi_quiesce()- Cleanup Driver resources
- * @phba: Instance Priv structure
- * @unload_state:i Clean or EEH unload state
- *
- * Free the OS and HW resources held by the driver
- **/
-static void beiscsi_quiesce(struct beiscsi_hba *phba,
-		uint32_t unload_state)
+void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
 {
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_context_memory *phwi_context;
-	struct be_eq_obj *pbe_eq;
-	unsigned int i, msix_vec;
+	if (phba->boot_struct.boot_kset)
+		return;
 
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_context = phwi_ctrlr->phwi_ctxt;
-	hwi_disable_intr(phba);
-	if (phba->msix_enabled) {
-		for (i = 0; i <= phba->num_cpus; i++) {
-			msix_vec = phba->msix_entries[i].vector;
-			free_irq(msix_vec, &phwi_context->be_eq[i]);
-			kfree(phba->msi_name[i]);
-		}
-	} else
-		if (phba->pcidev->irq)
-			free_irq(phba->pcidev->irq, phba);
-	pci_disable_msix(phba->pcidev);
-	cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
+	/* skip if boot work is already in progress */
+	if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
+		return;
 
-	for (i = 0; i < phba->num_cpus; i++) {
-		pbe_eq = &phwi_context->be_eq[i];
-		irq_poll_disable(&pbe_eq->iopoll);
+	phba->boot_struct.retry = 3;
+	phba->boot_struct.tag = 0;
+	phba->boot_struct.s_handle = s_handle;
+	phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
+	schedule_work(&phba->boot_work);
+}
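beiscsi_start_boot_work() relies on test_and_set_bit() to make the "one boot work in flight" check atomic: the first caller sees the bit clear and schedules the work, racing callers see it set and return. Generic shape of the idiom (kick_once and the bit index are illustrative, not driver names):

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    #define MY_WORK_BUSY	0	/* illustrative bit index */

    static void kick_once(unsigned long *flags, struct work_struct *work)
    {
    	/* returns the old bit: non-zero means someone else got here first */
    	if (test_and_set_bit(MY_WORK_BUSY, flags))
    		return;
    	schedule_work(work);	/* the work clears the bit when it finishes */
    }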
+
+static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
+{
+	struct beiscsi_hba *phba = data;
+	struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
+	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
+	char *str = buf;
+	int rc = -EPERM;
+
+	switch (type) {
+	case ISCSI_BOOT_TGT_NAME:
+		rc = sprintf(buf, "%.*s\n",
+			    (int)strlen(boot_sess->target_name),
+			    (char *)&boot_sess->target_name);
+		break;
+	case ISCSI_BOOT_TGT_IP_ADDR:
+		if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
+			rc = sprintf(buf, "%pI4\n",
+				(char *)&boot_conn->dest_ipaddr.addr);
+		else
+			rc = sprintf(str, "%pI6\n",
+				(char *)&boot_conn->dest_ipaddr.addr);
+		break;
+	case ISCSI_BOOT_TGT_PORT:
+		rc = sprintf(str, "%d\n", boot_conn->dest_port);
+		break;
+
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+		rc = sprintf(str,  "%.*s\n",
+			     boot_conn->negotiated_login_options.auth_data.chap.
+			     target_chap_name_length,
+			     (char *)&boot_conn->negotiated_login_options.
+			     auth_data.chap.target_chap_name);
+		break;
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+		rc = sprintf(str,  "%.*s\n",
+			     boot_conn->negotiated_login_options.auth_data.chap.
+			     target_secret_length,
+			     (char *)&boot_conn->negotiated_login_options.
+			     auth_data.chap.target_secret);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+		rc = sprintf(str,  "%.*s\n",
+			     boot_conn->negotiated_login_options.auth_data.chap.
+			     intr_chap_name_length,
+			     (char *)&boot_conn->negotiated_login_options.
+			     auth_data.chap.intr_chap_name);
+		break;
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+		rc = sprintf(str,  "%.*s\n",
+			     boot_conn->negotiated_login_options.auth_data.chap.
+			     intr_secret_length,
+			     (char *)&boot_conn->negotiated_login_options.
+			     auth_data.chap.intr_secret);
+		break;
+	case ISCSI_BOOT_TGT_FLAGS:
+		rc = sprintf(str, "2\n");
+		break;
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+		rc = sprintf(str, "0\n");
+		break;
 	}
+	return rc;
+}
 
-	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
-		destroy_workqueue(phba->wq);
-		beiscsi_clean_port(phba);
-		beiscsi_free_mem(phba);
+static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
+{
+	struct beiscsi_hba *phba = data;
+	char *str = buf;
+	int rc = -EPERM;
 
-		beiscsi_unmap_pci_function(phba);
-		pci_free_consistent(phba->pcidev,
-				    phba->ctrl.mbox_mem_alloced.size,
-				    phba->ctrl.mbox_mem_alloced.va,
-				    phba->ctrl.mbox_mem_alloced.dma);
-	} else {
-		hwi_purge_eq(phba);
-		hwi_cleanup(phba);
+	switch (type) {
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		rc = sprintf(str, "%s\n",
+			     phba->boot_struct.boot_sess.initiator_iscsiname);
+		break;
 	}
+	return rc;
+}
+
+static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
+{
+	struct beiscsi_hba *phba = data;
+	char *str = buf;
+	int rc = -EPERM;
 
+	switch (type) {
+	case ISCSI_BOOT_ETH_FLAGS:
+		rc = sprintf(str, "2\n");
+		break;
+	case ISCSI_BOOT_ETH_INDEX:
+		rc = sprintf(str, "0\n");
+		break;
+	case ISCSI_BOOT_ETH_MAC:
+		rc  = beiscsi_get_macaddr(str, phba);
+		break;
+	}
+	return rc;
 }
 
-static void beiscsi_remove(struct pci_dev *pcidev)
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
 {
-	struct beiscsi_hba *phba = NULL;
+	umode_t rc = 0;
 
-	phba = pci_get_drvdata(pcidev);
-	if (!phba) {
-		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
-		return;
+	switch (type) {
+	case ISCSI_BOOT_TGT_NAME:
+	case ISCSI_BOOT_TGT_IP_ADDR:
+	case ISCSI_BOOT_TGT_PORT:
+	case ISCSI_BOOT_TGT_CHAP_NAME:
+	case ISCSI_BOOT_TGT_CHAP_SECRET:
+	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
+	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
+	case ISCSI_BOOT_TGT_NIC_ASSOC:
+	case ISCSI_BOOT_TGT_FLAGS:
+		rc = S_IRUGO;
+		break;
 	}
+	return rc;
+}
 
-	beiscsi_destroy_def_ifaces(phba);
-	iscsi_boot_destroy_kset(phba->boot_kset);
-	iscsi_host_remove(phba->shost);
-	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);
-	pci_dev_put(phba->pcidev);
-	iscsi_host_free(phba->shost);
-	pci_disable_pcie_error_reporting(pcidev);
-	pci_set_drvdata(pcidev, NULL);
-	pci_release_regions(pcidev);
-	pci_disable_device(pcidev);
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+{
+	umode_t rc = 0;
+
+	switch (type) {
+	case ISCSI_BOOT_INI_INITIATOR_NAME:
+		rc = S_IRUGO;
+		break;
+	}
+	return rc;
 }
 
-static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
 {
-	int i, status;
+	umode_t rc = 0;
 
-	for (i = 0; i <= phba->num_cpus; i++)
-		phba->msix_entries[i].entry = i;
+	switch (type) {
+	case ISCSI_BOOT_ETH_FLAGS:
+	case ISCSI_BOOT_ETH_MAC:
+	case ISCSI_BOOT_ETH_INDEX:
+		rc = S_IRUGO;
+		break;
+	}
+	return rc;
+}
 
-	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
-				       phba->num_cpus + 1, phba->num_cpus + 1);
-	if (status > 0)
-		phba->msix_enabled = true;
+static void beiscsi_boot_kobj_release(void *data)
+{
+	struct beiscsi_hba *phba = data;
+
+	scsi_host_put(phba->shost);
+}
+
+static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
+{
+	struct boot_struct *bs = &phba->boot_struct;
+	struct iscsi_boot_kobj *boot_kobj;
+
+	if (bs->boot_kset) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BM_%d: boot_kset already created\n");
+		return 0;
+	}
+
+	bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
+	if (!bs->boot_kset) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BM_%d: boot_kset alloc failed\n");
+		return -ENOMEM;
+	}
+
+	/* get shost ref because the show function will refer phba */
+	if (!scsi_host_get(phba->shost))
+		goto free_kset;
+
+	boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
+					     beiscsi_show_boot_tgt_info,
+					     beiscsi_tgt_get_attr_visibility,
+					     beiscsi_boot_kobj_release);
+	if (!boot_kobj)
+		goto put_shost;
+
+	if (!scsi_host_get(phba->shost))
+		goto free_kset;
+
+	boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
+						beiscsi_show_boot_ini_info,
+						beiscsi_ini_get_attr_visibility,
+						beiscsi_boot_kobj_release);
+	if (!boot_kobj)
+		goto put_shost;
+
+	if (!scsi_host_get(phba->shost))
+		goto free_kset;
+
+	boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
+					       beiscsi_show_boot_eth_info,
+					       beiscsi_eth_get_attr_visibility,
+					       beiscsi_boot_kobj_release);
+	if (!boot_kobj)
+		goto put_shost;
+
+	return 0;
+
+put_shost:
+	scsi_host_put(phba->shost);
+free_kset:
+	iscsi_boot_destroy_kset(bs->boot_kset);
+	bs->boot_kset = NULL;
+	return -ENOMEM;
+}
+
+static void beiscsi_boot_work(struct work_struct *work)
+{
+	struct beiscsi_hba *phba =
+		container_of(work, struct beiscsi_hba, boot_work);
+	struct boot_struct *bs = &phba->boot_struct;
+	unsigned int tag = 0;
 
-	return;
+	if (!beiscsi_hba_is_online(phba))
+		return;
+
+	beiscsi_log(phba, KERN_INFO,
+		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+		    "BM_%d : %s action %d\n",
+		    __func__, phba->boot_struct.action);
+
+	switch (phba->boot_struct.action) {
+	case BEISCSI_BOOT_REOPEN_SESS:
+		tag = beiscsi_boot_reopen_sess(phba);
+		break;
+	case BEISCSI_BOOT_GET_SHANDLE:
+		tag = __beiscsi_boot_get_shandle(phba, 1);
+		break;
+	case BEISCSI_BOOT_GET_SINFO:
+		tag = beiscsi_boot_get_sinfo(phba);
+		break;
+	case BEISCSI_BOOT_LOGOUT_SESS:
+		tag = beiscsi_boot_logout_sess(phba);
+		break;
+	case BEISCSI_BOOT_CREATE_KSET:
+		beiscsi_boot_create_kset(phba);
+		/**
+		 * updated boot_kset is made visible to all before
+		 * ending the boot work.
+		 */
+		mb();
+		clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+		return;
+	}
+	if (!tag) {
+		if (bs->retry--)
+			schedule_work(&phba->boot_work);
+		else
+			clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+	}
 }
 
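beiscsi_boot_work() is a small state machine driven by boot_struct.action: each pass issues one asynchronous MCC command, the completion path advances the action and reschedules, and a retry budget bounds failed submissions. A condensed sketch of the pattern (my_ctx and the step names are hypothetical):

    #include <linux/workqueue.h>

    enum my_step { STEP_OPEN = 1, STEP_QUERY, STEP_FINISH };

    struct my_ctx {
    	struct work_struct work;
    	enum my_step step;
    	int retry;
    };

    static void my_state_machine(struct work_struct *work)
    {
    	struct my_ctx *ctx = container_of(work, struct my_ctx, work);
    	unsigned int tag = 0;

    	switch (ctx->step) {
    	case STEP_OPEN:		/* issue async cmd; completion sets STEP_QUERY */
    		tag = 1;	/* stand-in for a successful submission */
    		break;
    	case STEP_QUERY:	/* issue next cmd; completion sets STEP_FINISH */
    		tag = 1;
    		break;
    	case STEP_FINISH:	/* terminal: publish results, clear busy bit */
    		return;
    	}
    	if (!tag && ctx->retry--)	/* submission failed: bounded retry */
    		schedule_work(&ctx->work);
    }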
-static void be_eqd_update(struct beiscsi_hba *phba)
+static void beiscsi_eqd_update_work(struct work_struct *work)
 {
+	struct hwi_context_memory *phwi_context;
 	struct be_set_eqd set_eqd[MAX_CPUS];
-	struct be_aic_obj *aic;
-	struct be_eq_obj *pbe_eq;
 	struct hwi_controller *phwi_ctrlr;
-	struct hwi_context_memory *phwi_context;
+	struct be_eq_obj *pbe_eq;
+	struct beiscsi_hba *phba;
+	unsigned int pps, delta;
+	struct be_aic_obj *aic;
 	int eqd, i, num = 0;
-	ulong now;
-	u32 pps, delta;
-	unsigned int tag;
+	unsigned long now;
+
+	phba = container_of(work, struct beiscsi_hba, eqd_update.work);
+	if (!beiscsi_hba_is_online(phba))
+		return;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -5391,13 +5275,13 @@ static void be_eqd_update(struct beiscsi_hba *phba)
 		aic = &phba->aic_obj[i];
 		pbe_eq = &phwi_context->be_eq[i];
 		now = jiffies;
-		if (!aic->jiffs || time_before(now, aic->jiffs) ||
+		if (!aic->jiffies || time_before(now, aic->jiffies) ||
 		    pbe_eq->cq_count < aic->eq_prev) {
-			aic->jiffs = now;
+			aic->jiffies = now;
 			aic->eq_prev = pbe_eq->cq_count;
 			continue;
 		}
-		delta = jiffies_to_msecs(now - aic->jiffs);
+		delta = jiffies_to_msecs(now - aic->jiffies);
 		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
 		eqd = (pps / 1500) << 2;
 
@@ -5406,7 +5290,7 @@ static void be_eqd_update(struct beiscsi_hba *phba)
 		eqd = min_t(u32, eqd, phwi_context->max_eqd);
 		eqd = max_t(u32, eqd, phwi_context->min_eqd);
 
-		aic->jiffs = now;
+		aic->jiffies = now;
 		aic->eq_prev = pbe_eq->cq_count;
 
 		if (eqd != aic->prev_eqd) {
@@ -5416,53 +5300,242 @@ static void be_eqd_update(struct beiscsi_hba *phba)
 			num++;
 		}
 	}
-	if (num) {
-		tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
-		if (tag)
-			beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+	if (num)
+		/* completion of this is ignored */
+		beiscsi_modify_eq_delay(phba, set_eqd, num);
+
+	schedule_delayed_work(&phba->eqd_update,
+			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
+}
+
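The arithmetic in beiscsi_eqd_update_work() converts a completion rate into an interrupt-coalescing delay: pps is completions per second over the sampling window, and eqd grows by 4 for every 1500 pps before being clamped. Worked standalone form (calc_eqd is a hypothetical helper; for example, 30000 completions over 1000 ms gives pps = 30000 and eqd = (30000 / 1500) << 2 = 80, inside the 0..128 clamp set in hwi_init_port):

    typedef unsigned int u32;	/* stand-in for the kernel typedef */

    static u32 calc_eqd(u32 cq_delta, u32 delta_ms, u32 min_eqd, u32 max_eqd)
    {
    	u32 pps = (cq_delta * 1000) / delta_ms;	/* completions per second */
    	u32 eqd = (pps / 1500) << 2;		/* 4 units per 1500 pps */

    	if (eqd > max_eqd)
    		eqd = max_eqd;
    	if (eqd < min_eqd)
    		eqd = min_eqd;
    	return eqd;
    }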
+static void beiscsi_msix_enable(struct beiscsi_hba *phba)
+{
+	int i, status;
+
+	for (i = 0; i <= phba->num_cpus; i++)
+		phba->msix_entries[i].entry = i;
+
+	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
+				       phba->num_cpus + 1, phba->num_cpus + 1);
+	if (status > 0)
+		phba->msix_enabled = true;
+}
+
+static void beiscsi_hw_tpe_check(unsigned long ptr)
+{
+	struct beiscsi_hba *phba;
+	u32 wait;
+
+	phba = (struct beiscsi_hba *)ptr;
+	/* if not TPE, do nothing */
+	if (!beiscsi_detect_tpe(phba))
+		return;
+
+	/* wait default 4000ms before recovering */
+	wait = 4000;
+	if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
+		wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
+	queue_delayed_work(phba->wq, &phba->recover_port,
+			   msecs_to_jiffies(wait));
+}
+
+static void beiscsi_hw_health_check(unsigned long ptr)
+{
+	struct beiscsi_hba *phba;
+
+	phba = (struct beiscsi_hba *)ptr;
+	beiscsi_detect_ue(phba);
+	if (beiscsi_detect_ue(phba)) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BM_%d : port in error: %lx\n", phba->state);
+		/* sessions are no longer valid, so first fail the sessions */
+		queue_work(phba->wq, &phba->sess_work);
+
+		/* detect UER supported */
+		if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
+			return;
+		/* modify this timer to check TPE */
+		phba->hw_check.function = beiscsi_hw_tpe_check;
 	}
+
+	mod_timer(&phba->hw_check,
+		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
 }
 
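beiscsi_hw_health_check() shows a timer that re-targets itself: once UE is detected it swaps its own callback to the TPE check and re-arms, so the next tick runs recovery logic instead. Minimal shape of the idiom under the pre-4.15 timer API this driver uses (my_dev and the predicates are hypothetical):

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/bitops.h>

    struct my_dev {
    	struct timer_list timer;
    	unsigned long state;
    };

    static void recovery_check(unsigned long ptr)
    {
    	/* TPE-style check would run here, then kick recovery work */
    }

    static void health_check(unsigned long ptr)
    {
    	struct my_dev *dev = (struct my_dev *)ptr;

    	if (test_bit(0, &dev->state))		/* error seen: change role */
    		dev->timer.function = recovery_check;
    	mod_timer(&dev->timer, jiffies + msecs_to_jiffies(1000));
    }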
-static void be_check_boot_session(struct beiscsi_hba *phba)
+/*
+ * beiscsi_enable_port()- Enables the disabled port.
+ * Only port resources freed in disable function are reallocated.
+ * This is called in HBA error handling path.
+ *
+ * @phba: Instance of driver private structure
+ *
+ **/
+static int beiscsi_enable_port(struct beiscsi_hba *phba)
 {
-	if (beiscsi_setup_boot_info(phba))
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Could not set up "
-			    "iSCSI boot info on async event.\n");
+	struct hwi_context_memory *phwi_context;
+	struct hwi_controller *phwi_ctrlr;
+	struct be_eq_obj *pbe_eq;
+	int ret, i;
+
+	if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BM_%d : %s : port is online %lx\n",
+			      __func__, phba->state);
+		return 0;
+	}
+
+	ret = beiscsi_init_sliport(phba);
+	if (ret)
+		return ret;
+
+	if (enable_msix)
+		find_num_cpus(phba);
+	else
+		phba->num_cpus = 1;
+	if (enable_msix) {
+		beiscsi_msix_enable(phba);
+		if (!phba->msix_enabled)
+			phba->num_cpus = 1;
+	}
+
+	beiscsi_get_params(phba);
+	/* Re-enable UER. If different TPE occurs then it is recoverable. */
+	beiscsi_set_uer_feature(phba);
+
+	phba->shost->max_id = phba->params.cxns_per_ctrl;
+	phba->shost->can_queue = phba->params.ios_per_ctrl;
+	ret = hwi_init_controller(phba);
+	if (ret) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BM_%d : init controller failed %d\n", ret);
+		goto disable_msix;
+	}
+
+	for (i = 0; i < MAX_MCC_CMD; i++) {
+		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
+		phba->ctrl.mcc_tag[i] = i + 1;
+		phba->ctrl.mcc_tag_status[i + 1] = 0;
+		phba->ctrl.mcc_tag_available++;
+	}
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
+	}
+
+	i = (phba->msix_enabled) ? i : 0;
+	/* Work item for MCC handling */
+	pbe_eq = &phwi_context->be_eq[i];
+	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
+
+	ret = beiscsi_init_irqs(phba);
+	if (ret < 0) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BM_%d : setup IRQs failed %d\n", ret);
+		goto cleanup_port;
+	}
+	hwi_enable_intr(phba);
+	/* port operational: clear all error bits */
+	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
+	__beiscsi_log(phba, KERN_INFO,
+		      "BM_%d : port online: 0x%lx\n", phba->state);
+
+	/* start hw_check timer and eqd_update work */
+	schedule_delayed_work(&phba->eqd_update,
+			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
+
+	/**
+	 * Timer function gets modified for TPE detection.
+	 * Always reinit to do health check first.
+	 */
+	phba->hw_check.function = beiscsi_hw_health_check;
+	mod_timer(&phba->hw_check,
+		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
+	return 0;
+
+cleanup_port:
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		irq_poll_disable(&pbe_eq->iopoll);
+	}
+	hwi_cleanup_port(phba);
+
+disable_msix:
+	if (phba->msix_enabled)
+		pci_disable_msix(phba->pcidev);
+
+	return ret;
 }
 
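beiscsi_enable_port() uses the kernel's standard reverse-order goto unwind: each failing step jumps to a label that tears down only what earlier steps built (cleanup_port, then disable_msix). The skeleton of the idiom, with hypothetical steps:

    static int step_a(void) { return 0; }	/* hypothetical setup steps */
    static int step_b(void) { return 0; }
    static void undo_a(void) { }

    static int enable_example(void)
    {
    	int ret;

    	ret = step_a();
    	if (ret)
    		return ret;		/* nothing to unwind yet */

    	ret = step_b();
    	if (ret)
    		goto unwind_a;		/* undo step_a only */

    	return 0;

    unwind_a:
    	undo_a();
    	return ret;
    }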
 /*
- * beiscsi_hw_health_check()- Check adapter health
- * @work: work item to check HW health
+ * beiscsi_disable_port()- Disable port and cleanup driver resources.
+ * This is called in HBA error handling and driver removal.
+ * @phba: Instance Priv structure
+ * @unload: indicate driver is unloading
  *
- * Check if adapter in an unrecoverable state or not.
+ * Free the OS and HW resources held by the driver
  **/
-static void
-beiscsi_hw_health_check(struct work_struct *work)
+static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
 {
-	struct beiscsi_hba *phba =
-		container_of(work, struct beiscsi_hba,
-			     beiscsi_hw_check_task.work);
+	struct hwi_context_memory *phwi_context;
+	struct hwi_controller *phwi_ctrlr;
+	struct be_eq_obj *pbe_eq;
+	unsigned int i, msix_vec;
 
-	be_eqd_update(phba);
+	if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
+		return;
 
-	if (phba->state & BE_ADAPTER_CHECK_BOOT) {
-		if ((phba->get_boot > 0) && (!phba->boot_kset)) {
-			phba->get_boot--;
-			if (!(phba->get_boot % BE_GET_BOOT_TO))
-				be_check_boot_session(phba);
-		} else {
-			phba->state &= ~BE_ADAPTER_CHECK_BOOT;
-			phba->get_boot = 0;
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+	hwi_disable_intr(phba);
+	if (phba->msix_enabled) {
+		for (i = 0; i <= phba->num_cpus; i++) {
+			msix_vec = phba->msix_entries[i].vector;
+			free_irq(msix_vec, &phwi_context->be_eq[i]);
+			kfree(phba->msi_name[i]);
 		}
+	} else
+		if (phba->pcidev->irq)
+			free_irq(phba->pcidev->irq, phba);
+	pci_disable_msix(phba->pcidev);
+
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		irq_poll_disable(&pbe_eq->iopoll);
+	}
+	cancel_delayed_work_sync(&phba->eqd_update);
+	cancel_work_sync(&phba->boot_work);
+	/* WQ might be running cancel queued mcc_work if we are not exiting */
+	if (!unload && beiscsi_hba_in_error(phba)) {
+		pbe_eq = &phwi_context->be_eq[i];
+		cancel_work_sync(&pbe_eq->mcc_work);
 	}
+	hwi_cleanup_port(phba);
+}
 
-	beiscsi_ue_detect(phba);
+static void beiscsi_sess_work(struct work_struct *work)
+{
+	struct beiscsi_hba *phba;
 
-	schedule_delayed_work(&phba->beiscsi_hw_check_task,
-			      msecs_to_jiffies(1000));
+	phba = container_of(work, struct beiscsi_hba, sess_work);
+	/*
+	 * This work gets scheduled only in case of HBA error.
+	 * Old sessions are gone so need to be re-established.
+	 * iscsi_session_failure needs process context hence this work.
+	 */
+	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
 }
 
+static void beiscsi_recover_port(struct work_struct *work)
+{
+	struct beiscsi_hba *phba;
+
+	phba = container_of(work, struct beiscsi_hba, recover_port.work);
+	beiscsi_disable_port(phba, 0);
+	beiscsi_enable_port(phba);
+}
 
 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
 		pci_channel_state_t state)
@@ -5470,12 +5543,18 @@ static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
 	struct beiscsi_hba *phba = NULL;
 
 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
-	phba->state |= BE_ADAPTER_PCI_ERR;
+	set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
 
 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 		    "BM_%d : EEH error detected\n");
 
-	beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD);
+	/* first stop UE detection when PCI error detected */
+	del_timer_sync(&phba->hw_check);
+	cancel_delayed_work_sync(&phba->recover_port);
+
+	/* sessions are no longer valid, so first fail the sessions */
+	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
+	beiscsi_disable_port(phba, 0);
 
 	if (state == pci_channel_io_perm_failure) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5515,9 +5594,8 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 
-	/* Wait for the CHIP Reset to complete */
-	status = be_chk_reset_complete(phba);
-	if (!status) {
+	status = beiscsi_check_fw_rdy(phba);
+	if (status) {
 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
 			    "BM_%d : EEH Reset Completed\n");
 	} else {
@@ -5532,87 +5610,16 @@
 
 static void beiscsi_eeh_resume(struct pci_dev *pdev)
 {
-	int ret = 0, i;
-	struct be_eq_obj *pbe_eq;
-	struct beiscsi_hba *phba = NULL;
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_context_memory *phwi_context;
+	struct beiscsi_hba *phba;
+	int ret;
 
 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
 	pci_save_state(pdev);
 
-	if (enable_msix)
-		find_num_cpus(phba);
-	else
-		phba->num_cpus = 1;
-
-	if (enable_msix) {
-		beiscsi_msix_enable(phba);
-		if (!phba->msix_enabled)
-			phba->num_cpus = 1;
-	}
-
-	ret = beiscsi_cmd_reset_function(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Reset Failed\n");
-		goto ret_err;
-	}
-
-	ret = be_chk_reset_complete(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Failed to get out of reset.\n");
-		goto ret_err;
-	}
-
-	beiscsi_get_params(phba);
-	phba->shost->max_id = phba->params.cxns_per_ctrl;
-	phba->shost->can_queue = phba->params.ios_per_ctrl;
-	ret = hwi_init_controller(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : beiscsi_eeh_resume -"
-			     "Failed to initialize beiscsi_hba.\n");
-		goto ret_err;
-	}
-
-	for (i = 0; i < MAX_MCC_CMD; i++) {
-		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
-		phba->ctrl.mcc_tag[i] = i + 1;
-		phba->ctrl.mcc_tag_status[i + 1] = 0;
-		phba->ctrl.mcc_tag_available++;
-	}
-
-	phwi_ctrlr = phba->phwi_ctrlr;
-	phwi_context = phwi_ctrlr->phwi_ctxt;
-
-	for (i = 0; i < phba->num_cpus; i++) {
-		pbe_eq = &phwi_context->be_eq[i];
-		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
-				be_iopoll);
-	}
-
-	i = (phba->msix_enabled) ? i : 0;
-	/* Work item for MCC handling */
-	pbe_eq = &phwi_context->be_eq[i];
-	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-
-	ret = beiscsi_init_irqs(phba);
-	if (ret < 0) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : beiscsi_eeh_resume - "
-			    "Failed to beiscsi_init_irqs\n");
-		goto ret_err;
-	}
-
-	hwi_enable_intr(phba);
-	phba->state &= ~BE_ADAPTER_PCI_ERR;
-
-	return;
-ret_err:
-	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-		    "BM_%d : AER EEH Resume Failed\n");
+	ret = beiscsi_enable_port(phba);
+	if (ret)
+		__beiscsi_log(phba, KERN_ERR,
+			      "BM_%d : AER EEH resume failed\n");
 }
 
 static int beiscsi_dev_probe(struct pci_dev *pcidev,
@@ -5622,7 +5629,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
 	struct be_eq_obj *pbe_eq;
-	int ret = 0, i;
+	unsigned int s_handle;
+	int ret, i;
 
 	ret = beiscsi_enable_pci(pcidev);
 	if (ret < 0) {
@@ -5635,6 +5643,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	if (!phba) {
 		dev_err(&pcidev->dev,
 			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
+		ret = -ENOMEM;
 		goto disable_pci;
 	}
 
@@ -5650,10 +5659,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	/* Initialize Driver configuration Paramters */
 	beiscsi_hba_attrs_init(phba);
 
-	phba->fw_timeout = false;
 	phba->mac_addr_set = false;
 
-
 	switch (pcidev->device) {
 	case BE_DEVICE_ID1:
 	case OC_DEVICE_ID1:
@@ -5677,39 +5684,26 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	ret = be_ctrl_init(phba, pcidev);
 	if (ret) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : beiscsi_dev_probe-"
-			    "Failed in be_ctrl_init\n");
+			    "BM_%d : be_ctrl_init failed\n");
 		goto hba_free;
 	}
 
-	/*
-	 * FUNCTION_RESET should clean up any stale info in FW for this fn
-	 */
-	ret = beiscsi_cmd_reset_function(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Reset Failed\n");
-		goto hba_free;
-	}
-	ret = be_chk_reset_complete(phba);
-	if (ret) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Failed to get out of reset.\n");
+	ret = beiscsi_init_sliport(phba);
+	if (ret)
 		goto hba_free;
-	}
 
 	spin_lock_init(&phba->io_sgl_lock);
 	spin_lock_init(&phba->mgmt_sgl_lock);
-	spin_lock_init(&phba->isr_lock);
 	spin_lock_init(&phba->async_pdu_lock);
-	ret = mgmt_get_fw_config(&phba->ctrl, phba);
+	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
 	if (ret != 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : Error getting fw config\n");
 		goto free_port;
 	}
-	mgmt_get_port_name(&phba->ctrl, phba);
+	beiscsi_get_port_name(&phba->ctrl, phba);
 	beiscsi_get_params(phba);
+	beiscsi_set_uer_feature(phba);
 
 	if (enable_msix)
 		find_num_cpus(phba);
@@ -5754,25 +5748,24 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : beiscsi_dev_probe-"
 			    "Failed to allocate work queue\n");
+		ret = -ENOMEM;
 		goto free_twq;
 	}
 
-	INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
-			  beiscsi_hw_health_check);
+	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
-				be_iopoll);
+		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
 	}
 
 	i = (phba->msix_enabled) ? i : 0;
 	/* Work item for MCC handling */
 	pbe_eq = &phwi_context->be_eq[i];
-	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
 
 	ret = beiscsi_init_irqs(phba);
 	if (ret < 0) {
@@ -5783,22 +5776,42 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	}
 	hwi_enable_intr(phba);
 
-	if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
+	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
+	if (ret)
 		goto free_blkenbld;
 
-	if (beiscsi_setup_boot_info(phba))
-		/*
-		 * log error but continue, because we may not be using
-		 * iscsi boot.
+	/* set online bit after port is operational */
+	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
+	__beiscsi_log(phba, KERN_INFO,
+		      "BM_%d : port online: 0x%lx\n", phba->state);
+
+	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
+	ret = beiscsi_boot_get_shandle(phba, &s_handle);
+	if (ret > 0) {
+		beiscsi_start_boot_work(phba, s_handle);
+		/**
+		 * Set this bit after starting the work to let
+		 * probe handle it first.
+		 * An ASYNC event can also schedule this work.
 		 */
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Could not set up "
-			    "iSCSI boot info.\n");
+		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
+	}
 
-	beiscsi_create_def_ifaces(phba);
-	schedule_delayed_work(&phba->beiscsi_hw_check_task,
-			      msecs_to_jiffies(1000));
+	beiscsi_iface_create_default(phba);
+	schedule_delayed_work(&phba->eqd_update,
+			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
 
+	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
+	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
+	/**
+	 * Start UE detection here. UE before this will cause stall in probe
+	 * and eventually fail the probe.
+	 */
+	init_timer(&phba->hw_check);
+	phba->hw_check.function = beiscsi_hw_health_check;
+	phba->hw_check.data = (unsigned long)phba;
+	mod_timer(&phba->hw_check,
+		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
 	return 0;
 		irq_poll_disable(&pbe_eq->iopoll);
 		irq_poll_disable(&pbe_eq->iopoll);
 	}
 free_twq:
+	hwi_cleanup_port(phba);
+	beiscsi_cleanup_port(phba);
 	beiscsi_free_mem(phba);
 	beiscsi_free_mem(phba);
 free_port:
 	pci_free_consistent(phba->pcidev,
 	return ret;
 	return ret;
 }
 
+{
+	struct beiscsi_hba *phba = NULL;
+
+	phba = pci_get_drvdata(pcidev);
+	if (!phba) {
+		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
+		return;
+	}
+
+	/* first stop UE detection before unloading */
+	del_timer_sync(&phba->hw_check);
+	cancel_delayed_work_sync(&phba->recover_port);
+	cancel_work_sync(&phba->sess_work);
+
+	beiscsi_iface_destroy_default(phba);
+	iscsi_host_remove(phba->shost);
+	beiscsi_disable_port(phba, 1);
+
+	/* after cancelling boot_work */
+	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);
+
+	/* free all resources */
+	destroy_workqueue(phba->wq);
+	beiscsi_cleanup_port(phba);
+	beiscsi_free_mem(phba);
+
+	/* ctrl uninit */
+	beiscsi_unmap_pci_function(phba);
+	pci_free_consistent(phba->pcidev,
+			    phba->ctrl.mbox_mem_alloced.size,
+			    phba->ctrl.mbox_mem_alloced.va,
+			    phba->ctrl.mbox_mem_alloced.dma);
+
+	pci_dev_put(phba->pcidev);
+	iscsi_host_free(phba->shost);
+	pci_disable_pcie_error_reporting(pcidev);
+	pci_set_drvdata(pcidev, NULL);
+	pci_release_regions(pcidev);
+	pci_disable_device(pcidev);
+}
+
+
 static struct pci_error_handlers beiscsi_eeh_handlers = {
 static struct pci_error_handlers beiscsi_eeh_handlers = {
 	.error_detected = beiscsi_eeh_err_detected,
 	.slot_reset = beiscsi_eeh_reset,
@@ -5846,9 +5903,9 @@ struct iscsi_transport beiscsi_iscsi_transport = {
 	.create_conn = beiscsi_conn_create,
 	.bind_conn = beiscsi_conn_bind,
 	.destroy_conn = iscsi_conn_teardown,
-	.attr_is_visible = be2iscsi_attr_is_visible,
-	.set_iface_param = be2iscsi_iface_set_param,
-	.get_iface_param = be2iscsi_iface_get_param,
+	.attr_is_visible = beiscsi_attr_is_visible,
+	.set_iface_param = beiscsi_iface_set_param,
+	.get_iface_param = beiscsi_iface_get_param,
 	.set_param = beiscsi_set_param,
 	.get_conn_param = iscsi_conn_get_param,
 	.get_session_param = iscsi_session_get_param,
@@ -5877,7 +5934,6 @@ static struct pci_driver beiscsi_pci_driver = {
 	.err_handler = &beiscsi_eeh_handlers
 };
 
-
 static int __init beiscsi_module_init(void)
 {
 	int ret;

drivers/scsi/be2iscsi/be_main.h (+119 -101)

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"11.0.0.0"
+#define BUILD_STR		"11.2.0.0"
 #define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -82,36 +82,12 @@
 #define BEISCSI_MAX_FRAGS_INIT	192
 #define BE_NUM_MSIX_ENTRIES	1
 
-#define MPU_EP_CONTROL          0
-#define MPU_EP_SEMAPHORE        0xac
-#define BE2_SOFT_RESET          0x5c
-#define BE2_PCI_ONLINE0         0xb0
-#define BE2_PCI_ONLINE1         0xb4
-#define BE2_SET_RESET           0x80
-#define BE2_MPU_IRAM_ONLINE     0x00000080
-
 #define BE_SENSE_INFO_SIZE		258
 #define BE_ISCSI_PDU_HEADER_SIZE	64
 #define BE_MIN_MEM_SIZE			16384
 #define MAX_CMD_SZ			65536
 #define IIOC_SCSI_DATA                  0x05	/* Write Operation */
 
-#define INVALID_SESS_HANDLE	0xFFFFFFFF
-
-/**
- * Adapter States
- **/
-#define BE_ADAPTER_LINK_UP	0x001
-#define BE_ADAPTER_LINK_DOWN	0x002
-#define BE_ADAPTER_PCI_ERR	0x004
-#define BE_ADAPTER_CHECK_BOOT	0x008
-
-
-#define BEISCSI_CLEAN_UNLOAD	0x01
-#define BEISCSI_EEH_UNLOAD	0x02
-
-#define BE_GET_BOOT_RETRIES	45
-#define BE_GET_BOOT_TO		20
 /**
  * hardware needs the async PDU buffers to be posted in multiples of 8
  * So have at least 8 of them by default
@@ -378,7 +354,6 @@ struct beiscsi_hba {
 	struct sgl_handle **eh_sgl_hndl_base;
 	spinlock_t io_sgl_lock;
 	spinlock_t mgmt_sgl_lock;
-	spinlock_t isr_lock;
 	spinlock_t async_pdu_lock;
 	unsigned int age;
 	struct list_head hba_queue;
@@ -390,7 +365,6 @@ struct beiscsi_hba {
 	struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
 	struct iscsi_endpoint **ep_array;
 	struct beiscsi_conn **conn_table;
-	struct iscsi_boot_kset *boot_kset;
 	struct Scsi_Host *shost;
 	struct iscsi_iface *ipv4_iface;
 	struct iscsi_iface *ipv6_iface;
@@ -418,12 +392,33 @@ struct beiscsi_hba {
 		unsigned long ulp_supported;
 	} fw_config;
 
-	unsigned int state;
+	unsigned long state;
+#define BEISCSI_HBA_ONLINE	0
+#define BEISCSI_HBA_LINK_UP	1
+#define BEISCSI_HBA_BOOT_FOUND	2
+#define BEISCSI_HBA_BOOT_WORK	3
+#define BEISCSI_HBA_UER_SUPP	4
+#define BEISCSI_HBA_PCI_ERR	5
+#define BEISCSI_HBA_FW_TIMEOUT	6
+#define BEISCSI_HBA_IN_UE	7
+#define BEISCSI_HBA_IN_TPE	8
+
+/* error bits */
+#define BEISCSI_HBA_IN_ERR	((1 << BEISCSI_HBA_PCI_ERR) | \
+				 (1 << BEISCSI_HBA_FW_TIMEOUT) | \
+				 (1 << BEISCSI_HBA_IN_UE) | \
+				 (1 << BEISCSI_HBA_IN_TPE))
+
 	u8 optic_state;
-	int get_boot;
-	bool fw_timeout;
-	bool ue_detected;
-	struct delayed_work beiscsi_hw_check_task;
+	struct delayed_work eqd_update;
+	/* update EQ delay timer every 1000ms */
+#define BEISCSI_EQD_UPDATE_INTERVAL	1000
+	struct timer_list hw_check;
+	/* check for UE every 1000ms */
+#define BEISCSI_UE_DETECT_INTERVAL	1000
+	u32 ue2rp;
+	struct delayed_work recover_port;
+	struct work_struct sess_work;
 
 	bool mac_addr_set;
 	u8 mac_address[ETH_ALEN];
@@ -435,7 +430,6 @@ struct beiscsi_hba {
 	struct be_ctrl_info ctrl;
 	unsigned int generation;
 	unsigned int interface_handle;
-	struct mgmt_session_info boot_sess;
 	struct invalidate_command_table inv_tbl[128];
 
 	struct be_aic_obj aic_obj[MAX_CPUS];
@@ -444,8 +438,29 @@ struct beiscsi_hba {
 			struct scatterlist *sg,
 			uint32_t num_sg, uint32_t xferlen,
 			uint32_t writedir);
+	struct boot_struct {
+		int retry;
+		unsigned int tag;
+		unsigned int s_handle;
+		struct be_dma_mem nonemb_cmd;
+		enum {
+			BEISCSI_BOOT_REOPEN_SESS = 1,
+			BEISCSI_BOOT_GET_SHANDLE,
+			BEISCSI_BOOT_GET_SINFO,
+			BEISCSI_BOOT_LOGOUT_SESS,
+			BEISCSI_BOOT_CREATE_KSET,
+		} action;
+		struct mgmt_session_info boot_sess;
+		struct iscsi_boot_kset *boot_kset;
+	} boot_struct;
+	struct work_struct boot_work;
 };
 
+#define beiscsi_hba_in_error(phba) ((phba)->state & BEISCSI_HBA_IN_ERR)
+#define beiscsi_hba_is_online(phba) \
+	(!beiscsi_hba_in_error((phba)) && \
+	 test_bit(BEISCSI_HBA_ONLINE, &phba->state))
+
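The two helpers above combine an atomic online test with a plain bitwise error-mask check. Usage sketch (submit_io is hypothetical; it shows the fail-fast guard the .c file now applies in beiscsi_task_xmit() and beiscsi_bsg_request()):

    /* Hypothetical caller: refuse new work unless the port is fully online. */
    static int submit_io(struct beiscsi_hba *phba)
    {
    	if (!beiscsi_hba_is_online(phba))
    		return -EIO;	/* error bit set, or ONLINE not (yet) set */
    	/* ... build and post the request ... */
    	return 0;
    }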
 struct beiscsi_session {
 	struct pci_pool *bhs_pool;
 };
@@ -508,6 +523,7 @@ struct beiscsi_io_task {
 	struct sgl_handle *psgl_handle;
 	struct beiscsi_conn *conn;
 	struct scsi_cmnd *scsi_cmnd;
+	int num_sg;
 	struct hwi_wrb_context *pwrb_context;
 	unsigned int cmd_sn;
 	unsigned int flags;
@@ -592,80 +608,81 @@ struct amap_beiscsi_offload_params {
 	u8 max_recv_data_segment_length[32];
 };
 
-/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
-		struct beiscsi_hba *phba, struct sol_cqe *psol);*/
-
-struct async_pdu_handle {
+struct hd_async_handle {
 	struct list_head link;
 	struct be_bus_address pa;
 	void *pbuffer;
-	unsigned int consumed;
-	unsigned char index;
-	unsigned char is_header;
-	unsigned short cri;
-	unsigned long buffer_len;
+	u32 buffer_len;
+	u16 index;
+	u16 cri;
+	u8 is_header;
+	u8 is_final;
 };
 
-struct hwi_async_entry {
-	struct {
-		unsigned char hdr_received;
-		unsigned char hdr_len;
-		unsigned short bytes_received;
+/**
+ * This has list of async PDUs that are waiting to be processed.
+ * Buffers live in this list for a brief duration before they get
+ * processed and posted back to hardware.
+ * Note that we don't really need one cri_wait_queue per async_entry.
+ * We need one cri_wait_queue per CRI. It's easier to manage if this
+ * is tagged along with the async_entry.
+ */
+struct hd_async_entry {
+	struct cri_wait_queue {
+		unsigned short hdr_len;
+		unsigned int bytes_received;
 		unsigned int bytes_needed;
 		struct list_head list;
-	} wait_queue;
-
-	struct list_head header_busy_list;
-	struct list_head data_busy_list;
+	} wq;
+	/* handles posted to FW reside here */
+	struct hd_async_handle *header;
+	struct hd_async_handle *data;
 };
 
-struct hwi_async_pdu_context {
-	struct {
-		struct be_bus_address pa_base;
-		void *va_base;
-		void *ring_base;
-		struct async_pdu_handle *handle_base;
-
-		unsigned int host_write_ptr;
-		unsigned int ep_read_ptr;
-		unsigned int writables;
-
-		unsigned int free_entries;
-		unsigned int busy_entries;
-
-		struct list_head free_list;
-	} async_header;
+struct hd_async_buf_context {
+	struct be_bus_address pa_base;
+	void *va_base;
+	void *ring_base;
+	struct hd_async_handle *handle_base;
+	u16 free_entries;
+	u32 buffer_size;
+	/**
+	 * Once the iSCSI layer finishes processing an async PDU, the
+	 * handles used for the PDU are added to this list.
+	 * They are posted back to FW in groups of 8.
+	 */
+	struct list_head free_list;
+};
 
-	struct {
-		struct be_bus_address pa_base;
-		void *va_base;
-		void *ring_base;
-		struct async_pdu_handle *handle_base;
-
-		unsigned int host_write_ptr;
-		unsigned int ep_read_ptr;
-		unsigned int writables;
-
-		unsigned int free_entries;
-		unsigned int busy_entries;
-		struct list_head free_list;
-	} async_data;
-
-	unsigned int buffer_size;
-	unsigned int num_entries;
+/**
+ * One hd_async_context is allocated for each ULP on which the iSCSI function is loaded.
+ */
+struct hd_async_context {
+	struct hd_async_buf_context async_header;
+	struct hd_async_buf_context async_data;
+	u16 num_entries;
+	/**
+	 * When an unsol PDU arrives, it needs to be chained until all of its
+	 * bytes are received, and only then is it processed. hd_async_entry
+	 * is created based on the cid_count for each ULP. When an unsol PDU
+	 * comes in, it is added to the correct async_entry wq based on its
+	 * conn_id. The cid_to_async_cri_map defined below is used to retrieve
+	 * the async CRI for a particular connection.
+	 *
+	 * This array is initialized after beiscsi_create_wrb_rings returns.
+	 *
+	 * - this method takes more memory space, fixed to 2K
+	 * - to support more connections than this, the array size needs
+	 *   to be increased
+	 */
 #define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
 	unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
 	/**
-	 * This is a varying size list! Do not add anything
-	 * after this entry!!
+	 * This is a variable size array. Don't add anything after this field!!
 	 */
-	struct hwi_async_entry *async_entry;
+	struct hd_async_entry *async_entry;
 };
 
-#define PDUCQE_CODE_MASK	0x0000003F
-#define PDUCQE_DPL_MASK		0xFFFF0000
-#define PDUCQE_INDEX_MASK	0x0000FFFF
-
 struct i_t_dpdu_cqe {
 	u32 dw[4];
 } __packed;
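
The per-connection chaining described above hinges on BE_GET_ASYNC_CRI_FROM_CID. A minimal sketch of how an incoming fragment might be queued (the helper below is hypothetical, and the first parameter must be named pasync_ctx for the macro to expand):

static void example_chain_pdu(struct hd_async_context *pasync_ctx,
			      struct hd_async_handle *pasync_handle,
			      unsigned int cid)
{
	u16 cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	struct hd_async_entry *entry = &pasync_ctx->async_entry[cri];

	/* chain until wq.bytes_received reaches wq.bytes_needed */
	list_add_tail(&pasync_handle->link, &entry->wq.list);
	entry->wq.bytes_received += pasync_handle->buffer_len;
}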
@@ -845,7 +862,6 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
 void
 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
 
-void beiscsi_process_all_cqs(struct work_struct *work);
 void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
 				     struct iscsi_task *task);
 
@@ -856,11 +872,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget);
 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba);
 
-static inline bool beiscsi_error(struct beiscsi_hba *phba)
-{
-	return phba->ue_detected || phba->fw_timeout;
-}
-
 struct pdu_nop_out {
 	u32 dw[12];
 };
@@ -1067,11 +1078,18 @@ struct hwi_context_memory {
 	struct be_queue_info be_cq[MAX_CPUS - 1];
 
 	struct be_queue_info *be_wrbq;
+	/**
+	 * The entries below are arrays indexed by ULP number, as a
+	 * DEFQ is created for each ULP on which the iSCSI protocol
+	 * is loaded.
+	 */
 	struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
 	struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
-	struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];
+	struct hd_async_context *pasync_ctx[BEISCSI_ULP_COUNT];
 };
 
+void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle);
+
 /* Logging related definitions */
 #define BEISCSI_LOG_INIT	0x0001	/* Initialization events */
 #define BEISCSI_LOG_MBOX	0x0002	/* Mailbox Events */

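These log masks are meant to be OR-combined per call site. A small illustrative use (the wrapper function is hypothetical; the BG_%d slot is filled with __LINE__ by the beiscsi_log macro itself):

static void example_log_init_step(struct beiscsi_hba *phba, int step)
{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT | BEISCSI_LOG_MBOX,
		    "BG_%d : adapter init step %d\n", step);
}
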
+567 -930	drivers/scsi/be2iscsi/be_mgmt.c

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -24,139 +24,9 @@
 #include "be_iscsi.h"
 #include "be_iscsi.h"
 #include "be_main.h"
 #include "be_main.h"
 
 
-/* UE Status Low CSR */
-static const char * const desc_ue_status_low[] = {
-	"CEV",
-	"CTX",
-	"DBUF",
-	"ERX",
-	"Host",
-	"MPU",
-	"NDMA",
-	"PTC ",
-	"RDMA ",
-	"RXF ",
-	"RXIPS ",
-	"RXULP0 ",
-	"RXULP1 ",
-	"RXULP2 ",
-	"TIM ",
-	"TPOST ",
-	"TPRE ",
-	"TXIPS ",
-	"TXULP0 ",
-	"TXULP1 ",
-	"UC ",
-	"WDMA ",
-	"TXULP2 ",
-	"HOST1 ",
-	"P0_OB_LINK ",
-	"P1_OB_LINK ",
-	"HOST_GPIO ",
-	"MBOX ",
-	"AXGMAC0",
-	"AXGMAC1",
-	"JTAG",
-	"MPU_INTPEND"
-};
-
-/* UE Status High CSR */
-static const char * const desc_ue_status_hi[] = {
-	"LPCMEMHOST",
-	"MGMT_MAC",
-	"PCS0ONLINE",
-	"MPU_IRAM",
-	"PCS1ONLINE",
-	"PCTL0",
-	"PCTL1",
-	"PMEM",
-	"RR",
-	"TXPB",
-	"RXPP",
-	"XAUI",
-	"TXP",
-	"ARM",
-	"IPC",
-	"HOST2",
-	"HOST3",
-	"HOST4",
-	"HOST5",
-	"HOST6",
-	"HOST7",
-	"HOST8",
-	"HOST9",
-	"NETC",
-	"Unknown",
-	"Unknown",
-	"Unknown",
-	"Unknown",
-	"Unknown",
-	"Unknown",
-	"Unknown",
-	"Unknown"
-};
-
-/*
- * beiscsi_ue_detec()- Detect Unrecoverable Error on adapter
- * @phba: Driver priv structure
- *
- * Read registers linked to UE and check for the UE status
- **/
-void beiscsi_ue_detect(struct beiscsi_hba *phba)
-{
-	uint32_t ue_hi = 0, ue_lo = 0;
-	uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
-	uint8_t i = 0;
-
-	if (phba->ue_detected)
-		return;
-
-	pci_read_config_dword(phba->pcidev,
-			      PCICFG_UE_STATUS_LOW, &ue_lo);
-	pci_read_config_dword(phba->pcidev,
-			      PCICFG_UE_STATUS_MASK_LOW,
-			      &ue_mask_lo);
-	pci_read_config_dword(phba->pcidev,
-			      PCICFG_UE_STATUS_HIGH,
-			      &ue_hi);
-	pci_read_config_dword(phba->pcidev,
-			      PCICFG_UE_STATUS_MASK_HI,
-			      &ue_mask_hi);
-
-	ue_lo = (ue_lo & ~ue_mask_lo);
-	ue_hi = (ue_hi & ~ue_mask_hi);
-
-
-	if (ue_lo || ue_hi) {
-		phba->ue_detected = true;
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-			    "BG_%d : Error detected on the adapter\n");
-	}
-
-	if (ue_lo) {
-		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
-			if (ue_lo & 1)
-				beiscsi_log(phba, KERN_ERR,
-					    BEISCSI_LOG_CONFIG,
-					    "BG_%d : UE_LOW %s bit set\n",
-					    desc_ue_status_low[i]);
-		}
-	}
-
-	if (ue_hi) {
-		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
-			if (ue_hi & 1)
-				beiscsi_log(phba, KERN_ERR,
-					    BEISCSI_LOG_CONFIG,
-					    "BG_%d : UE_HIGH %s bit set\n",
-					    desc_ue_status_hi[i]);
-		}
-	}
-}
-
-int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
-		 struct be_set_eqd *set_eqd, int num)
+int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
+			    struct be_set_eqd *set_eqd,
+			    int num)
 {
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
@@ -174,7 +44,7 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
 	req = embedded_payload(wrb);
 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+			   OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
 
 	req->num_eq = cpu_to_le32(num);
 	for (i = 0; i < num; i++) {
@@ -184,386 +54,13 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
 				cpu_to_le32(set_eqd[i].delay_multiplier);
 	}
 
+	/* ignore the completion of this mbox command */
+	set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
 	be_mcc_notify(phba, tag);
 	mutex_unlock(&ctrl->mbox_lock);
 	return tag;
 }
 
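Because MCC_TAG_STATE_IGNORE is set before be_mcc_notify(), the completion path releases the tag on its own and no caller ever sleeps on this command. A sketch of the periodic, fire-and-forget caller this enables (illustrative only; the real work function lives in be_main.c, and filling set_eqd[] is elided):

static void example_eqd_update(struct work_struct *work)
{
	struct beiscsi_hba *phba = container_of(to_delayed_work(work),
						struct beiscsi_hba,
						eqd_update);
	struct be_set_eqd set_eqd[MAX_CPUS];
	int num = 0;

	/* fill set_eqd[0..num) from aic_obj interrupt statistics here */
	if (num)
		beiscsi_modify_eq_delay(phba, set_eqd, num);
	/* the call returns a tag immediately; its completion is ignored */
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
}
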
-/**
- * mgmt_reopen_session()- Reopen a session based on reopen_type
- * @phba: Device priv structure instance
- * @reopen_type: Type of reopen_session FW should do.
- * @sess_handle: Session Handle of the session to be re-opened
- *
- * return
- *	the TAG used for MBOX Command
- *
- **/
-unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
-				  unsigned int reopen_type,
-				  unsigned int sess_handle)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_reopen_session_req *req;
-	unsigned int tag;
-
-	beiscsi_log(phba, KERN_INFO,
-		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-		    "BG_%d : In bescsi_get_boot_target\n");
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		return 0;
-	}
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-			   OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
-			   sizeof(struct be_cmd_reopen_session_resp));
-
-	/* set the reopen_type,sess_handle */
-	req->reopen_type = reopen_type;
-	req->session_handle = sess_handle;
-
-	be_mcc_notify(phba, tag);
-	mutex_unlock(&ctrl->mbox_lock);
-	return tag;
-}
-
-unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_get_boot_target_req *req;
-	unsigned int tag;
-
-	beiscsi_log(phba, KERN_INFO,
-		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-		    "BG_%d : In bescsi_get_boot_target\n");
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		return 0;
-	}
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-			   OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
-			   sizeof(struct be_cmd_get_boot_target_resp));
-
-	be_mcc_notify(phba, tag);
-	mutex_unlock(&ctrl->mbox_lock);
-	return tag;
-}
-
-unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
-				   u32 boot_session_handle,
-				   struct be_dma_mem *nonemb_cmd)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	unsigned int tag;
-	struct  be_cmd_get_session_req *req;
-	struct be_cmd_get_session_resp *resp;
-	struct be_sge *sge;
-
-	beiscsi_log(phba, KERN_INFO,
-		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-		    "BG_%d : In beiscsi_get_session_info\n");
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		return 0;
-	}
-
-	nonemb_cmd->size = sizeof(*resp);
-	req = nonemb_cmd->va;
-	memset(req, 0, sizeof(*req));
-	sge = nonembedded_sgl(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-			   OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
-			   sizeof(*resp));
-	req->session_handle = boot_session_handle;
-	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
-	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
-	sge->len = cpu_to_le32(nonemb_cmd->size);
-
-	be_mcc_notify(phba, tag);
-	mutex_unlock(&ctrl->mbox_lock);
-	return tag;
-}
-
-/**
- * mgmt_get_port_name()- Get port name for the function
- * @ctrl: ptr to Ctrl Info
- * @phba: ptr to the dev priv structure
- *
- * Get the alphanumeric character for port
- *
- **/
-int mgmt_get_port_name(struct be_ctrl_info *ctrl,
-		       struct beiscsi_hba *phba)
-{
-	int ret = 0;
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_get_port_name *ioctl;
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = wrb_from_mbox(&ctrl->mbox_mem);
-	memset(wrb, 0, sizeof(*wrb));
-	ioctl = embedded_payload(wrb);
-
-	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
-	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
-			   OPCODE_COMMON_GET_PORT_NAME,
-			   EMBED_MBX_MAX_PAYLOAD_SIZE);
-	ret = be_mbox_notify(ctrl);
-	phba->port_name = 0;
-	if (!ret) {
-		phba->port_name = ioctl->p.resp.port_names >>
-				  (phba->fw_config.phys_port * 8) & 0xff;
-	} else {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
-			    ret, ioctl->h.resp_hdr.status);
-	}
-
-	if (phba->port_name == 0)
-		phba->port_name = '?';
-
-	mutex_unlock(&ctrl->mbox_lock);
-	return ret;
-}
-
-/**
- * mgmt_get_fw_config()- Get the FW config for the function
- * @ctrl: ptr to Ctrl Info
- * @phba: ptr to the dev priv structure
- *
- * Get the FW config and resources available for the function.
- * The resources are created based on the count received here.
- *
- * return
- *	Success: 0
- *	Failure: Non-Zero Value
- **/
-int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
-				struct beiscsi_hba *phba)
-{
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
-	uint32_t cid_count, icd_count;
-	int status = -EINVAL;
-	uint8_t ulp_num = 0;
-
-	mutex_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
-	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
-
-	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
-			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
-			   EMBED_MBX_MAX_PAYLOAD_SIZE);
-
-	if (be_mbox_notify(ctrl)) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d : Failed in mgmt_get_fw_config\n");
-		goto fail_init;
-	}
-
-	/* FW response formats depend on port id */
-	phba->fw_config.phys_port = pfw_cfg->phys_port;
-	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d : invalid physical port id %d\n",
-			    phba->fw_config.phys_port);
-		goto fail_init;
-	}
-
-	/* populate and check FW config against min and max values */
-	if (!is_chip_be2_be3r(phba)) {
-		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
-		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
-		if (phba->fw_config.eqid_count == 0 ||
-		    phba->fw_config.eqid_count > 2048) {
-			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-				    "BG_%d : invalid EQ count %d\n",
-				    phba->fw_config.eqid_count);
-			goto fail_init;
-		}
-		if (phba->fw_config.cqid_count == 0 ||
-		    phba->fw_config.cqid_count > 4096) {
-			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-				    "BG_%d : invalid CQ count %d\n",
-				    phba->fw_config.cqid_count);
-			goto fail_init;
-		}
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
-			    phba->fw_config.eqid_count,
-			    phba->fw_config.cqid_count);
-	}
-
-	/**
-	 * Check on which all ULP iSCSI Protocol is loaded.
-	 * Set the Bit for those ULP. This set flag is used
-	 * at all places in the code to check on which ULP
-	 * iSCSi Protocol is loaded
-	 **/
-	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
-		if (pfw_cfg->ulp[ulp_num].ulp_mode &
-		    BEISCSI_ULP_ISCSI_INI_MODE) {
-			set_bit(ulp_num, &phba->fw_config.ulp_supported);
-
-			/* Get the CID, ICD and Chain count for each ULP */
-			phba->fw_config.iscsi_cid_start[ulp_num] =
-				pfw_cfg->ulp[ulp_num].sq_base;
-			phba->fw_config.iscsi_cid_count[ulp_num] =
-				pfw_cfg->ulp[ulp_num].sq_count;
-
-			phba->fw_config.iscsi_icd_start[ulp_num] =
-				pfw_cfg->ulp[ulp_num].icd_base;
-			phba->fw_config.iscsi_icd_count[ulp_num] =
-				pfw_cfg->ulp[ulp_num].icd_count;
-
-			phba->fw_config.iscsi_chain_start[ulp_num] =
-				pfw_cfg->chain_icd[ulp_num].chain_base;
-			phba->fw_config.iscsi_chain_count[ulp_num] =
-				pfw_cfg->chain_icd[ulp_num].chain_count;
-
-			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-				    "BG_%d : Function loaded on ULP : %d\n"
-				    "\tiscsi_cid_count : %d\n"
-				    "\tiscsi_cid_start : %d\n"
-				    "\t iscsi_icd_count : %d\n"
-				    "\t iscsi_icd_start : %d\n",
-				    ulp_num,
-				    phba->fw_config.
-				    iscsi_cid_count[ulp_num],
-				    phba->fw_config.
-				    iscsi_cid_start[ulp_num],
-				    phba->fw_config.
-				    iscsi_icd_count[ulp_num],
-				    phba->fw_config.
-				    iscsi_icd_start[ulp_num]);
-		}
-	}
-
-	if (phba->fw_config.ulp_supported == 0) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
-			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
-			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
-		goto fail_init;
-	}
-
-	/**
-	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
-	 **/
-	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
-		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
-			break;
-	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
-	if (icd_count == 0 || icd_count > 65536) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d: invalid ICD count %d\n", icd_count);
-		goto fail_init;
-	}
-
-	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
-		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
-	if (cid_count == 0 || cid_count > 4096) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d: invalid CID count %d\n", cid_count);
-		goto fail_init;
-	}
-
-	/**
-	 * Check FW is dual ULP aware i.e. can handle either
-	 * of the protocols.
-	 */
-	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
-					  BEISCSI_FUNC_DUA_MODE);
-
-	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-		    "BG_%d : DUA Mode : 0x%x\n",
-		    phba->fw_config.dual_ulp_aware);
-
-	/* all set, continue using this FW config */
-	status = 0;
-fail_init:
-	mutex_unlock(&ctrl->mbox_lock);
-	return status;
-}
-
-int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
-				      struct beiscsi_hba *phba)
-{
-	struct be_dma_mem nonemb_cmd;
-	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
-	struct be_mgmt_controller_attributes *req;
-	struct be_sge *sge = nonembedded_sgl(wrb);
-	int status = 0;
-
-	nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
-				sizeof(struct be_mgmt_controller_attributes),
-				&nonemb_cmd.dma);
-	if (nonemb_cmd.va == NULL) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d : Failed to allocate memory for "
-			    "mgmt_check_supported_fw\n");
-		return -ENOMEM;
-	}
-	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
-	req = nonemb_cmd.va;
-	memset(req, 0, sizeof(*req));
-	mutex_lock(&ctrl->mbox_lock);
-	memset(wrb, 0, sizeof(*wrb));
-	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
-	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
-	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
-	sge->len = cpu_to_le32(nonemb_cmd.size);
-	status = be_mbox_notify(ctrl);
-	if (!status) {
-		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BG_%d : Firmware Version of CMD : %s\n"
-			    "Firmware Version is : %s\n"
-			    "Developer Build, not performing version check...\n",
-			    resp->params.hba_attribs
-			    .flashrom_version_string,
-			    resp->params.hba_attribs.
-			    firmware_version_string);
-
-		phba->fw_config.iscsi_features =
-				resp->params.hba_attribs.iscsi_features;
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
-			    "BM_%d : phba->fw_config.iscsi_features = %d\n",
-			    phba->fw_config.iscsi_features);
-		memcpy(phba->fw_ver_str, resp->params.hba_attribs.
-		       firmware_version_string, BEISCSI_VER_STRLEN);
-	} else
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BG_%d :  Failed in mgmt_check_supported_fw\n");
-	mutex_unlock(&ctrl->mbox_lock);
-	if (nonemb_cmd.va)
-		pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
-				    nonemb_cmd.va, nonemb_cmd.dma);
-
-	return status;
-}
-
 unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
 					 struct beiscsi_hba *phba,
 					 struct bsg_job *job,
@@ -609,7 +106,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
 			    bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
 
 		mutex_unlock(&ctrl->mbox_lock);
-		return -ENOSYS;
+		return -EPERM;
 	}
 
 	wrb = alloc_mcc_wrb(phba, &tag);
@@ -631,48 +128,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
 	return tag;
 }
 
-/**
- * mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
- * @phba: pointer to dev priv structure
- * @ulp_num: ULP number.
- *
- * return
- *	Success: 0
- *	Failure: Non-Zero Value
- **/
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct iscsi_cleanup_req *req;
-	unsigned int tag;
-	int status;
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		return -EBUSY;
-	}
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-			   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
-
-	req->chute = (1 << ulp_num);
-	req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
-	req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
-
-	be_mcc_notify(phba, tag);
-	status = be_mcc_compl_poll(phba, tag);
-	if (status)
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
-			    "BG_%d : mgmt_epfw_cleanup , FAILED\n");
-	mutex_unlock(&ctrl->mbox_lock);
-	return status;
-}
-
 unsigned int  mgmt_invalidate_icds(struct beiscsi_hba *phba,
 				struct invalidate_command_table *inv_tbl,
 				unsigned int num_invalidate, unsigned int cid,
@@ -844,7 +299,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 			   nonemb_cmd->size);
 	if (dst_addr->sa_family == PF_INET) {
 		__be32 s_addr = daddr_in->sin_addr.s_addr;
-		req->ip_address.ip_type = BE2_IPV4;
+		req->ip_address.ip_type = BEISCSI_IP_TYPE_V4;
 		req->ip_address.addr[0] = s_addr & 0x000000ff;
 		req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
 		req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
@@ -852,17 +307,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 		req->tcp_port = ntohs(daddr_in->sin_port);
 		beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
 		beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
-		beiscsi_ep->ip_type = BE2_IPV4;
+		beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V4;
 	} else {
 		/* else its PF_INET6 family */
-		req->ip_address.ip_type = BE2_IPV6;
+		req->ip_address.ip_type = BEISCSI_IP_TYPE_V6;
 		memcpy(&req->ip_address.addr,
 		       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
 		req->tcp_port = ntohs(daddr_in6->sin6_port);
 		beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
 		memcpy(&beiscsi_ep->dst6_addr,
 		       &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
-		beiscsi_ep->ip_type = BE2_IPV6;
+		beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V6;
 	}
 	req->cid = cid;
 	i = phba->nxt_cqid++;
@@ -883,7 +338,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 
 	if (!is_chip_be2_be3r(phba)) {
 		req->hdr.version = MBX_CMD_VER1;
-		req->tcp_window_size = 0;
+		req->tcp_window_size = 0x8000;
 		req->tcp_window_scale_count = 2;
 	}
 
@@ -892,44 +347,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
 	return tag;
 }
 
-unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_get_all_if_id_req *req;
-	struct be_cmd_get_all_if_id_req *pbe_allid;
-	unsigned int tag;
-	int status = 0;
-
-	if (mutex_lock_interruptible(&ctrl->mbox_lock))
-		return -EINTR;
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		return -ENOMEM;
-	}
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
-			   OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
-			   sizeof(*req));
-	be_mcc_notify(phba, tag);
-	mutex_unlock(&ctrl->mbox_lock);
-
-	status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
-	if (status) {
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-			    "BG_%d : Failed in mgmt_get_all_if_id\n");
-		return -EBUSY;
-	}
-
-	pbe_allid = embedded_payload(wrb);
-	phba->interface_handle = pbe_allid->if_hndl_list[0];
-
-	return status;
-}
-
 /*
  * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
  * @phba: Driver priv structure
@@ -1001,72 +418,68 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
 	}
 	cmd->size = size;
 	be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
+	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+		    "BG_%d : subsystem iSCSI cmd %d size %d\n",
+		    iscsi_cmd, size);
 	return 0;
 }
 
-static int
-mgmt_static_ip_modify(struct beiscsi_hba *phba,
-		      struct be_cmd_get_if_info_resp *if_info,
-		      struct iscsi_iface_param_info *ip_param,
-		      struct iscsi_iface_param_info *subnet_param,
-		      uint32_t ip_action)
+unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba)
 {
-	struct be_cmd_set_ip_addr_req *req;
-	struct be_dma_mem nonemb_cmd;
-	uint32_t ip_type;
-	int rc;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_get_all_if_id_req *req;
+	struct be_cmd_get_all_if_id_req *pbe_allid;
+	unsigned int tag;
+	int status = 0;
 
-	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-				 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
-				 sizeof(*req));
-	if (rc)
-		return rc;
+	if (mutex_lock_interruptible(&ctrl->mbox_lock))
+		return -EINTR;
+	wrb = alloc_mcc_wrb(phba, &tag);
+	if (!wrb) {
+		mutex_unlock(&ctrl->mbox_lock);
+		return -ENOMEM;
+	}
 
-	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
-		BE2_IPV6 : BE2_IPV4 ;
+	req = embedded_payload(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
+			   OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
+			   sizeof(*req));
+	be_mcc_notify(phba, tag);
+	mutex_unlock(&ctrl->mbox_lock);
 
-	req = nonemb_cmd.va;
-	req->ip_params.record_entry_count = 1;
-	req->ip_params.ip_record.action = ip_action;
-	req->ip_params.ip_record.interface_hndl =
-		phba->interface_handle;
-	req->ip_params.ip_record.ip_addr.size_of_structure =
-		sizeof(struct be_ip_addr_subnet_format);
-	req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+	status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
+	if (status) {
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+			    "BG_%d : %s failed: %d\n", __func__, status);
+		return -EBUSY;
+	}
 
-	if (ip_action == IP_ACTION_ADD) {
-		memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
-		       sizeof(req->ip_params.ip_record.ip_addr.addr));
+	pbe_allid = embedded_payload(wrb);
+	/* we now support only one interface per function */
+	phba->interface_handle = pbe_allid->if_hndl_list[0];
 
-		if (subnet_param)
-			memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-			       subnet_param->value,
-			       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
-	} else {
-		memcpy(req->ip_params.ip_record.ip_addr.addr,
-		       if_info->ip_addr.addr,
-		       sizeof(req->ip_params.ip_record.ip_addr.addr));
+	return status;
+}
 
-		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-		       if_info->ip_addr.subnet_mask,
-		       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
-	}
+static inline bool beiscsi_if_zero_ip(u8 *ip, u32 ip_type)
+{
+	u32 len;
 
-	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
-	if (rc < 0)
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-			    "BG_%d : Failed to Modify existing IP Address\n");
-	return rc;
+	len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
+	while (len && !ip[len - 1])
+		len--;
+	return (len == 0);
 }
 
-static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
-			       uint32_t gtway_action, uint32_t param_len)
+static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
+			     u32 action, u32 ip_type, u8 *gw)
 {
 	struct be_cmd_set_def_gateway_req *req;
 	struct be_dma_mem nonemb_cmd;
 	int rt_val;
 
-
 	rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
 				OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
 				sizeof(*req));
@@ -1074,200 +487,300 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
 		return rt_val;
 
 	req = nonemb_cmd.va;
-	req->action = gtway_action;
-	req->ip_addr.ip_type = BE2_IPV4;
+	req->action = action;
+	req->ip_addr.ip_type = ip_type;
+	memcpy(req->ip_addr.addr, gw,
+	       (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+}
 
-	memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
+int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
+{
+	struct be_cmd_get_def_gateway_resp gw_resp;
+	int rt_val;
 
-	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+	memset(&gw_resp, 0, sizeof(gw_resp));
+	rt_val = beiscsi_if_get_gw(phba, ip_type, &gw_resp);
+	if (rt_val) {
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+			    "BG_%d : Failed to Get Gateway Addr\n");
+		return rt_val;
+	}
+
+	if (!beiscsi_if_zero_ip(gw_resp.ip_addr.addr, ip_type)) {
+		rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, ip_type,
+					   gw_resp.ip_addr.addr);
+		if (rt_val) {
+			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+				    "BG_%d : Failed to clear Gateway Addr Set\n");
+			return rt_val;
+		}
+	}
+
+	rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_ADD, ip_type, gw);
+	if (rt_val)
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+			    "BG_%d : Failed to Set Gateway Addr\n");
+
+	return rt_val;
 }
 
-int mgmt_set_ip(struct beiscsi_hba *phba,
-		struct iscsi_iface_param_info *ip_param,
-		struct iscsi_iface_param_info *subnet_param,
-		uint32_t boot_proto)
+int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
+		      struct be_cmd_get_def_gateway_resp *resp)
 {
-	struct be_cmd_get_def_gateway_resp gtway_addr_set;
-	struct be_cmd_get_if_info_resp *if_info;
-	struct be_cmd_set_dhcp_req *dhcpreq;
-	struct be_cmd_rel_dhcp_req *reldhcp;
+	struct be_cmd_get_def_gateway_req *req;
 	struct be_dma_mem nonemb_cmd;
-	uint8_t *gtway_addr;
-	uint32_t ip_type;
 	int rc;
 
-	rc = mgmt_get_all_if_id(phba);
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
+				 sizeof(*resp));
 	if (rc)
 		return rc;
 
-	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
-		BE2_IPV6 : BE2_IPV4 ;
+	req = nonemb_cmd.va;
+	req->ip_type = ip_type;
+
+	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp,
+				    sizeof(*resp));
+}
 
-	rc = mgmt_get_if_info(phba, ip_type, &if_info);
+static int
+beiscsi_if_clr_ip(struct beiscsi_hba *phba,
+		  struct be_cmd_get_if_info_resp *if_info)
+{
+	struct be_cmd_set_ip_addr_req *req;
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+				 sizeof(*req));
 	if (rc)
 		return rc;
 
-	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-		if (if_info->dhcp_state) {
-			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-				    "BG_%d : DHCP Already Enabled\n");
-			goto exit;
-		}
-		/* The ip_param->len is 1 in DHCP case. Setting
-		   proper IP len as this it is used while
-		   freeing the Static IP.
-		 */
-		ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?
-				IP_V6_LEN : IP_V4_LEN;
-
-	} else {
-		if (if_info->dhcp_state) {
+	req = nonemb_cmd.va;
+	req->ip_params.record_entry_count = 1;
+	req->ip_params.ip_record.action = IP_ACTION_DEL;
+	req->ip_params.ip_record.interface_hndl =
+		phba->interface_handle;
+	req->ip_params.ip_record.ip_addr.size_of_structure =
+		sizeof(struct be_ip_addr_subnet_format);
+	req->ip_params.ip_record.ip_addr.ip_type = if_info->ip_addr.ip_type;
+	memcpy(req->ip_params.ip_record.ip_addr.addr,
+	       if_info->ip_addr.addr,
+	       sizeof(if_info->ip_addr.addr));
+	memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+	       if_info->ip_addr.subnet_mask,
+	       sizeof(if_info->ip_addr.subnet_mask));
+	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+	if (rc < 0 || req->ip_params.ip_record.status) {
+		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+			    "BG_%d : failed to clear IP: rc %d status %d\n",
+			    rc, req->ip_params.ip_record.status);
+	}
+	return rc;
+}
 
-			memset(if_info, 0, sizeof(*if_info));
-			rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-				OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
-				sizeof(*reldhcp));
+static int
+beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
+		  u8 *subnet, u32 ip_type)
+{
+	struct be_cmd_set_ip_addr_req *req;
+	struct be_dma_mem nonemb_cmd;
+	uint32_t ip_len;
+	int rc;
 
-			if (rc)
-				goto exit;
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
+				 sizeof(*req));
+	if (rc)
+		return rc;
 
-			reldhcp = nonemb_cmd.va;
-			reldhcp->interface_hndl = phba->interface_handle;
-			reldhcp->ip_type = ip_type;
+	req = nonemb_cmd.va;
+	req->ip_params.record_entry_count = 1;
+	req->ip_params.ip_record.action = IP_ACTION_ADD;
+	req->ip_params.ip_record.interface_hndl =
+		phba->interface_handle;
+	req->ip_params.ip_record.ip_addr.size_of_structure =
+		sizeof(struct be_ip_addr_subnet_format);
+	req->ip_params.ip_record.ip_addr.ip_type = ip_type;
+	ip_len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
+	memcpy(req->ip_params.ip_record.ip_addr.addr, ip, ip_len);
+	if (subnet)
+		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
+		       subnet, ip_len);
 
-			rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
-			if (rc < 0) {
-				beiscsi_log(phba, KERN_WARNING,
-					    BEISCSI_LOG_CONFIG,
-					    "BG_%d : Failed to Delete existing dhcp\n");
-				goto exit;
-			}
-		}
+	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+	/**
+	 * In some cases, host needs to look into individual record status
+	 * even though FW reported success for that IOCTL.
+	 */
+	if (rc < 0 || req->ip_params.ip_record.status) {
+		__beiscsi_log(phba, KERN_ERR,
+			    "BG_%d : failed to set IP: rc %d status %d\n",
+			    rc, req->ip_params.ip_record.status);
+		if (req->ip_params.ip_record.status)
+			rc = -EINVAL;
 	}
+	return rc;
+}
 
-	/* Delete the Static IP Set */
-	if (if_info->ip_addr.addr[0]) {
-		rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
-					   IP_ACTION_DEL);
+int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
+			 u8 *ip, u8 *subnet)
+{
+	struct be_cmd_get_if_info_resp *if_info;
+	struct be_cmd_rel_dhcp_req *reldhcp;
+	struct be_dma_mem nonemb_cmd;
+	int rc;
+
+	rc = beiscsi_if_get_info(phba, ip_type, &if_info);
+	if (rc)
+		return rc;
+
+	if (if_info->dhcp_state) {
+		rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+				OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
+				sizeof(*reldhcp));
 		if (rc)
 			goto exit;
-	}
 
-	/* Delete the Gateway settings if mode change is to DHCP */
-	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-		memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
-		rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
-		if (rc) {
+		reldhcp = nonemb_cmd.va;
+		reldhcp->interface_hndl = phba->interface_handle;
+		reldhcp->ip_type = ip_type;
+		rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+		if (rc < 0) {
 			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-				    "BG_%d : Failed to Get Gateway Addr\n");
+				    "BG_%d : failed to release existing DHCP: %d\n",
+				    rc);
 			goto exit;
 		}
-
-		if (gtway_addr_set.ip_addr.addr[0]) {
-			gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
-			rc = mgmt_modify_gateway(phba, gtway_addr,
-						 IP_ACTION_DEL, IP_V4_LEN);
-
-			if (rc) {
-				beiscsi_log(phba, KERN_WARNING,
-					    BEISCSI_LOG_CONFIG,
-					    "BG_%d : Failed to clear Gateway Addr Set\n");
-				goto exit;
-			}
-		}
 	}
 
-	/* Set Adapter to DHCP/Static Mode */
-	if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
-		rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-			OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
-			sizeof(*dhcpreq));
+	/* first delete any IP set */
+	if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
+		rc = beiscsi_if_clr_ip(phba, if_info);
 		if (rc)
 			goto exit;
-
-		dhcpreq = nonemb_cmd.va;
-		dhcpreq->flags = BLOCKING;
-		dhcpreq->retry_count = 1;
-		dhcpreq->interface_hndl = phba->interface_handle;
-		dhcpreq->ip_type = BE2_DHCP_V4;
-
-		rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
-	} else {
-		rc = mgmt_static_ip_modify(phba, if_info, ip_param,
-					     subnet_param, IP_ACTION_ADD);
 	}
 
+	/* if ip == NULL then this is called just to release DHCP IP */
+	if (ip)
+		rc = beiscsi_if_set_ip(phba, ip, subnet, ip_type);
 exit:
 	kfree(if_info);
 	return rc;
 }
 
-int mgmt_set_gateway(struct beiscsi_hba *phba,
-		     struct iscsi_iface_param_info *gateway_param)
+int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
 {
-	struct be_cmd_get_def_gateway_resp gtway_addr_set;
-	uint8_t *gtway_addr;
-	int rt_val;
+	struct be_cmd_get_def_gateway_resp gw_resp;
+	struct be_cmd_get_if_info_resp *if_info;
+	struct be_cmd_set_dhcp_req *dhcpreq;
+	struct be_dma_mem nonemb_cmd;
+	u8 *gw;
+	int rc;
 
-	memset(&gtway_addr_set, 0, sizeof(gtway_addr_set));
-	rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set);
-	if (rt_val) {
+	rc = beiscsi_if_get_info(phba, ip_type, &if_info);
+	if (rc)
+		return rc;
+
+	if (if_info->dhcp_state) {
 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-			    "BG_%d : Failed to Get Gateway Addr\n");
-		return rt_val;
+				"BG_%d : DHCP Already Enabled\n");
+		goto exit;
 	}
 
-	if (gtway_addr_set.ip_addr.addr[0]) {
-		gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
-		rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL,
-					     gateway_param->len);
-		if (rt_val) {
+	/* first delete any IP set */
+	if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
+		rc = beiscsi_if_clr_ip(phba, if_info);
+		if (rc)
+			goto exit;
+	}
+
+	/* delete gateway settings if mode change is to DHCP */
+	memset(&gw_resp, 0, sizeof(gw_resp));
+	/* use ip_type provided in if_info */
+	rc = beiscsi_if_get_gw(phba, if_info->ip_addr.ip_type, &gw_resp);
+	if (rc) {
+		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+			    "BG_%d : Failed to Get Gateway Addr\n");
+		goto exit;
+	}
+	gw = (u8 *)&gw_resp.ip_addr.addr;
+	if (!beiscsi_if_zero_ip(gw, if_info->ip_addr.ip_type)) {
+		rc = beiscsi_if_mod_gw(phba, IP_ACTION_DEL,
+				       if_info->ip_addr.ip_type, gw);
+		if (rc) {
 			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
 			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
 				    "BG_%d : Failed to clear Gateway Addr Set\n");
+			goto exit;
 		}
 	}
 
-	gtway_addr = (uint8_t *)&gateway_param->value;
-	rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD,
-				     gateway_param->len);
+	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
+			OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
+			sizeof(*dhcpreq));
+	if (rc)
+		goto exit;
 
-	if (rt_val)
-		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
-			    "BG_%d : Failed to Set Gateway Addr\n");
+	dhcpreq = nonemb_cmd.va;
+	dhcpreq->flags = 1; /* 1 - blocking; 0 - non-blocking */
+	dhcpreq->retry_count = 1;
+	dhcpreq->interface_hndl = phba->interface_handle;
+	dhcpreq->ip_type = ip_type;
+	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
 
-	return rt_val;
+exit:
+	kfree(if_info);
+	return rc;
 }
 
-int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
-		     struct be_cmd_get_def_gateway_resp *gateway)
+/**
+ * beiscsi_if_set_vlan()- Issue and wait for CMD completion
+ * @phba: device private structure instance
+ * @vlan_tag: VLAN tag
+ *
+ * Issue the MBX Cmd and wait for the completion of the
+ * command.
+ *
+ * returns
+ *	Success: 0
+ *	Failure: Non-Zero Value
+ **/
+int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag)
 {
-	struct be_cmd_get_def_gateway_req *req;
-	struct be_dma_mem nonemb_cmd;
 	int rc;
+	unsigned int tag;
 
-	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
-				 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
-				 sizeof(*gateway));
-	if (rc)
-		return rc;
-
-	req = nonemb_cmd.va;
-	req->ip_type = ip_type;
+	tag = be_cmd_set_vlan(phba, vlan_tag);
+	if (!tag) {
+		beiscsi_log(phba, KERN_ERR,
+			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+			    "BG_%d : VLAN Setting Failed\n");
+		return -EBUSY;
+	}
 
-	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway,
-				    sizeof(*gateway));
+	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+	if (rc) {
+		beiscsi_log(phba, KERN_ERR,
+			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
+			    "BS_%d : VLAN MBX Cmd Failed\n");
+		return rc;
+	}
+	return rc;
 }
 
-int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-		     struct be_cmd_get_if_info_resp **if_info)
+
+int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
+			struct be_cmd_get_if_info_resp **if_info)
 {
 	struct be_cmd_get_if_info_req *req;
 	struct be_dma_mem nonemb_cmd;
 	uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
 	int rc;
 
-	rc = mgmt_get_all_if_id(phba);
+	rc = beiscsi_if_get_handle(phba);
 	if (rc)
 		return rc;
 
@@ -1364,123 +877,317 @@ unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
 	return tag;
 }
 
+static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
+				       unsigned int tag)
+{
+	struct be_cmd_get_boot_target_resp *boot_resp;
+	struct be_cmd_resp_logout_fw_sess *logo_resp;
+	struct be_cmd_get_session_resp *sess_resp;
+	struct be_mcc_wrb *wrb;
+	struct boot_struct *bs;
+	int boot_work, status;
+
+	if (!test_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BG_%d : %s no boot work %lx\n",
+			      __func__, phba->state);
+		return;
+	}
+
+	if (phba->boot_struct.tag != tag) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BG_%d : %s tag mismatch %d:%d\n",
+			      __func__, tag, phba->boot_struct.tag);
+		return;
+	}
+	bs = &phba->boot_struct;
+	boot_work = 1;
+	status = 0;
+	switch (bs->action) {
+	case BEISCSI_BOOT_REOPEN_SESS:
+		status = __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
+		if (!status)
+			bs->action = BEISCSI_BOOT_GET_SHANDLE;
+		else
+			bs->retry--;
+		break;
+	case BEISCSI_BOOT_GET_SHANDLE:
+		status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
+		if (!status) {
+			boot_resp = embedded_payload(wrb);
+			bs->s_handle = boot_resp->boot_session_handle;
+		}
+		if (bs->s_handle == BE_BOOT_INVALID_SHANDLE) {
+			bs->action = BEISCSI_BOOT_REOPEN_SESS;
+			bs->retry--;
+		} else {
+			bs->action = BEISCSI_BOOT_GET_SINFO;
+		}
+		break;
+	case BEISCSI_BOOT_GET_SINFO:
+		status = __beiscsi_mcc_compl_status(phba, tag, NULL,
+						    &bs->nonemb_cmd);
+		if (!status) {
+			sess_resp = bs->nonemb_cmd.va;
+			memcpy(&bs->boot_sess, &sess_resp->session_info,
+			       sizeof(struct mgmt_session_info));
+			bs->action = BEISCSI_BOOT_LOGOUT_SESS;
+		} else {
+			__beiscsi_log(phba, KERN_ERR,
+				      "BG_%d : get boot session info error : 0x%x\n",
+				      status);
+			boot_work = 0;
+		}
+		pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+				    bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
+		bs->nonemb_cmd.va = NULL;
+		break;
+	case BEISCSI_BOOT_LOGOUT_SESS:
+		status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
+		if (!status) {
+			logo_resp = embedded_payload(wrb);
+			if (logo_resp->session_status != BE_SESS_STATUS_CLOSE) {
+				__beiscsi_log(phba, KERN_ERR,
+					      "BG_%d : FW boot session logout error : 0x%x\n",
+					      logo_resp->session_status);
+			}
+		}
+		/* continue to create boot_kset even if logout failed? */
+		bs->action = BEISCSI_BOOT_CREATE_KSET;
+		break;
+	default:
+		break;
+	}
+
+	/* clear the tag so no other completion matches this tag */
+	bs->tag = 0;
+	if (!bs->retry) {
+		boot_work = 0;
+		__beiscsi_log(phba, KERN_ERR,
+			      "BG_%d : failed to setup boot target: status %d action %d\n",
+			      status, bs->action);
+	}
+	if (!boot_work) {
+		/* wait for next event to start boot_work */
+		clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
+		return;
+	}
+	schedule_work(&phba->boot_work);
+}
+
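beiscsi_boot_process_compl() above is only reached for tags flagged MCC_TAG_STATE_ASYNC: the MCC completion path invokes the registered cbfn instead of waking a sleeping waiter. A hedged sketch of that dispatch (hypothetical helper; the actual plumbing lives in the MCC CQ processing code):

static void example_async_tag_dispatch(struct beiscsi_hba *phba,
				       unsigned int tag)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* async tags carry a callback instead of a waiter */
	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state))
		ctrl->ptag_state[tag].cbfn(phba, tag);
}
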
 /**
- * be_mgmt_get_boot_shandle()- Get the session handle
- * @phba: device priv structure instance
- * @s_handle: session handle returned for boot session.
+ * beiscsi_boot_logout_sess()- Logout from boot FW session
+ * @phba: Device priv structure instance
+ *
+ * return
+ *	the TAG used for MBOX Command
  *
- * Get the boot target session handle. In case of
- * crashdump mode driver has to issue and MBX Cmd
- * for FW to login to boot target
+ */
+unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_logout_fw_sess *req;
+	unsigned int tag;
+
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = alloc_mcc_wrb(phba, &tag);
+	if (!wrb) {
+		mutex_unlock(&ctrl->mbox_lock);
+		return 0;
+	}
+
+	req = embedded_payload(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			   OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
+			   sizeof(struct be_cmd_req_logout_fw_sess));
+	/* Use the session handle copied into boot_sess */
+	req->session_handle = phba->boot_struct.boot_sess.session_handle;
+
+	phba->boot_struct.tag = tag;
+	set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+	ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+
+	be_mcc_notify(phba, tag);
+	mutex_unlock(&ctrl->mbox_lock);
+
+	return tag;
+}
+/**
+ * beiscsi_boot_reopen_sess()- Reopen boot session
+ * @phba: Device priv structure instance
  *
  * return
- *	Success: 0
- *	Failure: Non-Zero value
+ *	the TAG used for MBOX Command
  *
  **/
-int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
-			      unsigned int *s_handle)
+unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba)
 {
-	struct be_cmd_get_boot_target_resp *boot_resp;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
+	struct be_cmd_reopen_session_req *req;
 	unsigned int tag;
-	uint8_t boot_retry = 3;
-	int rc;
 
-	do {
-		/* Get the Boot Target Session Handle and Count*/
-		tag = mgmt_get_boot_target(phba);
-		if (!tag) {
-			beiscsi_log(phba, KERN_ERR,
-				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
-				    "BG_%d : Getting Boot Target Info Failed\n");
-			return -EAGAIN;
-		}
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = alloc_mcc_wrb(phba, &tag);
+	if (!wrb) {
+		mutex_unlock(&ctrl->mbox_lock);
+		return 0;
+	}
 
 
-		rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
-		if (rc) {
-			beiscsi_log(phba, KERN_ERR,
-				    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-				    "BG_%d : MBX CMD get_boot_target Failed\n");
-			return -EBUSY;
-		}
+	req = embedded_payload(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			   OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
+			   sizeof(struct be_cmd_reopen_session_resp));
+	req->reopen_type = BE_REOPEN_BOOT_SESSIONS;
+	req->session_handle = BE_BOOT_INVALID_SHANDLE;
 
-		boot_resp = embedded_payload(wrb);
+	phba->boot_struct.tag = tag;
+	set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+	ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
 
-		/* Check if the there are any Boot targets configured */
-		if (!boot_resp->boot_session_count) {
-			beiscsi_log(phba, KERN_INFO,
-				    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-				    "BG_%d  ;No boot targets configured\n");
-			return -ENXIO;
-		}
+	be_mcc_notify(phba, tag);
+	mutex_unlock(&ctrl->mbox_lock);
+	return tag;
+}
 
-		/* FW returns the session handle of the boot session */
-		if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
-			*s_handle = boot_resp->boot_session_handle;
-			return 0;
-		}
 
-		/* Issue MBX Cmd to FW to login to the boot target */
-		tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS,
-					  INVALID_SESS_HANDLE);
-		if (!tag) {
-			beiscsi_log(phba, KERN_ERR,
-				    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-				    "BG_%d : mgmt_reopen_session Failed\n");
-			return -EAGAIN;
-		}
+/**
+ * beiscsi_boot_get_sinfo()- Get boot session info
+ * @phba: device priv structure instance
+ *
+ * Fetches the boot_struct.s_handle info from FW.
+ * return
+ *	the TAG used for MBOX Command
+ *
+ **/
+unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_cmd_get_session_resp *resp;
+	struct be_cmd_get_session_req *req;
+	struct be_dma_mem *nonemb_cmd;
+	struct be_mcc_wrb *wrb;
+	struct be_sge *sge;
+	unsigned int tag;
 
-		rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
-		if (rc) {
-			beiscsi_log(phba, KERN_ERR,
-				    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-				    "BG_%d : mgmt_reopen_session Failed");
-			return rc;
-		}
-	} while (--boot_retry);
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = alloc_mcc_wrb(phba, &tag);
+	if (!wrb) {
+		mutex_unlock(&ctrl->mbox_lock);
+		return 0;
+	}
+
+	nonemb_cmd = &phba->boot_struct.nonemb_cmd;
+	nonemb_cmd->size = sizeof(*resp);
+	nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+					      nonemb_cmd->size,
+					      &nonemb_cmd->dma);
+	if (!nonemb_cmd->va) {
+		mutex_unlock(&ctrl->mbox_lock);
+		return 0;
+	}
+
+	req = nonemb_cmd->va;
+	memset(req, 0, sizeof(*req));
+	sge = nonembedded_sgl(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			   OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
+			   sizeof(*resp));
+	req->session_handle = phba->boot_struct.s_handle;
+	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(nonemb_cmd->size);
+
+	phba->boot_struct.tag = tag;
+	set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+	ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+
+	be_mcc_notify(phba, tag);
+	mutex_unlock(&ctrl->mbox_lock);
+	return tag;
+}
+
+unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async)
+{
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_get_boot_target_req *req;
+	unsigned int tag;
+
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = alloc_mcc_wrb(phba, &tag);
+	if (!wrb) {
+		mutex_unlock(&ctrl->mbox_lock);
+		return 0;
+	}
+
+	req = embedded_payload(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			   OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
+			   sizeof(struct be_cmd_get_boot_target_resp));
 
-	/* Couldn't log into the boot target */
-	beiscsi_log(phba, KERN_ERR,
-		    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-		    "BG_%d : Login to Boot Target Failed\n");
-	return -ENXIO;
+	if (async) {
+		phba->boot_struct.tag = tag;
+		set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
+		ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
+	}
+
+	be_mcc_notify(phba, tag);
+	mutex_unlock(&ctrl->mbox_lock);
+	return tag;
 }
 }
 
 /**
- * @phba: device private structure instance
- * @vlan_tag: VLAN tag
+ * beiscsi_boot_get_shandle()- Get boot session handle
+ * @phba: device priv structure instance
+ * @s_handle: session handle returned for boot session.
  *
- * Issue the MBX Cmd and wait for the completion of the
- * command.
+ * return
+ *	Success: 1
+ *	Failure: negative
  *
- * returns
- *	Success: 0
- *	Failure: Non-Xero Value
  **/
-int mgmt_set_vlan(struct beiscsi_hba *phba,
-		   uint16_t vlan_tag)
+int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle)
 {
-	int rc;
+	struct be_cmd_get_boot_target_resp *boot_resp;
+	struct be_mcc_wrb *wrb;
 	unsigned int tag;
+	int rc;
 
-	tag = be_cmd_set_vlan(phba, vlan_tag);
+	*s_handle = BE_BOOT_INVALID_SHANDLE;
+	/* get configured boot session count and handle */
+	tag = __beiscsi_boot_get_shandle(phba, 0);
 	if (!tag) {
 		beiscsi_log(phba, KERN_ERR,
-			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
-			    "BG_%d : VLAN Setting Failed\n");
-		return -EBUSY;
+			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
+			    "BG_%d : Getting Boot Target Info Failed\n");
+		return -EAGAIN;
 	}
 
-	rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+	rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
 	if (rc) {
 		beiscsi_log(phba, KERN_ERR,
-			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
-			    "BS_%d : VLAN MBX Cmd Failed\n");
-		return rc;
+			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
+			    "BG_%d : MBX CMD get_boot_target Failed\n");
+		return -EBUSY;
 	}
-	return rc;
+
+	boot_resp = embedded_payload(wrb);
+	/* check if there are any boot targets configured */
+	if (!boot_resp->boot_session_count) {
+		__beiscsi_log(phba, KERN_INFO,
+			      "BG_%d : No boot targets configured\n");
+		return -ENXIO;
+	}
+
+	/* only if FW has logged in to the boot target, s_handle is valid */
+	*s_handle = boot_resp->boot_session_handle;
+	return 1;
 }
 
 /**
@@ -1645,7 +1352,6 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
 {
 	struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
 
-	memset(pwrb, 0, sizeof(*pwrb));
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
 		      max_send_data_segment_length, pwrb,
 		      params->dw[offsetof(struct amap_beiscsi_offload_params,
@@ -1717,8 +1423,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
 {
 	struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
 
-	memset(pwrb, 0, sizeof(*pwrb));
-
 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
 		      max_burst_length, pwrb, params->dw[offsetof
 		      (struct amap_beiscsi_offload_params,
@@ -1790,70 +1494,3 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
 		     (params->dw[offsetof(struct amap_beiscsi_offload_params,
 		      exp_statsn) / 32] + 1));
 }
-
-/**
- * beiscsi_logout_fw_sess()- Firmware Session Logout
- * @phba: Device priv structure instance
- * @fw_sess_handle: FW session handle
- *
- * Logout from the FW established sessions.
- * returns
- *  Success: 0
- *  Failure: Non-Zero Value
- *
- */
-int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
-		uint32_t fw_sess_handle)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct be_cmd_req_logout_fw_sess *req;
-	struct be_cmd_resp_logout_fw_sess *resp;
-	unsigned int tag;
-	int rc;
-
-	beiscsi_log(phba, KERN_INFO,
-		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-		    "BG_%d : In bescsi_logout_fwboot_sess\n");
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		beiscsi_log(phba, KERN_INFO,
-			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
-			    "BG_%d : MBX Tag Failure\n");
-		return -EINVAL;
-	}
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-			   OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
-			   sizeof(struct be_cmd_req_logout_fw_sess));
-
-	/* Set the session handle */
-	req->session_handle = fw_sess_handle;
-	be_mcc_notify(phba, tag);
-	mutex_unlock(&ctrl->mbox_lock);
-
-	rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
-	if (rc) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-			    "BG_%d : MBX CMD FW_SESSION_LOGOUT_TARGET Failed\n");
-		return -EBUSY;
-	}
-
-	resp = embedded_payload(wrb);
-	if (resp->session_status !=
-		BEISCSI_MGMT_SESSION_CLOSE) {
-		beiscsi_log(phba, KERN_ERR,
-			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
-			    "BG_%d : FW_SESSION_LOGOUT_TARGET resp : 0x%x\n",
-			    resp->session_status);
-		rc = -EINVAL;
-	}
-
-	return rc;
-}

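Both boot helpers above follow the same MCC idiom: take the mailbox lock, allocate a WRB and tag, build the request, optionally mark the tag asynchronous with a per-tag completion callback, ring the doorbell, and hand the tag back so a synchronous caller can wait on it with beiscsi_mccq_compl_wait(). A minimal userspace model of that tag/callback bookkeeping (illustrative names only, not the be2iscsi API):

#include <stdbool.h>
#include <stdio.h>

#define MAX_TAGS 8

struct tag_state {
	bool async;                     /* complete via callback, no waiter */
	void (*cbfn)(unsigned int tag); /* completion callback for async tags */
};

static struct tag_state ptag_state[MAX_TAGS];
static bool tag_busy[MAX_TAGS];

/* Issue a command: grab a free tag, optionally mark it async.
 * Returns the tag, or 0 on failure (tag 0 is reserved as "no tag"). */
static unsigned int issue_cmd(bool async, void (*cbfn)(unsigned int))
{
	for (unsigned int tag = 1; tag < MAX_TAGS; tag++) {
		if (!tag_busy[tag]) {
			tag_busy[tag] = true;
			ptag_state[tag].async = async;
			ptag_state[tag].cbfn = cbfn;
			return tag;
		}
	}
	return 0;
}

/* Completion path (the CQ/IRQ side): async tags run their callback and
 * are recycled; sync tags stay pending for the waiting thread. */
static void completion_irq(unsigned int tag)
{
	if (ptag_state[tag].async) {
		ptag_state[tag].cbfn(tag);
		tag_busy[tag] = false;
	}
}

static void boot_work_cb(unsigned int tag)
{
	printf("boot work for tag %u resumed from completion\n", tag);
}

int main(void)
{
	unsigned int tag = issue_cmd(true, boot_work_cb);

	if (tag)
		completion_irq(tag);	/* simulate firmware finishing */
	return 0;
}
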
+ 20 - 31
drivers/scsi/be2iscsi/be_mgmt.h

@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
  *
  * Contact Information:
- * linux-drivers@avagotech.com
+ * linux-drivers@broadcom.com
  *
  * Emulex
  * 3333 Susan Street
@@ -96,7 +96,6 @@ struct mcc_wrb {
 	struct mcc_wrb_payload payload;
 };
 
-int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
 int mgmt_open_connection(struct beiscsi_hba *phba,
 			 struct sockaddr *dst_addr,
 			 struct beiscsi_endpoint *beiscsi_ep,
@@ -266,50 +265,41 @@ struct beiscsi_endpoint {
 	u16 cid_vld;
 };
 
-int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
-				 struct beiscsi_hba *phba);
-int mgmt_get_port_name(struct be_ctrl_info *ctrl,
-		       struct beiscsi_hba *phba);
-
 unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
 					 struct beiscsi_endpoint *beiscsi_ep,
 					 unsigned short cid,
 					 unsigned short issue_reset,
 					 unsigned short savecfg_flag);
 
-int mgmt_set_ip(struct beiscsi_hba *phba,
-		struct iscsi_iface_param_info *ip_param,
-		struct iscsi_iface_param_info *subnet_param,
-		uint32_t boot_proto);
+int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
 
-unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba);
+int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
+			 u8 *ip, u8 *subnet);
 
-unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
-				  unsigned int reopen_type,
-				  unsigned sess_handle);
+int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw);
 
-unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
-				   u32 boot_session_handle,
-				   struct be_dma_mem *nonemb_cmd);
+int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
+		      struct be_cmd_get_def_gateway_resp *resp);
 
 int mgmt_get_nic_conf(struct beiscsi_hba *phba,
 		      struct be_cmd_get_nic_conf_resp *mac);
 
-int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type,
-		     struct be_cmd_get_if_info_resp **if_info);
+int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
+			struct be_cmd_get_if_info_resp **if_info);
+
+unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba);
+
+int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
 
-int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,
-		     struct be_cmd_get_def_gateway_resp *gateway);
+unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba);
 
-int mgmt_set_gateway(struct beiscsi_hba *phba,
-		     struct iscsi_iface_param_info *gateway_param);
+unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba);
 
-int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
-			      unsigned int *s_handle);
+unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba);
 
-unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
+unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async);
 
-int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle);
 
 ssize_t beiscsi_drvr_ver_disp(struct device *dev,
 			       struct device_attribute *attr, char *buf);
@@ -339,7 +329,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
 			     struct wrb_handle *pwrb_handle,
 			     struct hwi_wrb_context *pwrb_context);
 
-void beiscsi_ue_detect(struct beiscsi_hba *phba);
 int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
 			 struct be_set_eqd *, int num);
 

+ 2 - 2
drivers/scsi/bfa/bfa_fcs_lport.c

@@ -5827,13 +5827,13 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
 	bfa_port_speed_t max_speed = 0;
 	struct bfa_port_attr_s port_attr;
 	bfa_port_speed_t port_speed, rport_speed;
-	bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
-
+	bfa_boolean_t trl_enabled;
 
 	if (port == NULL)
 		return 0;
 
 	fcs = port->fcs;
+	trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
 
 	/* Get Physical port's current speed */
 	bfa_fcport_get_attr(port->fcs->bfa, &port_attr);

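The bfa change is the classic "dereference in the initializer, NULL check afterwards" fix: port->fcs was read while initializing trl_enabled, before the if (port == NULL) guard could fire. A reduced sketch of the bug and the fix (hypothetical types, not the bfa driver):

#include <stddef.h>

struct fcs { int ratelim; };
struct port { struct fcs *fcs; };

/* Buggy shape: the initializer dereferences port before the check. */
static int max_speed_buggy(struct port *port)
{
	int trl_enabled = port->fcs->ratelim;	/* crashes when port == NULL */

	if (port == NULL)
		return 0;
	return trl_enabled;
}

/* Fixed shape: declare first, check, then dereference. */
static int max_speed_fixed(struct port *port)
{
	int trl_enabled;

	if (port == NULL)
		return 0;
	trl_enabled = port->fcs->ratelim;
	return trl_enabled;
}

int main(void)
{
	struct fcs f = { .ratelim = 1 };
	struct port p = { .fcs = &f };

	return max_speed_fixed(&p) - max_speed_buggy(&p);
}
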
+ 2 - 2
drivers/scsi/bnx2fc/bnx2fc_els.c

@@ -254,7 +254,7 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
 	return rc;
 }
 
-void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
+static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
 {
 	struct bnx2fc_mp_req *mp_req;
 	struct fc_frame_header *fc_hdr, *fh;
@@ -364,7 +364,7 @@ srr_compl_done:
 	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
 }
 
-void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
+static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
 {
 	struct bnx2fc_cmd *orig_io_req, *new_io_req;
 	struct bnx2fc_cmd *rec_req;

+ 6 - 6
drivers/scsi/bnx2fc/bnx2fc_fcoe.c

@@ -625,7 +625,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
  *
  * @arg:	ptr to bnx2fc_percpu_info structure
  */
-int bnx2fc_percpu_io_thread(void *arg)
+static int bnx2fc_percpu_io_thread(void *arg)
 {
 	struct bnx2fc_percpu_s *p = arg;
 	struct bnx2fc_work *work, *tmp;
@@ -1410,9 +1410,10 @@ bind_err:
 	return NULL;
 }
 
-struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
-				      struct net_device *netdev,
-				      enum fip_state fip_mode)
+static struct bnx2fc_interface *
+bnx2fc_interface_create(struct bnx2fc_hba *hba,
+			struct net_device *netdev,
+			enum fip_state fip_mode)
 {
 	struct fcoe_ctlr_device *ctlr_dev;
 	struct bnx2fc_interface *interface;
@@ -2765,8 +2766,7 @@ static void __exit bnx2fc_mod_exit(void)
 	 * held.
 	 */
 	mutex_lock(&bnx2fc_dev_lock);
-	list_splice(&adapter_list, &to_be_deleted);
-	INIT_LIST_HEAD(&adapter_list);
+	list_splice_init(&adapter_list, &to_be_deleted);
 	adapter_count = 0;
 	mutex_unlock(&bnx2fc_dev_lock);
 

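list_splice_init() is exactly the two-step sequence it replaces here — splice the entries onto the destination, then reinitialize the now-stale source head — fused into one call so the source can never be left pointing at moved nodes. A minimal circular-list sketch of the equivalence (not the kernel implementation):

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* Move every entry of @src to the front of @dst, leaving @src's head
 * untouched (and therefore stale) -- the caller must reinit it. */
static void list_splice(struct list_head *src, struct list_head *dst)
{
	if (list_empty(src))
		return;
	src->next->prev = dst;
	src->prev->next = dst->next;
	dst->next->prev = src->prev;
	dst->next = src->next;
}

/* The fused helper: splice, then re-arm the source head. */
static void list_splice_init(struct list_head *src, struct list_head *dst)
{
	list_splice(src, dst);
	INIT_LIST_HEAD(src);
}

int main(void)
{
	struct list_head a, b, node;

	INIT_LIST_HEAD(&a);
	INIT_LIST_HEAD(&b);
	node.next = &a; node.prev = &a;	/* one entry on list a */
	a.next = &node; a.prev = &node;

	list_splice_init(&a, &b);
	printf("src empty: %d, dst empty: %d\n", list_empty(&a), list_empty(&b));
	return 0;
}
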
+ 1 - 1
drivers/scsi/bnx2fc/bnx2fc_hwi.c

@@ -994,7 +994,7 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
 
 }
 
-struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 {
 	struct bnx2fc_work *work;
 	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);

+ 1 - 1
drivers/scsi/bnx2fc/bnx2fc_io.c

@@ -1079,7 +1079,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
 	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
 }
 
-int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
+static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
 {
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	int rc = SUCCESS;

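The bnx2fc hunks in this series are all the same one-line cure from the "Mark symbols static" patches: functions used only within their translation unit get internal linkage, which keeps them out of the kernel's global namespace and lets the compiler flag dead or unprototyped code. In miniature:

#include <stdio.h>

/* File-local helper: internal linkage, no header prototype needed,
 * and -Wmissing-prototypes / dead-code analysis can reason about it. */
static int square(int x)
{
	return x * x;
}

/* Only the intended entry point is externally visible. */
int main(void)
{
	printf("%d\n", square(7));
	return 0;
}
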
+ 2 - 3
drivers/scsi/csiostor/csio_scsi.c

@@ -1721,7 +1721,7 @@ out:
 
 	/* Wake up waiting threads */
 	csio_scsi_cmnd(req) = NULL;
-	complete_all(&req->cmplobj);
+	complete(&req->cmplobj);
 }
 
 /*
@@ -1945,6 +1945,7 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
 	ready = csio_is_lnode_ready(ln);
 	tmo = CSIO_SCSI_ABRT_TMO_MS;
 
+	reinit_completion(&ioreq->cmplobj);
 	spin_lock_irq(&hw->lock);
 	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
 	spin_unlock_irq(&hw->lock);
@@ -1964,8 +1965,6 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
 		goto inval_scmnd;
 	}
 
-	/* Wait for completion */
-	init_completion(&ioreq->cmplobj);
 	wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
 
 	/* FW didnt respond to abort within our timeout */

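The csiostor fix re-arms the completion before the abort is issued (instead of initializing it mid-flight) and wakes with complete() rather than complete_all(), since there is exactly one waiter. A userspace model of the completion semantics this relies on — a stale, unconsumed "done" event would otherwise satisfy the next wait instantly (illustrative only, not the kernel implementation; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	unsigned int done;	/* posted events not yet consumed */
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void reinit_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 0;		/* discard stale events before reuse */
	pthread_mutex_unlock(&c->lock);
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done++;
	pthread_cond_signal(&c->cond);	/* wake one waiter, not all */
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	c->done--;
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct completion c;

	init_completion(&c);
	reinit_completion(&c);	/* re-arm before issuing the abort */
	complete(&c);		/* firmware response arrives */
	wait_for_completion(&c);
	printf("abort completed\n");
	return 0;
}
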
+ 35 - 46
drivers/scsi/cxlflash/main.c

@@ -822,17 +822,6 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
 	}
 }
 
-/**
- * cxlflash_shutdown() - shutdown handler
- * @pdev:	PCI device associated with the host.
- */
-static void cxlflash_shutdown(struct pci_dev *pdev)
-{
-	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
-
-	notify_shutdown(cfg, false);
-}
-
 /**
  * cxlflash_remove() - PCI entry point to tear down host
  * @pdev:	PCI device associated with the host.
@@ -844,6 +833,11 @@ static void cxlflash_remove(struct pci_dev *pdev)
 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
 	ulong lock_flags;
 
+	if (!pci_is_enabled(pdev)) {
+		pr_debug("%s: Device is disabled\n", __func__);
+		return;
+	}
+
 	/* If a Task Management Function is active, wait for it to complete
 	 * before continuing with remove.
 	 */
@@ -1046,6 +1040,8 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 	do {
 		msleep(delay_us / 1000);
 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+		if (status == U64_MAX)
+			nretry /= 2;
 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
 		 nretry--);
 
@@ -1077,6 +1073,8 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 	do {
 		msleep(delay_us / 1000);
 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+		if (status == U64_MAX)
+			nretry /= 2;
 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
 		 nretry--);
 
@@ -1095,42 +1093,25 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
  * online. This toggling action can cause this routine to delay up to a few
  * seconds. When configured to use the internal LUN feature of the AFU, a
  * failure to come online is overridden.
- *
- * Return:
- *	0 when the WWPN is successfully written and the port comes back online
- *	-1 when the port fails to go offline or come back up online
  */
-static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
-			u64 wwpn)
+static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
+			 u64 wwpn)
 {
-	int rc = 0;
-
 	set_port_offline(fc_regs);
-
 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 			       FC_PORT_STATUS_RETRY_CNT)) {
 		pr_debug("%s: wait on port %d to go offline timed out\n",
 			 __func__, port);
-		rc = -1; /* but continue on to leave the port back online */
 	}
 
-	if (rc == 0)
-		writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
-
-	/* Always return success after programming WWPN */
-	rc = 0;
+	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
 
 	set_port_online(fc_regs);
-
 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
 			      FC_PORT_STATUS_RETRY_CNT)) {
-		pr_err("%s: wait on port %d to go online timed out\n",
-		       __func__, port);
+		pr_debug("%s: wait on port %d to go online timed out\n",
+			 __func__, port);
 	}
-
-	pr_debug("%s: returning rc=%d\n", __func__, rc);
-
-	return rc;
 }
 
 /**
@@ -1187,7 +1168,7 @@ static const struct asyc_intr_info ainfo[] = {
 	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
 	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
 	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
-	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
+	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
@@ -1195,7 +1176,7 @@ static const struct asyc_intr_info ainfo[] = {
 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
 	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
-	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
+	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
 	{0x0, "", 0, 0}		/* terminator */
 };
 
@@ -1631,15 +1612,10 @@ static int init_global(struct cxlflash_cfg *cfg)
 			  [FC_CRC_THRESH / 8]);
 
 		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
-		if (wwpn[i] != 0 &&
-		    afu_set_wwpn(afu, i,
-				 &afu->afu_map->global.fc_regs[i][0],
-				 wwpn[i])) {
-			dev_err(dev, "%s: failed to set WWPN on port %d\n",
-			       __func__, i);
-			rc = -EIO;
-			goto out;
-		}
+		if (wwpn[i] != 0)
+			afu_set_wwpn(afu, i,
+				     &afu->afu_map->global.fc_regs[i][0],
+				     wwpn[i]);
 		/* Programming WWPN back to back causes additional
 		 * offline/online transitions and a PLOGI
 		 */
@@ -2048,6 +2024,11 @@ retry:
  * cxlflash_eh_host_reset_handler() - reset the host adapter
  * @scp:	SCSI command from stack identifying host.
  *
+ * Following a reset, the state is evaluated again in case an EEH occurred
+ * during the reset. In such a scenario, the host reset will either yield
+ * until the EEH recovery is complete or return success or failure based
+ * upon the current device state.
+ *
  * Return:
  *	SUCCESS as defined in scsi/scsi.h
  *	FAILED as defined in scsi/scsi.h
@@ -2080,7 +2061,8 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
 		} else
 			cfg->state = STATE_NORMAL;
 		wake_up_all(&cfg->reset_waitq);
-		break;
+		ssleep(1);
+		/* fall through */
 	case STATE_RESET:
 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
 		if (cfg->state == STATE_NORMAL)
@@ -2596,6 +2578,9 @@ out_remove:
  * @pdev:	PCI device struct.
  * @state:	PCI channel state.
  *
+ * When an EEH occurs during an active reset, wait until the reset is
+ * complete and then take action based upon the device state.
+ *
  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
  */
 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
@@ -2609,6 +2594,10 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
 
 	switch (state) {
 	case pci_channel_io_frozen:
+		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+		if (cfg->state == STATE_FAILTERM)
+			return PCI_ERS_RESULT_DISCONNECT;
+
 		cfg->state = STATE_RESET;
 		scsi_block_requests(cfg->host);
 		drain_ioctls(cfg);
@@ -2685,7 +2674,7 @@ static struct pci_driver cxlflash_driver = {
 	.id_table = cxlflash_pci_table,
 	.probe = cxlflash_probe,
 	.remove = cxlflash_remove,
-	.shutdown = cxlflash_shutdown,
+	.shutdown = cxlflash_remove,
 	.err_handler = &cxlflash_err_handler,
 };
 

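The status == U64_MAX checks added to the port-wait loops encode a PCI convention: a read from a device that has dropped off the bus (EEH freeze, hot unplug) returns all 1s, so an all-Fs status means further polling is pointless and the retry budget gets halved. A self-contained sketch of the heuristic (the register reader is a stand-in for readq_be() on real iomem):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATUS_MASK	0xFull
#define STATUS_ONLINE	0x1ull

/* Simulated MMIO read from a dead/frozen function: PCI gives all 1s. */
static uint64_t read_status_reg(void)
{
	return UINT64_MAX;
}

/* Poll for ONLINE, halving the remaining retry budget whenever a read
 * comes back all-Fs so a dead device fails fast instead of burning
 * every retry at full price. */
static bool port_came_online(unsigned int nretry)
{
	uint64_t status;

	do {
		status = read_status_reg();
		if (status == UINT64_MAX)
			nretry /= 2;
	} while ((status & STATUS_MASK) != STATUS_ONLINE && nretry--);

	return (status & STATUS_MASK) == STATUS_ONLINE;
}

int main(void)
{
	printf("online: %d\n", port_came_online(64));
	return 0;
}
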
+ 78 - 102
drivers/scsi/cxlflash/superpipe.c

@@ -709,14 +709,13 @@ int cxlflash_disk_release(struct scsi_device *sdev,
  * @cfg:	Internal structure associated with the host.
  * @ctxi:	Context to release.
  *
- * This routine is safe to be called with a a non-initialized context
- * and is tolerant of being called with the context's mutex held (it
- * will be unlocked if necessary before freeing). Also note that the
- * routine conditionally checks for the existence of the context control
- * map before clearing the RHT registers and context capabilities because
- * it is possible to destroy a context while the context is in the error
- * state (previous mapping was removed [so there is no need to worry about
- * clearing] and context is waiting for a new mapping).
+ * This routine is safe to be called with a non-initialized context.
+ * Also note that the routine conditionally checks for the existence
+ * of the context control map before clearing the RHT registers and
+ * context capabilities because it is possible to destroy a context
+ * while the context is in the error state (previous mapping was
+ * removed [so there is no need to worry about clearing] and context
+ * is waiting for a new mapping).
  */
 static void destroy_context(struct cxlflash_cfg *cfg,
 			    struct ctx_info *ctxi)
@@ -732,9 +731,6 @@ static void destroy_context(struct cxlflash_cfg *cfg,
 			writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
 			writeq_be(0, &ctxi->ctrl_map->ctx_cap);
 		}
-
-		if (mutex_is_locked(&ctxi->mutex))
-			mutex_unlock(&ctxi->mutex);
 	}
 
 	/* Free memory associated with context */
@@ -792,32 +788,58 @@ err:
  * @cfg:	Internal structure associated with the host.
  * @ctx:	Previously obtained CXL context reference.
  * @ctxid:	Previously obtained process element associated with CXL context.
- * @adap_fd:	Previously obtained adapter fd associated with CXL context.
  * @file:	Previously obtained file associated with CXL context.
  * @perms:	User-specified permissions.
- *
- * Upon return, the context is marked as initialized and the context's mutex
- * is locked.
  */
 static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
-			 struct cxl_context *ctx, int ctxid, int adap_fd,
-			 struct file *file, u32 perms)
+			 struct cxl_context *ctx, int ctxid, struct file *file,
+			 u32 perms)
 {
 	struct afu *afu = cfg->afu;
 
 	ctxi->rht_perms = perms;
 	ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
 	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
-	ctxi->lfd = adap_fd;
 	ctxi->pid = current->tgid; /* tgid = pid */
 	ctxi->ctx = ctx;
+	ctxi->cfg = cfg;
 	ctxi->file = file;
 	ctxi->initialized = true;
 	mutex_init(&ctxi->mutex);
+	kref_init(&ctxi->kref);
 	INIT_LIST_HEAD(&ctxi->luns);
 	INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
+}
 
+/**
+ * remove_context() - context kref release handler
+ * @kref:	Kernel reference associated with context to be removed.
+ *
+ * When a context no longer has any references it can safely be removed
+ * from global access and destroyed. Note that it is assumed the thread
+ * relinquishing access to the context holds its mutex.
+ */
+static void remove_context(struct kref *kref)
+{
+	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
+	struct cxlflash_cfg *cfg = ctxi->cfg;
+	u64 ctxid = DECODE_CTXID(ctxi->ctxid);
+
+	/* Remove context from table/error list */
+	WARN_ON(!mutex_is_locked(&ctxi->mutex));
+	ctxi->unavail = true;
+	mutex_unlock(&ctxi->mutex);
+	mutex_lock(&cfg->ctx_tbl_list_mutex);
 	mutex_lock(&ctxi->mutex);
+
+	if (!list_empty(&ctxi->list))
+		list_del(&ctxi->list);
+	cfg->ctx_tbl[ctxid] = NULL;
+	mutex_unlock(&cfg->ctx_tbl_list_mutex);
+	mutex_unlock(&ctxi->mutex);
+
+	/* Context now completely uncoupled/unreachable */
+	destroy_context(cfg, ctxi);
 }
 
 /**
@@ -845,7 +867,6 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
 
 	int i;
 	int rc = 0;
-	int lfd;
 	u64 ctxid = DECODE_CTXID(detach->context_id),
 	    rctxid = detach->context_id;
 
@@ -887,40 +908,13 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
 			break;
 		}
 
-	/* Tear down context following last LUN cleanup */
-	if (list_empty(&ctxi->luns)) {
-		ctxi->unavail = true;
-		mutex_unlock(&ctxi->mutex);
-		mutex_lock(&cfg->ctx_tbl_list_mutex);
-		mutex_lock(&ctxi->mutex);
-
-		/* Might not have been in error list so conditionally remove */
-		if (!list_empty(&ctxi->list))
-			list_del(&ctxi->list);
-		cfg->ctx_tbl[ctxid] = NULL;
-		mutex_unlock(&cfg->ctx_tbl_list_mutex);
-		mutex_unlock(&ctxi->mutex);
-
-		lfd = ctxi->lfd;
-		destroy_context(cfg, ctxi);
-		ctxi = NULL;
+	/*
+	 * Release the context reference and the sdev reference that
+	 * bound this LUN to the context.
+	 */
+	if (kref_put(&ctxi->kref, remove_context))
 		put_ctx = false;
-
-		/*
-		 * As a last step, clean up external resources when not
-		 * already on an external cleanup thread, i.e.: close(adap_fd).
-		 *
-		 * NOTE: this will free up the context from the CXL services,
-		 * allowing it to dole out the same context_id on a future
-		 * (or even currently in-flight) disk_attach operation.
-		 */
-		if (lfd != -1)
-			sys_close(lfd);
-	}
-
-	/* Release the sdev reference that bound this LUN to the context */
 	scsi_device_put(sdev);
-
 out:
 	if (put_ctx)
 		put_context(ctxi);
@@ -941,34 +935,18 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
  *
  * This routine is the release handler for the fops registered with
  * the CXL services on an initial attach for a context. It is called
- * when a close is performed on the adapter file descriptor returned
- * to the user. Programmatically, the user is not required to perform
- * the close, as it is handled internally via the detach ioctl when
- * a context is being removed. Note that nothing prevents the user
- * from performing a close, but the user should be aware that doing
- * so is considered catastrophic and subsequent usage of the superpipe
- * API with previously saved off tokens will fail.
- *
- * When initiated from an external close (either by the user or via
- * a process tear down), the routine derives the context reference
- * and calls detach for each LUN associated with the context. The
- * final detach operation will cause the context itself to be freed.
- * Note that the saved off lfd is reset prior to calling detach to
- * signify that the final detach should not perform a close.
- *
- * When initiated from a detach operation as part of the tear down
- * of a context, the context is first completely freed and then the
- * close is performed. This routine will fail to derive the context
- * reference (due to the context having already been freed) and then
- * call into the CXL release entry point.
+ * when a close (explicitly by the user or as part of a process tear
+ * down) is performed on the adapter file descriptor returned to the
+ * user. The user should be aware that explicitly performing a close
+ * is considered catastrophic and subsequent usage of the superpipe
+ * API with previously saved off tokens will fail.
  *
- * Thus, with exception to when the CXL process element (context id)
- * lookup fails (a case that should theoretically never occur), every
- * call into this routine results in a complete freeing of a context.
- *
- * As part of the detach, all per-context resources associated with the LUN
- * are cleaned up. When detaching the last LUN for a context, the context
- * itself is cleaned up and released.
+ * This routine derives the context reference and calls detach for
+ * each LUN associated with the context. The final detach operation
+ * causes the context itself to be freed. With exception to when the
+ * CXL process element (context id) lookup fails (a case that should
+ * theoretically never occur), every call into this routine results
+ * in a complete freeing of a context.
  *
  * Return: 0 on success
  */
@@ -1006,11 +984,8 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
 		goto out;
 	}
 
-	dev_dbg(dev, "%s: close(%d) for context %d\n",
-		__func__, ctxi->lfd, ctxid);
+	dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid);
 
-	/* Reset the file descriptor to indicate we're on a close() thread */
-	ctxi->lfd = -1;
 	detach.context_id = ctxi->ctxid;
 	list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
 		_cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
@@ -1110,8 +1085,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto err;
 	}
 
-	dev_dbg(dev, "%s: fault(%d) for context %d\n",
-		__func__, ctxi->lfd, ctxid);
+	dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
 
 	if (likely(!ctxi->err_recovery_active)) {
 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1186,8 +1160,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
 		goto out;
 	}
 
-	dev_dbg(dev, "%s: mmap(%d) for context %d\n",
-		__func__, ctxi->lfd, ctxid);
+	dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
 
 	rc = cxl_fd_mmap(file, vma);
 	if (likely(!rc)) {
@@ -1377,12 +1350,12 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	lun_access->lli = lli;
 	lun_access->sdev = sdev;
 
-	/* Non-NULL context indicates reuse */
+	/* Non-NULL context indicates reuse (another context reference) */
 	if (ctxi) {
 		dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
 			__func__, rctxid);
+		kref_get(&ctxi->kref);
 		list_add(&lun_access->list, &ctxi->luns);
-		fd = ctxi->lfd;
 		goto out_attach;
 	}
 
@@ -1430,7 +1403,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	perms = SISL_RHT_PERM(attach->hdr.flags + 1);
 
 	/* Context mutex is locked upon return */
-	init_context(ctxi, cfg, ctx, ctxid, fd, file, perms);
+	init_context(ctxi, cfg, ctx, ctxid, file, perms);
 
 	rc = afu_attach(cfg, ctxi);
 	if (unlikely(rc)) {
@@ -1445,7 +1418,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	 * knows about us yet; we can be the only one holding our mutex.
 	 */
 	list_add(&lun_access->list, &ctxi->luns);
-	mutex_unlock(&ctxi->mutex);
 	mutex_lock(&cfg->ctx_tbl_list_mutex);
 	mutex_lock(&ctxi->mutex);
 	cfg->ctx_tbl[ctxid] = ctxi;
@@ -1453,7 +1425,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
 	fd_install(fd, file);
 
 out_attach:
-	attach->hdr.return_flags = 0;
+	if (fd != -1)
+		attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD;
+	else
+		attach->hdr.return_flags = 0;
+
 	attach->context_id = ctxi->ctxid;
 	attach->block_size = gli->blk_len;
 	attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
@@ -1494,7 +1470,7 @@ err:
 		file = NULL;
 	}
 
-	/* Cleanup our context; safe to call even with mutex locked */
+	/* Cleanup our context */
 	if (ctxi) {
 		destroy_context(cfg, ctxi);
 		ctxi = NULL;
@@ -1509,16 +1485,19 @@ err:
  * recover_context() - recovers a context in error
  * @cfg:	Internal structure associated with the host.
  * @ctxi:	Context to release.
+ * @adap_fd:	Adapter file descriptor associated with new/recovered context.
  *
  * Restablishes the state for a context-in-error.
  *
  * Return: 0 on success, -errno on failure
  */
-static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
+static int recover_context(struct cxlflash_cfg *cfg,
+			   struct ctx_info *ctxi,
+			   int *adap_fd)
 {
 	struct device *dev = &cfg->dev->dev;
 	int rc = 0;
-	int old_fd, fd = -1;
+	int fd = -1;
 	int ctxid = -1;
 	struct file *file;
 	struct cxl_context *ctx;
@@ -1566,9 +1545,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
 	 * No error paths after this point. Once the fd is installed it's
 	 * visible to user space and can't be undone safely on this thread.
 	 */
-	old_fd = ctxi->lfd;
 	ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
-	ctxi->lfd = fd;
 	ctxi->ctx = ctx;
 	ctxi->file = file;
 
@@ -1585,9 +1562,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
 	cfg->ctx_tbl[ctxid] = ctxi;
 	mutex_unlock(&cfg->ctx_tbl_list_mutex);
 	fd_install(fd, file);
-
-	/* Release the original adapter fd and associated CXL resources */
-	sys_close(old_fd);
+	*adap_fd = fd;
 out:
 	dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
 		__func__, ctxid, fd, rc);
@@ -1646,6 +1621,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
 	    rctxid = recover->context_id;
 	long reg;
 	int lretry = 20; /* up to 2 seconds */
+	int new_adap_fd = -1;
 	int rc = 0;
 
 	atomic_inc(&cfg->recovery_threads);
@@ -1675,7 +1651,7 @@ retry:
 
 	if (ctxi->err_recovery_active) {
 retry_recover:
-		rc = recover_context(cfg, ctxi);
+		rc = recover_context(cfg, ctxi, &new_adap_fd);
 		if (unlikely(rc)) {
 			dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
 				__func__, ctxid, rc);
@@ -1697,9 +1673,9 @@ retry_recover:
 
 		ctxi->err_recovery_active = false;
 		recover->context_id = ctxi->ctxid;
-		recover->adap_fd = ctxi->lfd;
+		recover->adap_fd = new_adap_fd;
 		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
 		recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
-		recover->hdr.return_flags |=
+		recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
 			DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
 		goto out;
 	}

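The superpipe rework replaces the ad-hoc "tear down on last LUN detach" logic with a kref: every attach takes a reference, every detach drops one, and the release callback fires exactly once when the count hits zero — which is why _cxlflash_disk_detach() treats a kref_put() that returned true as "context already gone". A userspace model of the kref contract (same semantics, not the kernel code; C11 atomics):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref {
	atomic_int refcount;
};

static void kref_init(struct kref *k) { atomic_init(&k->refcount, 1); }
static void kref_get(struct kref *k)  { atomic_fetch_add(&k->refcount, 1); }

/* Returns 1 when this put released the object, 0 otherwise --
 * the same contract kref_put() has in the kernel. */
static int kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (atomic_fetch_sub(&k->refcount, 1) == 1) {
		release(k);
		return 1;
	}
	return 0;
}

struct ctx_info {
	int ctxid;
	struct kref kref;
};

/* Release handler: recover the enclosing object from its kref member. */
static void remove_context(struct kref *kref)
{
	struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);

	printf("context %d released\n", ctxi->ctxid);
	free(ctxi);
}

int main(void)
{
	struct ctx_info *ctxi = malloc(sizeof(*ctxi));

	ctxi->ctxid = 42;
	kref_init(&ctxi->kref);	/* first LUN attach */
	kref_get(&ctxi->kref);	/* second LUN reuses the context */

	kref_put(&ctxi->kref, remove_context);	/* detach #1: still alive */
	kref_put(&ctxi->kref, remove_context);	/* detach #2: releases */
	return 0;
}
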
+ 2 - 1
drivers/scsi/cxlflash/superpipe.h

@@ -100,13 +100,14 @@ struct ctx_info {
 
 	struct cxl_ioctl_start_work work;
 	u64 ctxid;
-	int lfd;
 	pid_t pid;
 	bool initialized;
 	bool unavail;
 	bool err_recovery_active;
 	struct mutex mutex; /* Context protection */
+	struct kref kref;
 	struct cxl_context *ctx;
+	struct cxlflash_cfg *cfg;
 	struct list_head luns;	/* LUNs attached to this context */
 	const struct vm_operations_struct *cxl_mmap_vmops;
 	struct file *file;

+ 2 - 11
drivers/scsi/cxlflash/vlun.c

@@ -1135,14 +1135,13 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
 	    ctxid_dst = DECODE_CTXID(clone->context_id_dst),
 	    rctxid_src = clone->context_id_src,
 	    rctxid_dst = clone->context_id_dst;
-	int adap_fd_src = clone->adap_fd_src;
 	int i, j;
 	int rc = 0;
 	bool found;
 	LIST_HEAD(sidecar);
 
-	pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu adap_fd_src=%d\n",
-		 __func__, ctxid_src, ctxid_dst, adap_fd_src);
+	pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n",
+		 __func__, ctxid_src, ctxid_dst);
 
 	/* Do not clone yourself */
 	if (unlikely(rctxid_src == rctxid_dst)) {
@@ -1166,13 +1165,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
 		goto out;
 	}
 
-	if (unlikely(adap_fd_src != ctxi_src->lfd)) {
-		pr_debug("%s: Invalid source adapter fd! (%d)\n",
-			 __func__, adap_fd_src);
-		rc = -EINVAL;
-		goto out;
-	}
-
 	/* Verify there is no open resource handle in the destination context */
 	for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
 		if (ctxi_dst->rht_start[i].nmask != 0) {
@@ -1257,7 +1249,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
 
 out_success:
 	list_splice(&sidecar, &ctxi_dst->luns);
-	sys_close(adap_fd_src);
 
 	/* fall through */
 out:

+ 1 - 0
drivers/scsi/device_handler/scsi_dh_alua.c

@@ -583,6 +583,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
 			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
 				    ALUA_DH_NAME);
 			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
+			kfree(buff);
 			return err;
 		}
 		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",

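The alua_rtpg fix plugs a classic leak: an early return on the retry path skipped the kfree() at the tail of the function, so the added kfree(buff) before the return closes the hole. The usual kernel-style defense is a single exit label so every path funnels through the cleanup; a sketch with illustrative names:

#include <stdio.h>
#include <stdlib.h>

static int rtpg_like(size_t len, int force_retry)
{
	int err = 0;
	unsigned char *buff = malloc(len);

	if (!buff)
		return -1;	/* nothing allocated yet, direct return is fine */

	if (force_retry) {
		err = -2;
		goto out;	/* buff is still freed below */
	}

	/* ... normal processing of buff ... */

out:
	free(buff);
	return err;
}

int main(void)
{
	printf("%d %d\n", rtpg_like(128, 0), rtpg_like(128, 1));
	return 0;
}
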
+ 0 - 447
drivers/scsi/dtc.c

@@ -1,447 +0,0 @@
-/*
- * DTC 3180/3280 driver, by
- *	Ray Van Tassle	rayvt@comm.mot.com
- *
- *	taken from ...
- *	Trantor T128/T128F/T228 driver by...
- *
- * 	Drew Eckhardt
- *	Visionary Computing
- *	(Unix and Linux consulting and custom programming)
- *	drew@colorado.edu
- *      +1 (303) 440-4894
- */
-
-/*
- * The card is detected and initialized in one of several ways : 
- * 1.  Autoprobe (default) - since the board is memory mapped, 
- *     a BIOS signature is scanned for to locate the registers.
- *     An interrupt is triggered to autoprobe for the interrupt
- *     line.
- *
- * 2.  With command line overrides - dtc=address,irq may be 
- *     used on the LILO command line to override the defaults.
- * 
-*/
-
-/*----------------------------------------------------------------*/
-/* the following will set the monitor border color (useful to find
- where something crashed or gets stuck at */
-/* 1 = blue
- 2 = green
- 3 = cyan
- 4 = red
- 5 = magenta
- 6 = yellow
- 7 = white
-*/
-#if 0
-#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
-#else
-#define rtrc(i) {}
-#endif
-
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <scsi/scsi_host.h>
-
-#include "dtc.h"
-#include "NCR5380.h"
-
-/*
- * The DTC3180 & 3280 boards are memory mapped.
- * 
- */
-
-/*
- */
-/* Offset from DTC_5380_OFFSET */
-#define DTC_CONTROL_REG		0x100	/* rw */
-#define D_CR_ACCESS		0x80	/* ro set=can access 3280 registers */
-#define CSR_DIR_READ		0x40	/* rw direction, 1 = read 0 = write */
-
-#define CSR_RESET              0x80	/* wo  Resets 53c400 */
-#define CSR_5380_REG           0x80	/* ro  5380 registers can be accessed */
-#define CSR_TRANS_DIR          0x40	/* rw  Data transfer direction */
-#define CSR_SCSI_BUFF_INTR     0x20	/* rw  Enable int on transfer ready */
-#define CSR_5380_INTR          0x10	/* rw  Enable 5380 interrupts */
-#define CSR_SHARED_INTR        0x08	/* rw  Interrupt sharing */
-#define CSR_HOST_BUF_NOT_RDY   0x04	/* ro  Host buffer not ready */
-#define CSR_SCSI_BUF_RDY       0x02	/* ro  SCSI buffer ready */
-#define CSR_GATED_5380_IRQ     0x01	/* ro  Last block xferred */
-#define CSR_INT_BASE (CSR_SCSI_BUFF_INTR | CSR_5380_INTR)
-
-
-#define DTC_BLK_CNT		0x101	/* rw 
-					 * # of 128-byte blocks to transfer */
-
-
-#define D_CR_ACCESS             0x80	/* ro set=can access 3280 registers */
-
-#define DTC_SWITCH_REG		0x3982	/* ro - DIP switches */
-#define DTC_RESUME_XFER		0x3982	/* wo - resume data xfer 
-					 * after disconnect/reconnect*/
-
-#define DTC_5380_OFFSET		0x3880	/* 8 registers here, see NCR5380.h */
-
-/*!!!! for dtc, it's a 128 byte buffer at 3900 !!! */
-#define DTC_DATA_BUF		0x3900	/* rw 128 bytes long */
-
-static struct override {
-	unsigned int address;
-	int irq;
-} overrides
-#ifdef OVERRIDE
-[] __initdata = OVERRIDE;
-#else
-[4] __initdata = {
-	{ 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }
-};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-static struct base {
-	unsigned long address;
-	int noauto;
-} bases[] __initdata = {
-	{ 0xcc000, 0 },
-	{ 0xc8000, 0 },
-	{ 0xdc000, 0 },
-	{ 0xd8000, 0 }
-};
-
-#define NO_BASES ARRAY_SIZE(bases)
-
-static const struct signature {
-	const char *string;
-	int offset;
-} signatures[] = {
-	{"DATA TECHNOLOGY CORPORATION BIOS", 0x25},
-};
-
-#define NO_SIGNATURES ARRAY_SIZE(signatures)
-
-#ifndef MODULE
-/*
- * Function : dtc_setup(char *str, int *ints)
- *
- * Purpose : LILO command line initialization of the overrides array,
- *
- * Inputs : str - unused, ints - array of integer parameters with ints[0]
- *	equal to the number of ints.
- *
- */
-
-static int __init dtc_setup(char *str)
-{
-	static int commandline_current;
-	int i;
-	int ints[10];
-
-	get_options(str, ARRAY_SIZE(ints), ints);
-	if (ints[0] != 2)
-		printk("dtc_setup: usage dtc=address,irq\n");
-	else if (commandline_current < NO_OVERRIDES) {
-		overrides[commandline_current].address = ints[1];
-		overrides[commandline_current].irq = ints[2];
-		for (i = 0; i < NO_BASES; ++i)
-			if (bases[i].address == ints[1]) {
-				bases[i].noauto = 1;
-				break;
-			}
-		++commandline_current;
-	}
-	return 1;
-}
-
-__setup("dtc=", dtc_setup);
-#endif
-
-/* 
- * Function : int dtc_detect(struct scsi_host_template * tpnt)
- *
- * Purpose : detects and initializes DTC 3180/3280 controllers
- *	that were autoprobed, overridden on the LILO command line, 
- *	or specified at compile time.
- *
- * Inputs : tpnt - template for this SCSI adapter.
- * 
- * Returns : 1 if a host adapter was found, 0 if not.
- *
-*/
-
-static int __init dtc_detect(struct scsi_host_template * tpnt)
-{
-	static int current_override, current_base;
-	struct Scsi_Host *instance;
-	unsigned int addr;
-	void __iomem *base;
-	int sig, count;
-
-	for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
-		addr = 0;
-		base = NULL;
-
-		if (overrides[current_override].address) {
-			addr = overrides[current_override].address;
-			base = ioremap(addr, 0x2000);
-			if (!base)
-				addr = 0;
-		} else
-			for (; !addr && (current_base < NO_BASES); ++current_base) {
-				dprintk(NDEBUG_INIT, "dtc: probing address 0x%08x\n",
-				        (unsigned int)bases[current_base].address);
-				if (bases[current_base].noauto)
-					continue;
-				base = ioremap(bases[current_base].address, 0x2000);
-				if (!base)
-					continue;
-				for (sig = 0; sig < NO_SIGNATURES; ++sig) {
-					if (check_signature(base + signatures[sig].offset, signatures[sig].string, strlen(signatures[sig].string))) {
-						addr = bases[current_base].address;
-						dprintk(NDEBUG_INIT, "dtc: detected board\n");
-						goto found;
-					}
-				}
-				iounmap(base);
-			}
-
-		dprintk(NDEBUG_INIT, "dtc: addr = 0x%08x\n", addr);
-
-		if (!addr)
-			break;
-
-found:
-		instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
-		if (instance == NULL)
-			goto out_unmap;
-
-		instance->base = addr;
-		((struct NCR5380_hostdata *)(instance)->hostdata)->base = base;
-
-		if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP))
-			goto out_unregister;
-
-		NCR5380_maybe_reset_bus(instance);
-
-		NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);	/* Enable int's */
-		if (overrides[current_override].irq != IRQ_AUTO)
-			instance->irq = overrides[current_override].irq;
-		else
-			instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
-
-		/* Compatibility with documented NCR5380 kernel parameters */
-		if (instance->irq == 255)
-			instance->irq = NO_IRQ;
-
-		/* With interrupts enabled, it will sometimes hang when doing heavy
-		 * reads. So better not enable them until I finger it out. */
-		instance->irq = NO_IRQ;
-
-		if (instance->irq != NO_IRQ)
-			if (request_irq(instance->irq, dtc_intr, 0,
-					"dtc", instance)) {
-				printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
-				instance->irq = NO_IRQ;
-			}
-
-		if (instance->irq == NO_IRQ) {
-			printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
-			printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
-		}
-
-		dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
-		        instance->host_no, instance->irq);
-
-		++current_override;
-		++count;
-	}
-	return count;
-
-out_unregister:
-	scsi_unregister(instance);
-out_unmap:
-	iounmap(base);
-	return count;
-}
-
-/*
- * Function : int dtc_biosparam(Disk * disk, struct block_device *dev, int *ip)
- *
- * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for 
- *	the specified device / size.
- * 
- * Inputs : size = size of device in sectors (512 bytes), dev = block device
- *	major / minor, ip[] = {heads, sectors, cylinders}  
- *
- * Returns : always 0 (success), initializes ip
- *	
-*/
-
-/* 
- * XXX Most SCSI boards use this mapping, I could be incorrect.  Some one
- * using hard disks on a trantor should verify that this mapping corresponds
- * to that used by the BIOS / ASPI driver by running the linux fdisk program
- * and matching the H_C_S coordinates to what DOS uses.
-*/
-
-static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev,
-			 sector_t capacity, int *ip)
-{
-	int size = capacity;
-
-	ip[0] = 64;
-	ip[1] = 32;
-	ip[2] = size >> 11;
-	return 0;
-}
-
-
-/****************************************************************
- * Function : int NCR5380_pread (struct Scsi_Host *instance, 
- *	unsigned char *dst, int len)
- *
- * Purpose : Fast 5380 pseudo-dma read function, reads len bytes to 
- *	dst
- * 
- * Inputs : dst = destination, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog 
- * 	timeout.
-*/
-
-static inline int dtc_pread(struct Scsi_Host *instance,
-                            unsigned char *dst, int len)
-{
-	unsigned char *d = dst;
-	int i;			/* For counting time spent in the poll-loop */
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	i = 0;
-	if (instance->irq == NO_IRQ)
-		NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
-	else
-		NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
-	NCR5380_write(DTC_BLK_CNT, len >> 7);	/* Block count */
-	rtrc(1);
-	while (len > 0) {
-		rtrc(2);
-		while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
-			++i;
-		rtrc(3);
-		memcpy_fromio(d, hostdata->base + DTC_DATA_BUF, 128);
-		d += 128;
-		len -= 128;
-		rtrc(7);
-		/*** with int's on, it sometimes hangs after here.
-		 * Looks like something makes HBNR go away. */
-	}
-	rtrc(4);
-	while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
-		++i;
-	rtrc(0);
-	return (0);
-}
-
-/****************************************************************
- * Function : int NCR5380_pwrite (struct Scsi_Host *instance, 
- *	unsigned char *src, int len)
- *
- * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
- *	src
- * 
- * Inputs : src = source, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog 
- * 	timeout.
-*/
-
-static inline int dtc_pwrite(struct Scsi_Host *instance,
-                             unsigned char *src, int len)
-{
-	int i;
-	struct NCR5380_hostdata *hostdata = shost_priv(instance);
-
-	if (instance->irq == NO_IRQ)
-		NCR5380_write(DTC_CONTROL_REG, 0);
-	else
-		NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
-	NCR5380_write(DTC_BLK_CNT, len >> 7);	/* Block count */
-	for (i = 0; len > 0; ++i) {
-		rtrc(5);
-		/* Poll until the host buffer can accept data. */
-		while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
-			++i;
-		rtrc(3);
-		memcpy_toio(hostdata->base + DTC_DATA_BUF, src, 128);
-		src += 128;
-		len -= 128;
-	}
-	rtrc(4);
-	while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
-		++i;
-	rtrc(6);
-	/* Wait until the last byte has been sent to the disk */
-	while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
-		++i;
-	rtrc(7);
-	/* Check for parity error here. fixme. */
-	rtrc(0);
-	return (0);
-}
-
-static int dtc_dma_xfer_len(struct scsi_cmnd *cmd)
-{
-	int transfersize = cmd->transfersize;
-
-	/* Limit transfers to 32K, for xx400 & xx406
-	 * pseudoDMA that transfers in 128 bytes blocks.
-	 */
-	if (transfersize > 32 * 1024 && cmd->SCp.this_residual &&
-	    !(cmd->SCp.this_residual % transfersize))
-		transfersize = 32 * 1024;
-
-	return transfersize;
-}
-
-MODULE_LICENSE("GPL");
-
-#include "NCR5380.c"
-
-static int dtc_release(struct Scsi_Host *shost)
-{
-	struct NCR5380_hostdata *hostdata = shost_priv(shost);
-
-	if (shost->irq != NO_IRQ)
-		free_irq(shost->irq, shost);
-	NCR5380_exit(shost);
-	scsi_unregister(shost);
-	iounmap(hostdata->base);
-	return 0;
-}
-
-static struct scsi_host_template driver_template = {
-	.name			= "DTC 3180/3280",
-	.detect			= dtc_detect,
-	.release		= dtc_release,
-	.proc_name		= "dtc3x80",
-	.info			= dtc_info,
-	.queuecommand		= dtc_queue_command,
-	.eh_abort_handler	= dtc_abort,
-	.eh_bus_reset_handler	= dtc_bus_reset,
-	.bios_param		= dtc_biosparam,
-	.can_queue		= 32,
-	.this_id		= 7,
-	.sg_tablesize		= SG_ALL,
-	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
-	.cmd_size		= NCR5380_CMD_SIZE,
-	.max_sectors		= 128,
-};
-#include "scsi_module.c"

+ 0 - 42
drivers/scsi/dtc.h

@@ -1,42 +0,0 @@
-/*
- * DTC controller, taken from T128 driver by...
- * Copyright 1993, Drew Eckhardt
- *	Visionary Computing
- *	(Unix and Linux consulting and custom programming)
- *	drew@colorado.edu
- *      +1 (303) 440-4894
- */
-
-#ifndef DTC3280_H
-#define DTC3280_H
-
-#define NCR5380_implementation_fields \
-    void __iomem *base
-
-#define DTC_address(reg) \
-	(((struct NCR5380_hostdata *)shost_priv(instance))->base + DTC_5380_OFFSET + reg)
-
-#define NCR5380_read(reg) (readb(DTC_address(reg)))
-#define NCR5380_write(reg, value) (writeb(value, DTC_address(reg)))
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase) \
-        dtc_dma_xfer_len(cmd)
-#define NCR5380_dma_recv_setup		dtc_pread
-#define NCR5380_dma_send_setup		dtc_pwrite
-#define NCR5380_dma_residual(instance)	(0)
-
-#define NCR5380_intr			dtc_intr
-#define NCR5380_queue_command		dtc_queue_command
-#define NCR5380_abort			dtc_abort
-#define NCR5380_bus_reset		dtc_bus_reset
-#define NCR5380_info			dtc_info
-
-#define NCR5380_io_delay(x)		udelay(x)
-
-/* 15 12 11 10
-   1001 1100 0000 0000 */
-
-#define DTC_IRQS 0x9c00
-
-
-#endif /* DTC3280_H */

+ 0 - 4
drivers/scsi/esas2r/esas2r_init.c

@@ -963,10 +963,6 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
 
 	/* initialize the allocated memory */
 	if (test_bit(AF_FIRST_INIT, &a->flags)) {
-		memset(a->req_table, 0,
-		       (num_requests + num_ae_requests +
-			1) * sizeof(struct esas2r_request *));
-
 		esas2r_targ_db_initialize(a);
 
 		/* prime parts of the inbound list */

+ 1 - 1
drivers/scsi/esas2r/esas2r_main.c

@@ -194,7 +194,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
 	int length = min(sizeof(struct atto_ioctl), count);
 
 	if (!a->local_atto_ioctl) {
-		a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+		a->local_atto_ioctl = kmalloc(sizeof(struct atto_ioctl),
 					      GFP_KERNEL);
 		if (a->local_atto_ioctl == NULL) {
 			esas2r_log(ESAS2R_LOG_WARN,

+ 36 - 17
drivers/scsi/fcoe/fcoe_transport.c

@@ -83,6 +83,41 @@ static struct notifier_block libfcoe_notifier = {
 	.notifier_call = libfcoe_device_notification,
 };
 
+static const struct {
+	u32 fc_port_speed;
+#define SPEED_2000	2000
+#define SPEED_4000	4000
+#define SPEED_8000	8000
+#define SPEED_16000	16000
+#define SPEED_32000	32000
+	u32 eth_port_speed;
+} fcoe_port_speed_mapping[] = {
+	{ FC_PORTSPEED_1GBIT,   SPEED_1000   },
+	{ FC_PORTSPEED_2GBIT,   SPEED_2000   },
+	{ FC_PORTSPEED_4GBIT,   SPEED_4000   },
+	{ FC_PORTSPEED_8GBIT,   SPEED_8000   },
+	{ FC_PORTSPEED_10GBIT,  SPEED_10000  },
+	{ FC_PORTSPEED_16GBIT,  SPEED_16000  },
+	{ FC_PORTSPEED_20GBIT,  SPEED_20000  },
+	{ FC_PORTSPEED_25GBIT,  SPEED_25000  },
+	{ FC_PORTSPEED_32GBIT,  SPEED_32000  },
+	{ FC_PORTSPEED_40GBIT,  SPEED_40000  },
+	{ FC_PORTSPEED_50GBIT,  SPEED_50000  },
+	{ FC_PORTSPEED_100GBIT, SPEED_100000 },
+};
+
+static inline u32 eth2fc_speed(u32 eth_port_speed)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fcoe_port_speed_mapping); i++) {
+		if (fcoe_port_speed_mapping[i].eth_port_speed == eth_port_speed)
+			return fcoe_port_speed_mapping[i].fc_port_speed;
+	}
+
+	return FC_PORTSPEED_UNKNOWN;
+}
+
 /**
  * fcoe_link_speed_update() - Update the supported and actual link speeds
  * @lport: The local port to update speeds for
@@ -126,23 +161,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
 			    SUPPORTED_40000baseLR4_Full))
 			lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
 
-		switch (ecmd.base.speed) {
-		case SPEED_1000:
-			lport->link_speed = FC_PORTSPEED_1GBIT;
-			break;
-		case SPEED_10000:
-			lport->link_speed = FC_PORTSPEED_10GBIT;
-			break;
-		case SPEED_20000:
-			lport->link_speed = FC_PORTSPEED_20GBIT;
-			break;
-		case SPEED_40000:
-			lport->link_speed = FC_PORTSPEED_40GBIT;
-			break;
-		default:
-			lport->link_speed = FC_PORTSPEED_UNKNOWN;
-			break;
-		}
+		lport->link_speed = eth2fc_speed(ecmd.base.speed);
 		return 0;
 	}
 	return -1;
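
The change above replaces a grow-forever switch with a data table plus a linear lookup, so supporting another link speed becomes a one-line table entry rather than a new case. A minimal userspace sketch of the same table-lookup pattern — the numeric constants below are illustrative stand-ins, not the kernel's FC_PORTSPEED_*/SPEED_* values:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const struct {
        uint32_t fc_speed;  /* FC transport speed bit (made-up values) */
        uint32_t eth_speed; /* ethtool link speed in Mb/s */
    } speed_map[] = {
        { 0x01, 1000 },
        { 0x04, 10000 },
        { 0x08, 20000 },
        { 0x10, 40000 },
    };

    static uint32_t eth2fc(uint32_t eth_speed)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(speed_map); i++)
            if (speed_map[i].eth_speed == eth_speed)
                return speed_map[i].fc_speed;
        return 0; /* unknown speed */
    }

    int main(void)
    {
        printf("40000 Mb/s -> FC bit 0x%x\n", eth2fc(40000)); /* 0x10 */
        return 0;
    }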

+ 17 - 1
drivers/scsi/hisi_sas/hisi_sas.h

@@ -23,7 +23,7 @@
 #include <scsi/sas_ata.h>
 #include <scsi/libsas.h>
 
-#define DRV_VERSION "v1.5"
+#define DRV_VERSION "v1.6"
 
 #define HISI_SAS_MAX_PHYS	9
 #define HISI_SAS_MAX_QUEUES	32
@@ -56,6 +56,11 @@ enum dev_status {
 	HISI_SAS_DEV_EH,
 };
 
+enum {
+	HISI_SAS_INT_ABT_CMD = 0,
+	HISI_SAS_INT_ABT_DEV = 1,
+};
+
 enum hisi_sas_dev_type {
 	HISI_SAS_DEV_TYPE_STP = 0,
 	HISI_SAS_DEV_TYPE_SSP,
@@ -89,6 +94,13 @@ struct hisi_sas_port {
 
 struct hisi_sas_cq {
 	struct hisi_hba *hisi_hba;
+	int	rd_point;
+	int	id;
+};
+
+struct hisi_sas_dq {
+	struct hisi_hba *hisi_hba;
+	int	wr_point;
 	int	id;
 };
 
@@ -146,6 +158,9 @@ struct hisi_sas_hw {
 			struct hisi_sas_slot *slot);
 	int (*prep_stp)(struct hisi_hba *hisi_hba,
 			struct hisi_sas_slot *slot);
+	int (*prep_abort)(struct hisi_hba *hisi_hba,
+			  struct hisi_sas_slot *slot,
+			  int device_id, int abort_flag, int tag_to_abort);
 	int (*slot_complete)(struct hisi_hba *hisi_hba,
 			     struct hisi_sas_slot *slot, int abort);
 	void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -185,6 +200,7 @@ struct hisi_hba {
 	struct Scsi_Host *shost;
 
 	struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES];
+	struct hisi_sas_dq dq[HISI_SAS_MAX_QUEUES];
 	struct hisi_sas_phy phy[HISI_SAS_MAX_PHYS];
 	struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
 
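The new struct hisi_sas_dq pairs with hisi_sas_cq: each delivery queue now carries a host-memory copy of its hardware write pointer (wr_point), and each completion queue a copy of its read pointer (rd_point), so the hot paths patched later in this series stop re-reading those registers over MMIO. A minimal sketch of the cached-pointer ring idea, with plain ints standing in for the MMIO registers (illustrative only, not the driver's types):

    #include <stdbool.h>

    #define SLOTS 512 /* stand-in for HISI_SAS_QUEUE_SLOTS */

    struct dq {
        int wr_point; /* cached copy of the hardware write pointer */
    };

    /* Ring is full when advancing the write pointer would collide
     * with the read pointer. */
    static bool dq_full(const struct dq *dq, int hw_rd_point)
    {
        return hw_rd_point == (dq->wr_point + 1) % SLOTS;
    }

    /* Claim a slot: bump the cache first, then (in the driver) publish
     * the new value to the device with a single MMIO write. */
    static int dq_push(struct dq *dq)
    {
        int slot = dq->wr_point;

        dq->wr_point = (dq->wr_point + 1) % SLOTS;
        return slot;
    }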

+ 202 - 36
drivers/scsi/hisi_sas/hisi_sas_main.c

@@ -17,6 +17,10 @@
 
 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
 				u8 *lun, struct hisi_sas_tmf_task *tmf);
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+			     struct domain_device *device,
+			     int abort_flag, int tag);
 
 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
 {
@@ -93,7 +97,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 	slot->task = NULL;
 	slot->port = NULL;
 	hisi_sas_slot_index_free(hisi_hba, slot->idx);
-	memset(slot, 0, sizeof(*slot));
+	/* slot memory is fully zeroed when it is reused */
 }
 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
 
@@ -116,6 +120,14 @@ static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
 	return hisi_hba->hw->prep_stp(hisi_hba, slot);
 }
 
+static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
+		struct hisi_sas_slot *slot,
+		int device_id, int abort_flag, int tag_to_abort)
+{
+	return hisi_hba->hw->prep_abort(hisi_hba, slot,
+			device_id, abort_flag, tag_to_abort);
+}
+
 /*
  * This function will issue an abort TMF regardless of whether the
  * task is in the sdev or not. Then it will do the task complete
@@ -192,27 +204,13 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
 		return rc;
 	}
 	port = device->port->lldd_port;
-	if (port && !port->port_attached && !tmf) {
-		if (sas_protocol_ata(task->task_proto)) {
-			struct task_status_struct *ts = &task->task_status;
-
-			dev_info(dev,
-				 "task prep: SATA/STP port%d not attach device\n",
-				 device->port->id);
-			ts->resp = SAS_TASK_COMPLETE;
-			ts->stat = SAS_PHY_DOWN;
-			task->task_done(task);
-		} else {
-			struct task_status_struct *ts = &task->task_status;
-
-			dev_info(dev,
-				 "task prep: SAS port%d does not attach device\n",
-				 device->port->id);
-			ts->resp = SAS_TASK_UNDELIVERED;
-			ts->stat = SAS_PHY_DOWN;
-			task->task_done(task);
-		}
-		return 0;
+	if (port && !port->port_attached) {
+		dev_info(dev, "task prep: %s port%d not attach device\n",
+			 (sas_protocol_ata(task->task_proto)) ?
+			 "SATA/STP" : "SAS",
+			 device->port->id);
+
+		return SAS_PHY_DOWN;
 	}
 
 	if (!sas_protocol_ata(task->task_proto)) {
@@ -609,6 +607,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
 	dev_info(dev, "found dev[%lld:%x] is gone\n",
 		 sas_dev->device_id, sas_dev->dev_type);
 
+	hisi_sas_internal_task_abort(hisi_hba, device,
+				     HISI_SAS_INT_ABT_DEV, 0);
+
 	hisi_hba->hw->free_device(hisi_hba, sas_dev);
 	device->lldd_dev = NULL;
 	memset(sas_dev, 0, sizeof(*sas_dev));
@@ -728,6 +729,12 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
 			break;
 		}
 
+		if (task->task_status.resp == SAS_TASK_COMPLETE &&
+			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
+			res = TMF_RESP_FUNC_SUCC;
+			break;
+		}
+
 		if (task->task_status.resp == SAS_TASK_COMPLETE &&
 		      task->task_status.stat == SAS_DATA_UNDERRUN) {
 			/* no error, but return the number of bytes of
@@ -826,18 +833,22 @@ static int hisi_sas_abort_task(struct sas_task *task)
 			}
 		}
 
+		hisi_sas_internal_task_abort(hisi_hba, device,
+					     HISI_SAS_INT_ABT_CMD, tag);
 	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
 		task->task_proto & SAS_PROTOCOL_STP) {
 		if (task->dev->dev_type == SAS_SATA_DEV) {
-			struct hisi_slot_info *slot = task->lldd_task;
-
-			dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
-				   hisi_hba, task, slot);
-			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+			hisi_sas_internal_task_abort(hisi_hba, device,
+						     HISI_SAS_INT_ABT_DEV, 0);
 			rc = TMF_RESP_FUNC_COMPLETE;
-			goto out;
 		}
+	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
+		/* SMP */
+		struct hisi_sas_slot *slot = task->lldd_task;
+		u32 tag = slot->idx;
 
+		hisi_sas_internal_task_abort(hisi_hba, device,
+					     HISI_SAS_INT_ABT_CMD, tag);
 	}
 
 out:
@@ -954,6 +965,157 @@ static int hisi_sas_query_task(struct sas_task *task)
 	return rc;
 }
 
+static int
+hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
+				  struct sas_task *task, int abort_flag,
+				  int task_tag)
+{
+	struct domain_device *device = task->dev;
+	struct hisi_sas_device *sas_dev = device->lldd_dev;
+	struct device *dev = &hisi_hba->pdev->dev;
+	struct hisi_sas_port *port;
+	struct hisi_sas_slot *slot;
+	struct hisi_sas_cmd_hdr *cmd_hdr_base;
+	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
+
+	if (!device->port)
+		return -1;
+
+	port = device->port->lldd_port;
+
+	/* simply get a slot and send abort command */
+	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
+	if (rc)
+		goto err_out;
+	rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
+					 &dlvry_queue_slot);
+	if (rc)
+		goto err_out_tag;
+
+	slot = &hisi_hba->slot_info[slot_idx];
+	memset(slot, 0, sizeof(struct hisi_sas_slot));
+
+	slot->idx = slot_idx;
+	slot->n_elem = n_elem;
+	slot->dlvry_queue = dlvry_queue;
+	slot->dlvry_queue_slot = dlvry_queue_slot;
+	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
+	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
+	slot->task = task;
+	slot->port = port;
+	task->lldd_task = slot;
+
+	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
+
+	rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
+				      abort_flag, task_tag);
+	if (rc)
+		goto err_out_tag;
+
+	/* Port structure is static for the HBA, so
+	*  even if the port is deformed it is ok
+	*  to reference.
+	*/
+	list_add_tail(&slot->entry, &port->list);
+	spin_lock(&task->task_state_lock);
+	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+	spin_unlock(&task->task_state_lock);
+
+	hisi_hba->slot_prep = slot;
+
+	sas_dev->running_req++;
+	/* send abort command to our chip */
+	hisi_hba->hw->start_delivery(hisi_hba);
+
+	return 0;
+
+err_out_tag:
+	hisi_sas_slot_index_free(hisi_hba, slot_idx);
+err_out:
+	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
+
+	return rc;
+}
+
+/**
+ * hisi_sas_internal_task_abort -- execute an internal
+ * abort command for single IO command or a device
+ * @hisi_hba: host controller struct
+ * @device: domain device
+ * @abort_flag: mode of operation, device or single IO
+ * @tag: tag of IO to be aborted (only relevant to single
+ *       IO mode)
+ */
+static int
+hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
+			     struct domain_device *device,
+			     int abort_flag, int tag)
+{
+	struct sas_task *task;
+	struct hisi_sas_device *sas_dev = device->lldd_dev;
+	struct device *dev = &hisi_hba->pdev->dev;
+	int res;
+	unsigned long flags;
+
+	if (!hisi_hba->hw->prep_abort)
+		return -EOPNOTSUPP;
+
+	task = sas_alloc_slow_task(GFP_KERNEL);
+	if (!task)
+		return -ENOMEM;
+
+	task->dev = device;
+	task->task_proto = device->tproto;
+	task->task_done = hisi_sas_task_done;
+	task->slow_task->timer.data = (unsigned long)task;
+	task->slow_task->timer.function = hisi_sas_tmf_timedout;
+	task->slow_task->timer.expires = jiffies + 20*HZ;
+	add_timer(&task->slow_task->timer);
+
+	/* Lock as we are alloc'ing a slot, which cannot be interrupted */
+	spin_lock_irqsave(&hisi_hba->lock, flags);
+	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
+						task, abort_flag, tag);
+	spin_unlock_irqrestore(&hisi_hba->lock, flags);
+	if (res) {
+		del_timer(&task->slow_task->timer);
+		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
+			res);
+		goto exit;
+	}
+	wait_for_completion(&task->slow_task->completion);
+	res = TMF_RESP_FUNC_FAILED;
+
+	if (task->task_status.resp == SAS_TASK_COMPLETE &&
+		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
+		res = TMF_RESP_FUNC_COMPLETE;
+		goto exit;
+	}
+
+	/* TMF timed out, return direct. */
+	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+			dev_err(dev, "internal task abort: timeout.\n");
+			if (task->lldd_task) {
+				struct hisi_sas_slot *slot = task->lldd_task;
+
+				hisi_sas_slot_task_free(hisi_hba, task, slot);
+			}
+		}
+	}
+
+exit:
+	dev_info(dev, "internal task abort: task to dev %016llx task=%p "
+		"resp: 0x%x sts 0x%x\n",
+		SAS_ADDR(device->sas_addr),
+		task,
+		task->task_status.resp, /* 0 is complete, -1 is undelivered */
+		task->task_status.stat);
+	sas_free_task(task);
+
+	return res;
+}
+
 static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
 {
 	hisi_sas_port_notify_formed(sas_phy);
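
hisi_sas_internal_task_abort() above is the standard issue-and-wait shape for driver-internal commands: allocate a slow task, arm a 20-second timer, hand the abort to the hardware under the HBA lock, then sleep until either the completion IRQ or the timer fires. Stripped to its skeleton with the kernel completion API (a schematic sketch, not buildable code; issue_abort_to_hw() is a hypothetical stand-in for hisi_sas_internal_abort_task_exec()):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int issue_and_wait(struct completion *done)
    {
        unsigned long left;

        init_completion(done);
        if (issue_abort_to_hw())        /* hypothetical helper */
            return -EIO;

        /* same 20 second budget the driver arms on the slow task */
        left = wait_for_completion_timeout(done, 20 * HZ);
        return left ? 0 : -ETIMEDOUT;   /* IRQ path calls complete() */
    }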
@@ -1063,11 +1225,16 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 
 	for (i = 0; i < hisi_hba->queue_count; i++) {
 		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
+		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
 
 		/* Completion queue structure */
 		cq->id = i;
 		cq->hisi_hba = hisi_hba;
 
+		/* Delivery queue structure */
+		dq->id = i;
+		dq->hisi_hba = hisi_hba;
+
 		/* Delivery queue */
 		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
 		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
@@ -1128,7 +1295,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
 	memset(hisi_hba->breakpoint, 0, s);
 
 	hisi_hba->slot_index_count = max_command_entries;
-	s = hisi_hba->slot_index_count / sizeof(unsigned long);
+	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
 	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
 	if (!hisi_hba->slot_index_tags)
 		goto err_out;
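
The old sizing divided a bit count by sizeof(unsigned long) when the intent was bits-to-bytes: for 1024 slots the bitmap needs 1024 / 8 = 128 bytes, but on a 32-bit build (sizeof(unsigned long) == 4) the old expression allocated 256. The two only coincide on 64-bit, which is how the bug hid. Since the kernel's bitmap operations work on unsigned long granularity, the fully defensive spelling also rounds up to whole longs — a sketch of that idiomatic form:

    #include <linux/bitops.h> /* BITS_TO_LONGS() */

    /* bytes for a bitmap of 'max_command_entries' bits,
     * rounded up to whole longs */
    s = BITS_TO_LONGS(max_command_entries) * sizeof(unsigned long);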
@@ -1272,6 +1439,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
 				     &hisi_hba->queue_count))
 		goto err_out;
 
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+		dev_err(dev, "No usable DMA addressing method\n");
+		goto err_out;
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	hisi_hba->regs = devm_ioremap_resource(dev, res);
 	if (IS_ERR(hisi_hba->regs))
@@ -1319,13 +1492,6 @@ int hisi_sas_probe(struct platform_device *pdev,
 	hisi_hba = shost_priv(shost);
 	platform_set_drvdata(pdev, sha);
 
-	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
-	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
-		dev_err(dev, "No usable DMA addressing method\n");
-		rc = -EIO;
-		goto err_out_ha;
-	}
-
 	phy_nr = port_nr = hisi_hba->n_phy;
 
 	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);

+ 13 - 23
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c

@@ -490,25 +490,17 @@ static void config_id_frame_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
 			__swab32(identify_buffer[0]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
-			identify_buffer[2]);
+			__swab32(identify_buffer[1]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
-			identify_buffer[1]);
+			__swab32(identify_buffer[2]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
-			identify_buffer[4]);
+			__swab32(identify_buffer[3]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
-			identify_buffer[3]);
+			__swab32(identify_buffer[4]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
 			__swab32(identify_buffer[5]));
 }
 
-static void init_id_frame_v1_hw(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		config_id_frame_v1_hw(hisi_hba, i);
-}
-
 static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
 			     struct hisi_sas_device *sas_dev)
 {
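
Two defects are fixed at once above: dwords 1-4 of the IDENTIFY address frame were written without the byte swap that dwords 0 and 5 already received, and the buffer indices were transposed ([2]/[1] and [4]/[3]); after the fix every TX_ID_DWORDi register gets __swab32(identify_buffer[i]). For reference, what the swap does to one dword (userspace sketch; the kernel helper comes from <linux/swab.h>):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t swab32(uint32_t x) /* reverse the four bytes */
    {
        return (x >> 24) | ((x >> 8) & 0x0000ff00) |
               ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
        printf("0x%08x -> 0x%08x\n", 0x11223344u, swab32(0x11223344u));
        /* prints: 0x11223344 -> 0x44332211 */
        return 0;
    }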
@@ -774,8 +766,6 @@ static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
 	msleep(100);
 	init_reg_v1_hw(hisi_hba);
 
-	init_id_frame_v1_hw(hisi_hba);
-
 	return 0;
 }
 
@@ -875,12 +865,13 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
 static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
 {
 	struct device *dev = &hisi_hba->pdev->dev;
+	struct hisi_sas_dq *dq;
 	u32 r, w;
 	int queue = hisi_hba->queue;
 
 	while (1) {
-		w = hisi_sas_read32_relaxed(hisi_hba,
-				    DLVRY_Q_0_WR_PTR + (queue * 0x14));
+		dq = &hisi_hba->dq[queue];
+		w = dq->wr_point;
 		r = hisi_sas_read32_relaxed(hisi_hba,
 				    DLVRY_Q_0_RD_PTR + (queue * 0x14));
 		if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
@@ -903,10 +894,11 @@ static void start_delivery_v1_hw(struct hisi_hba *hisi_hba)
 {
 	int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
 	int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+	struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
 
-	hisi_sas_write32(hisi_hba,
-			 DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
-			 ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
+	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
+			 dq->wr_point);
 }
 
 static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
@@ -1565,14 +1557,11 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
 	struct hisi_sas_complete_v1_hdr *complete_queue =
 			(struct hisi_sas_complete_v1_hdr *)
 			hisi_hba->complete_hdr[queue];
-	u32 irq_value, rd_point, wr_point;
+	u32 irq_value, rd_point = cq->rd_point, wr_point;
 
 	irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
 
 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
-
-	rd_point = hisi_sas_read32(hisi_hba,
-			COMPL_Q_0_RD_PTR + (0x14 * queue));
 	wr_point = hisi_sas_read32(hisi_hba,
 			COMPL_Q_0_WR_PTR + (0x14 * queue));
 
@@ -1600,6 +1589,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
 	}
 
 	/* update rd_point */
+	cq->rd_point = rd_point;
 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
 
 	return IRQ_HANDLED;
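
With rd_point cached in struct hisi_sas_cq, the handler performs one MMIO read (the write pointer), consumes everything in between, and publishes the new read pointer back — the read pointer itself never needs to be fetched from hardware again. The consume loop reduces to the usual ring walk (a sketch; plain ints stand in for the driver's accessors):

    #define SLOTS 512 /* stand-in for HISI_SAS_QUEUE_SLOTS */

    /* rd is the cached cq->rd_point; wr was just read from the hardware
     * COMPL_Q_0_WR_PTR register. Returns the new read pointer for the
     * caller to cache and write back to the device. */
    static int consume_completions(int rd, int wr, void (*handle)(int))
    {
        while (rd != wr) {
            handle(rd);
            rd = (rd + 1) % SLOTS;
        }
        return rd;
    }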

+ 94 - 36
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c

@@ -117,6 +117,8 @@
 #define SL_CONTROL			(PORT_BASE + 0x94)
 #define SL_CONTROL_NOTIFY_EN_OFF	0
 #define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
+#define SL_CONTROL_CTA_OFF		17
+#define SL_CONTROL_CTA_MSK		(0x1 << SL_CONTROL_CTA_OFF)
 #define TX_ID_DWORD0			(PORT_BASE + 0x9c)
 #define TX_ID_DWORD1			(PORT_BASE + 0xa0)
 #define TX_ID_DWORD2			(PORT_BASE + 0xa4)
@@ -124,6 +126,9 @@
 #define TX_ID_DWORD4			(PORT_BASE + 0xaC)
 #define TX_ID_DWORD5			(PORT_BASE + 0xb0)
 #define TX_ID_DWORD6			(PORT_BASE + 0xb4)
+#define TXID_AUTO			(PORT_BASE + 0xb8)
+#define TXID_AUTO_CT3_OFF		1
+#define TXID_AUTO_CT3_MSK		(0x1 << TXID_AUTO_CT3_OFF)
 #define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
 #define RX_IDAF_DWORD1			(PORT_BASE + 0xc8)
 #define RX_IDAF_DWORD2			(PORT_BASE + 0xcc)
@@ -174,6 +179,10 @@
 /* HW dma structures */
 /* Delivery queue header */
 /* dw0 */
+#define CMD_HDR_ABORT_FLAG_OFF		0
+#define CMD_HDR_ABORT_FLAG_MSK		(0x3 << CMD_HDR_ABORT_FLAG_OFF)
+#define CMD_HDR_ABORT_DEVICE_TYPE_OFF	2
+#define CMD_HDR_ABORT_DEVICE_TYPE_MSK	(0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
 #define CMD_HDR_RESP_REPORT_OFF		5
 #define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
 #define CMD_HDR_TLR_CTRL_OFF		6
@@ -214,6 +223,8 @@
 #define CMD_HDR_DIF_SGL_LEN_MSK		(0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
 #define CMD_HDR_DATA_SGL_LEN_OFF	16
 #define CMD_HDR_DATA_SGL_LEN_MSK	(0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
+#define CMD_HDR_ABORT_IPTT_OFF		16
+#define CMD_HDR_ABORT_IPTT_MSK		(0xffff << CMD_HDR_ABORT_IPTT_OFF)
 
 /* Completion header */
 /* dw0 */
@@ -221,6 +232,13 @@
 #define CMPLT_HDR_RSPNS_XFRD_MSK	(0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
 #define CMPLT_HDR_ERX_OFF		12
 #define CMPLT_HDR_ERX_MSK		(0x1 << CMPLT_HDR_ERX_OFF)
+#define CMPLT_HDR_ABORT_STAT_OFF	13
+#define CMPLT_HDR_ABORT_STAT_MSK	(0x7 << CMPLT_HDR_ABORT_STAT_OFF)
+/* abort_stat */
+#define STAT_IO_NOT_VALID		0x1
+#define STAT_IO_NO_DEVICE		0x2
+#define STAT_IO_COMPLETE		0x3
+#define STAT_IO_ABORTED			0x4
 /* dw1 */
 #define CMPLT_HDR_IPTT_OFF		0
 #define CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
@@ -549,25 +567,17 @@ static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
 			__swab32(identify_buffer[0]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
-			identify_buffer[2]);
+			__swab32(identify_buffer[1]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
-			identify_buffer[1]);
+			__swab32(identify_buffer[2]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
-			identify_buffer[4]);
+			__swab32(identify_buffer[3]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
-			identify_buffer[3]);
+			__swab32(identify_buffer[4]));
 	hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
 			__swab32(identify_buffer[5]));
 }
 
-static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
-{
-	int i;
-
-	for (i = 0; i < hisi_hba->n_phy; i++)
-		config_id_frame_v2_hw(hisi_hba, i);
-}
-
 static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
 			     struct hisi_sas_device *sas_dev)
 {
@@ -589,6 +599,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
 		qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
 		break;
 	case SAS_SATA_DEV:
+	case SAS_SATA_PENDING:
 		if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
 			qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
 		else
@@ -672,9 +683,7 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
 	else
 		reset_val = 0x7ffff;
 
-	/* Disable all of the DQ */
-	for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
-		hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
 
 	/* Disable all of the PHYs */
 	for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -810,6 +819,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
 		hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
 		hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
+		hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
+		hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2);
 		hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
 		hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
@@ -901,8 +912,6 @@ static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
 	msleep(100);
 	init_reg_v2_hw(hisi_hba);
 
-	init_id_frame_v2_hw(hisi_hba);
-
 	return 0;
 }
 
@@ -952,14 +961,8 @@ static void start_phys_v2_hw(unsigned long data)
 
 static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
 {
-	int i;
 	struct timer_list *timer = &hisi_hba->timer;
 
-	for (i = 0; i < hisi_hba->n_phy; i++) {
-		hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
-		hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
-	}
-
 	setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
 	mod_timer(timer, jiffies + HZ);
 }
@@ -1010,12 +1013,13 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
 static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
 {
 	struct device *dev = &hisi_hba->pdev->dev;
+	struct hisi_sas_dq *dq;
 	u32 r, w;
 	int queue = hisi_hba->queue;
 
 	while (1) {
-		w = hisi_sas_read32_relaxed(hisi_hba,
-					    DLVRY_Q_0_WR_PTR + (queue * 0x14));
+		dq = &hisi_hba->dq[queue];
+		w = dq->wr_point;
 		r = hisi_sas_read32_relaxed(hisi_hba,
 					    DLVRY_Q_0_RD_PTR + (queue * 0x14));
 		if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
@@ -1038,9 +1042,11 @@ static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
 {
 	int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
 	int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
+	struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
 
+	dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
 	hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
-			 ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS);
+			 dq->wr_point);
 }
 
 static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
@@ -1563,6 +1569,30 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
 		goto out;
 	}
 
+	/* Use SAS+TMF status codes */
+	switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
+			>> CMPLT_HDR_ABORT_STAT_OFF) {
+	case STAT_IO_ABORTED:
+		/* this io has been aborted by abort command */
+		ts->stat = SAS_ABORTED_TASK;
+		goto out;
+	case STAT_IO_COMPLETE:
+		/* internal abort command complete */
+		ts->stat = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	case STAT_IO_NO_DEVICE:
+		ts->stat = TMF_RESP_FUNC_COMPLETE;
+		goto out;
+	case STAT_IO_NOT_VALID:
+		/* abort single io, controller don't find
+		 * the io need to abort
+		 */
+		ts->stat = TMF_RESP_FUNC_FAILED;
+		goto out;
+	default:
+		break;
+	}
+
 	if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
 		(!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
 
@@ -1775,6 +1805,32 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	return 0;
 }
 
+static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
+		struct hisi_sas_slot *slot,
+		int device_id, int abort_flag, int tag_to_abort)
+{
+	struct sas_task *task = slot->task;
+	struct domain_device *dev = task->dev;
+	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
+	struct hisi_sas_port *port = slot->port;
+
+	/* dw0 */
+	hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
+			       (port->id << CMD_HDR_PORT_OFF) |
+			       ((dev_is_sata(dev) ? 1:0) <<
+				CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
+			       (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
+
+	/* dw1 */
+	hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
+
+	/* dw7 */
+	hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
+	hdr->transfer_tags = cpu_to_le32(slot->idx);
+
+	return 0;
+}
+
 static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
 	int i, res = 0;
@@ -1818,9 +1874,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 		frame_rcvd[i] = __swab32(idaf);
 	}
 
-	/* Get the linkrates */
-	link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
-	link_rate = (link_rate >> (phy_no * 4)) & 0xf;
 	sas_phy->linkrate = link_rate;
 	hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
 						HARD_PHY_LINKRATE);
@@ -1855,16 +1908,21 @@ end:
 static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
 {
 	int res = 0;
-	u32 phy_cfg, phy_state;
+	u32 phy_state, sl_ctrl, txid_auto;
 
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
 
-	phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
-
 	phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
-
 	hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
 
+	sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
+	hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
+			     sl_ctrl & ~SL_CONTROL_CTA_MSK);
+
+	txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
+	hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
+			     txid_auto | TXID_AUTO_CT3_MSK);
+
 	hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
 	hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
 
@@ -1986,7 +2044,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
 	struct hisi_sas_slot *slot;
 	struct hisi_sas_itct *itct;
 	struct hisi_sas_complete_v2_hdr *complete_queue;
-	u32 irq_value, rd_point, wr_point, dev_id;
+	u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id;
 	int queue = cq->id;
 
 	complete_queue = hisi_hba->complete_hdr[queue];
@@ -1994,8 +2052,6 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
 
 	hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
 
-	rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
-				   (0x14 * queue));
 	wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
 				   (0x14 * queue));
 
@@ -2043,6 +2099,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
 	}
 
 	/* update rd_point */
+	cq->rd_point = rd_point;
 	hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
 	return IRQ_HANDLED;
 }
@@ -2239,6 +2296,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
 	.prep_smp = prep_smp_v2_hw,
 	.prep_ssp = prep_ssp_v2_hw,
 	.prep_stp = prep_ata_v2_hw,
+	.prep_abort = prep_abort_v2_hw,
 	.get_free_slot = get_free_slot_v2_hw,
 	.start_delivery = start_delivery_v2_hw,
 	.slot_complete = slot_complete_v2_hw,

+ 8 - 4
drivers/scsi/hosts.c

@@ -246,10 +246,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 
 	shost->dma_dev = dma_dev;
 
-	error = device_add(&shost->shost_gendev);
-	if (error)
-		goto out_destroy_freelist;
-
 	/*
 	 * Increase usage count temporarily here so that calling
 	 * scsi_autopm_put_host() will trigger runtime idle if there is
@@ -260,6 +256,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
 	pm_runtime_enable(&shost->shost_gendev);
 	device_enable_async_suspend(&shost->shost_gendev);
 
+	error = device_add(&shost->shost_gendev);
+	if (error)
+		goto out_destroy_freelist;
+
 	scsi_host_set_state(shost, SHOST_RUNNING);
 	get_device(shost->shost_gendev.parent);
 
@@ -309,6 +309,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
  out_del_gendev:
 	device_del(&shost->shost_gendev);
  out_destroy_freelist:
+	device_disable_async_suspend(&shost->shost_gendev);
+	pm_runtime_disable(&shost->shost_gendev);
+	pm_runtime_set_suspended(&shost->shost_gendev);
+	pm_runtime_put_noidle(&shost->shost_gendev);
 	scsi_destroy_command_freelist(shost);
  out_destroy_tags:
 	if (shost_use_blk_mq(shost))
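
Two fixes travel together in these hosts.c hunks: device_add() moves after the runtime-PM setup so the host device is never exposed to userspace in a half-initialized PM state, and the shared out_destroy_freelist error path now unwinds that PM state in reverse order of setup. A schematic of the resulting shape (a sketch of the pattern, not the full function; the usage-count and set-active calls referred to in the comment sit just above this hunk in the original):

    error = device_add(&shost->shost_gendev);
    if (error)
        goto out_destroy_freelist;
    /* ... */

    out_destroy_freelist:
        /* undo in reverse: disable, mark suspended, drop usage count */
        device_disable_async_suspend(&shost->shost_gendev);
        pm_runtime_disable(&shost->shost_gendev);
        pm_runtime_set_suspended(&shost->shost_gendev);
        pm_runtime_put_noidle(&shost->shost_gendev);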

+ 112 - 27
drivers/scsi/hpsa.c

@@ -293,6 +293,8 @@ static int detect_controller_lockup(struct ctlr_info *h);
 static void hpsa_disable_rld_caching(struct ctlr_info *h);
 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
 	struct ReportExtendedLUNdata *buf, int bufsize);
+static bool hpsa_vpd_page_supported(struct ctlr_info *h,
+	unsigned char scsi3addr[], u8 page);
 static int hpsa_luns_changed(struct ctlr_info *h);
 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
 			       struct hpsa_scsi_dev_t *dev,
@@ -2388,7 +2390,8 @@ static void hpsa_cmd_free_and_done(struct ctlr_info *h,
 		struct CommandList *c, struct scsi_cmnd *cmd)
 {
 	hpsa_cmd_resolve_and_free(h, c);
-	cmd->scsi_done(cmd);
+	if (cmd && cmd->scsi_done)
+		cmd->scsi_done(cmd);
 }
 
 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
@@ -2489,7 +2492,17 @@ static void complete_scsi_command(struct CommandList *cp)
 	ei = cp->err_info;
 	cmd = cp->scsi_cmd;
 	h = cp->h;
+
+	if (!cmd->device) {
+		cmd->result = DID_NO_CONNECT << 16;
+		return hpsa_cmd_free_and_done(h, cp, cmd);
+	}
+
 	dev = cmd->device->hostdata;
+	if (!dev) {
+		cmd->result = DID_NO_CONNECT << 16;
+		return hpsa_cmd_free_and_done(h, cp, cmd);
+	}
 	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
 
 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
@@ -2504,8 +2517,15 @@ static void complete_scsi_command(struct CommandList *cp)
 	cmd->result = (DID_OK << 16); 		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
 
-	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
-		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
+		if (dev->physical_device && dev->expose_device &&
+			dev->removed) {
+			cmd->result = DID_NO_CONNECT << 16;
+			return hpsa_cmd_free_and_done(h, cp, cmd);
+		}
+		if (likely(cp->phys_disk != NULL))
+			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
+	}
 
 	/*
 	 * We check for lockup status here as it may be set for
@@ -3074,11 +3094,19 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 		return;
-	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
+
+	if (!hpsa_vpd_page_supported(h, scsi3addr,
+		HPSA_VPD_LV_DEVICE_GEOMETRY))
+		goto exit;
+
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
+		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
+
 	if (rc == 0)
 		*raid_level = buf[8];
 	if (*raid_level > RAID_UNKNOWN)
 		*raid_level = RAID_UNKNOWN;
+exit:
 	kfree(buf);
 	return;
 }
@@ -3436,7 +3464,7 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
 }
 
 /* Get a device id from inquiry page 0x83 */
-static int hpsa_vpd_page_supported(struct ctlr_info *h,
+static bool hpsa_vpd_page_supported(struct ctlr_info *h,
 	unsigned char scsi3addr[], u8 page)
 {
 	int rc;
@@ -3446,7 +3474,7 @@
 
 	buf = kzalloc(256, GFP_KERNEL);
 	if (!buf)
-		return 0;
+		return false;
 
 	/* Get the size of the page list first */
 	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
@@ -3473,10 +3501,10 @@
 			goto exit_supported;
 exit_unsupported:
 	kfree(buf);
-	return 0;
+	return false;
exit_supported:
 	kfree(buf);
-	return 1;
+	return true;
 }
 
 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
@@ -3525,18 +3553,25 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
 	int rc;
 	unsigned char *buf;
 
-	if (buflen > 16)
-		buflen = 16;
+	/* Does controller have VPD for device id? */
+	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
+		return 1; /* not supported */
+
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
-	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
-	if (rc == 0)
-		memcpy(device_id, &buf[index], buflen);
+
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
+					HPSA_VPD_LV_DEVICE_ID, buf, 64);
+	if (rc == 0) {
+		if (buflen > 16)
+			buflen = 16;
+		memcpy(device_id, &buf[8], buflen);
+	}
 
 	kfree(buf);
 
-	return rc != 0;
+	return rc; /*0 - got id,  otherwise, didn't */
 }
 
 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
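
The rewritten hpsa_get_device_id() first checks that the volume supports VPD page 0x83 (Device Identification), then copies the identifier from buf[8]: bytes 0-3 of that page are the page header and bytes 4-7 the first designation descriptor's header, so the identifier proper begins at offset 8. As a reading aid, the layout per SPC assuming a single designator (a schematic, not a struct the driver defines):

    #include <stdint.h>

    struct vpd_page83 {
        uint8_t dev_type;       /* buf[0]: peripheral qualifier/type */
        uint8_t page_code;      /* buf[1]: 0x83 */
        uint8_t len_hi, len_lo; /* buf[2..3]: page length, big endian */
        uint8_t proto_codeset;  /* buf[4] */
        uint8_t assoc_type;     /* buf[5]: association/designator type */
        uint8_t reserved;       /* buf[6] */
        uint8_t desig_len;      /* buf[7]: designator length */
        uint8_t desig[];        /* buf[8..]: bytes copied to device_id */
    };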
@@ -3807,8 +3842,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 		sizeof(this_device->model));
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
-	hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
-		sizeof(this_device->device_id));
+	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
+		sizeof(this_device->device_id)))
+		dev_err(&h->pdev->dev,
+			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
+			h->ctlr, __func__,
+			h->scsi_host->host_no,
+			this_device->target, this_device->lun,
+			scsi_device_type(this_device->devtype),
+			this_device->model);
 
 	if ((this_device->devtype == TYPE_DISK ||
 		this_device->devtype == TYPE_ZBC) &&
@@ -4034,7 +4076,17 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
 		struct bmic_identify_physical_device *id_phys)
 {
 	int rc;
-	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
+	struct ext_report_lun_entry *rle;
+
+	/*
+	 * external targets don't support BMIC
+	 */
+	if (dev->external) {
+		dev->queue_depth = 7;
+		return;
+	}
+
+	rle = &rlep->LUN[rle_index];
 
 	dev->ioaccel_handle = rle->ioaccel_handle;
 	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
@@ -4270,6 +4322,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
 			i, nphysicals, nlogicals, physdev_list, logdev_list);
 
+		/* Determine if this is a lun from an external target array */
+		tmpdevice->external =
+			figure_external_status(h, raid_ctlr_position, i,
+						nphysicals, nlocal_logicals);
+
 		/*
 		 * Skip over some devices such as a spare.
 		 */
@@ -4295,11 +4352,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
 			continue;
 		}
 
-		/* Determine if this is a lun from an external target array */
-		tmpdevice->external =
-			figure_external_status(h, raid_ctlr_position, i,
-						nphysicals, nlocal_logicals);
-
 		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
 		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
 		this_device = currentsd[ncurrent];
@@ -4513,7 +4565,9 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
 	case READ_6:
 	case READ_12:
 		if (*cdb_len == 6) {
-			block = get_unaligned_be16(&cdb[2]);
+			block = (((cdb[1] & 0x1F) << 16) |
+				(cdb[2] << 8) |
+				cdb[3]);
 			block_cnt = cdb[4];
 			if (block_cnt == 0)
 				block_cnt = 256;
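
The LBA in a 6-byte READ/WRITE CDB is 21 bits spanning bytes 1-3 (the low five bits of byte 1 are the most significant), so get_unaligned_be16(&cdb[2]) silently dropped the top five bits: any request at LBA 0x10000 or above had its address truncated on this path. A compilable check of the arithmetic (userspace sketch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* READ(6) addressing LBA 0x123456: bytes 1..3 hold the LBA */
        uint8_t cdb[6] = { 0x08, 0x12, 0x34, 0x56, 1, 0 };

        uint32_t fixed = ((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3];
        uint32_t old   = (cdb[2] << 8) | cdb[3]; /* what a be16 read saw */

        printf("fixed=0x%06x old=0x%06x\n", fixed, old);
        /* fixed=0x123456 old=0x003456 */
        return 0;
    }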
@@ -4638,6 +4692,9 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
 	struct scsi_cmnd *cmd = c->scsi_cmd;
 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 
+	if (!dev)
+		return -1;
+
 	c->phys_disk = dev;
 
 	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
@@ -4670,9 +4727,11 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
 	 */
 	switch (cmd->cmnd[0]) {
 	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
-	case WRITE_6:
 	case READ_6:
-		first_block = get_unaligned_be16(&cmd->cmnd[2]);
+	case WRITE_6:
+		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
+				(cmd->cmnd[2] << 8) |
+				cmd->cmnd[3]);
 		break;
 	case WRITE_10:
 	case READ_10:
@@ -4714,6 +4773,12 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
 	u32 len;
 	u32 total_len = 0;
 
+	if (!cmd->device)
+		return -1;
+
+	if (!cmd->device->hostdata)
+		return -1;
+
 	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
 
 	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
@@ -4822,6 +4887,12 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
 	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
 	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
 {
+	if (!c->scsi_cmd->device)
+		return -1;
+
+	if (!c->scsi_cmd->device->hostdata)
+		return -1;
+
 	/* Try to honor the device's queue depth */
 	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
 					phys_disk->queue_depth) {
@@ -4902,12 +4973,17 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
 #endif
 	int offload_to_mirror;
 
+	if (!dev)
+		return -1;
+
 	/* check for valid opcode, get LBA and block count */
 	switch (cmd->cmnd[0]) {
 	case WRITE_6:
 		is_write = 1;
 	case READ_6:
-		first_block = get_unaligned_be16(&cmd->cmnd[2]);
+		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
+				(cmd->cmnd[2] << 8) |
+				cmd->cmnd[3]);
 		block_cnt = cmd->cmnd[4];
 		if (block_cnt == 0)
 			block_cnt = 256;
@@ -5314,6 +5390,9 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 	int rc = IO_ACCEL_INELIGIBLE;
 
+	if (!dev)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	cmd->host_scribble = (unsigned char *) c;
 
 	if (dev->offload_enabled) {
@@ -5852,6 +5931,9 @@ static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
 	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
 	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
 
+	if (!dev)
+		return;
+
 	/*
 	 * We're overlaying struct hpsa_tmf_struct on top of something which
 	 * was allocated as a struct io_accel2_cmd, so we better be sure it
@@ -5935,7 +6017,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
 			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 			psa[0], psa[1], psa[2], psa[3],
 			psa[4], psa[5], psa[6], psa[7]);
-	rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
+	rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
 	if (rc != 0) {
 		dev_warn(&h->pdev->dev,
 			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
@@ -5972,6 +6054,9 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
 	struct io_accel2_cmd *c2;
 
 	dev = abort->scsi_cmd->device->hostdata;
+	if (!dev)
+		return -1;
+
 	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
 		return -1;
 

+ 0 - 1
drivers/scsi/hpsa.h

@@ -312,7 +312,6 @@ struct offline_device_entry {
 #define HPSA_DEVICE_RESET_MSG 1
 #define HPSA_RESET_TYPE_CONTROLLER 0x00
 #define HPSA_RESET_TYPE_BUS 0x01
-#define HPSA_RESET_TYPE_TARGET 0x03
 #define HPSA_RESET_TYPE_LUN 0x04
 #define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
 #define HPSA_MSG_SEND_RETRY_LIMIT 10

+ 1 - 0
drivers/scsi/hpsa_cmd.h

@@ -157,6 +157,7 @@
 
 /* VPD Inquiry types */
 #define HPSA_VPD_SUPPORTED_PAGES        0x00
+#define HPSA_VPD_LV_DEVICE_ID           0x83
 #define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1
 #define HPSA_VPD_LV_IOACCEL_STATUS      0xC2
 #define HPSA_VPD_LV_STATUS		0xC3

+ 11 - 1
drivers/scsi/ibmvscsi/ibmvfc.c

@@ -52,6 +52,7 @@ static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
+static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
 static LIST_HEAD(ibmvfc_head);
 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
 static struct scsi_transport_template *ibmvfc_transport_template;
@@ -86,6 +87,9 @@ MODULE_PARM_DESC(debug, "Enable driver debug information. "
 module_param_named(log_level, log_level, uint, 0);
 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
 		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
+module_param_named(cls3_error, cls3_error, uint, 0);
+MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
+		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
 
 static const struct {
 	u16 status;
@@ -717,7 +721,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 	spin_lock_irqsave(vhost->host->host_lock, flags);
 	vhost->state = IBMVFC_NO_CRQ;
 	vhost->logged_in = 0;
-	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 
 	/* Clean out the queue */
 	memset(crq->msgs, 0, PAGE_SIZE);
@@ -1335,6 +1338,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
 	struct srp_direct_buf *data = &vfc_cmd->ioba;
 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
 
+	if (cls3_error)
+		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
+
 	sg_mapped = scsi_dma_map(scmd);
 	if (!sg_mapped) {
 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
@@ -3381,6 +3387,10 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
 	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
 	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
 	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
+	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
+
+	if (cls3_error)
+		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
 
 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {

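The ibmvfc.c hunks wire one new module parameter, cls3_error, through two spots: the per-command flags and the PRLI service parameters. Condensed from the hunks above into one view of the flow (a sketch, not a complete driver; the parameter description is abbreviated):

	static unsigned int cls3_error = IBMVFC_CLS3_ERROR;	/* default 0 = off */
	module_param_named(cls3_error, cls3_error, uint, 0);
	MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery.");

	/* at command-build time, mark each command for class 3 error recovery */
	if (cls3_error)
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);

	/* at PRLI time, additionally request the retry service parameter */
	if (cls3_error)
		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);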
+ 1 - 0
drivers/scsi/ibmvscsi/ibmvfc.h

@@ -54,6 +54,7 @@
 #define IBMVFC_DEV_LOSS_TMO		(5 * 60)
 #define IBMVFC_DEFAULT_LOG_LEVEL	2
 #define IBMVFC_MAX_CDB_LEN		16
+#define IBMVFC_CLS3_ERROR		0
 
 /*
  * Ensure we have resources for ERP and initialization:

+ 7 - 30
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c

@@ -1606,8 +1606,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 
 	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
 		list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
-			pr_debug("send_messages cmd %p\n", cmd);
-
 			iue = cmd->iue;
 
 			crq->valid = VALID_CMD_RESP_EL;
@@ -1934,6 +1932,8 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
 	/*
 	 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
 	 */
+	target_wait_for_sess_cmds(se_sess);
+	transport_deregister_session_configfs(se_sess);
 	transport_deregister_session(se_sess);
 	tport->ibmv_nexus = NULL;
 	kfree(nexus);
@@ -1978,7 +1978,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
 		reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
 	else if (fmt->buffers & (~SUPPORTED_FORMATS))
 		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
-	else if ((fmt->buffers | SUPPORTED_FORMATS) == 0)
+	else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
 		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
 
 	if (vscsi->state == SRP_PROCESSING)
@@ -2554,10 +2554,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
 
 	srp->lun.scsi_lun[0] &= 0x3f;
 
-	pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
-		 &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
-		 attr);
-
 	rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
 			       cmd->sense_buf, scsilun_to_int(&srp->lun),
 			       data_len, attr, dir, 0);
@@ -3142,8 +3138,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
 	long tx_len;
 	long rc = 0;
 
-	pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);
-
 	if (bytes == 0)
 		return 0;
 
@@ -3192,12 +3186,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
 					 vscsi->dds.window[LOCAL].liobn,
 					 server_ioba);
 		} else {
-			/* write to client */
-			struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
-
-			if (!READ_CMD(srp->cdb))
-				print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
-						     sg_virt(sgp), buf_len);
 			/* The h_copy_rdma will cause phyp, running in another
 			 * partition, to read memory, so we need to make sure
 			 * the data has been written out, hence these syncs.
@@ -3322,12 +3310,9 @@ cmd_work:
 				rc = ibmvscsis_trans_event(vscsi, crq);
 			} else if (vscsi->flags & TRANS_EVENT) {
 				/*
-				 * if a tranport event has occurred leave
+				 * if a transport event has occurred leave
 				 * everything but transport events on the queue
-				 */
-				pr_debug("handle_crq, ignoring\n");
-
-				/*
+				 *
 				 * need to decrement the queue index so we can
 				 * look at the elment again
 				 */
@@ -3461,6 +3446,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
 	vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
 					 DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
+		rc = -ENOMEM;
 		dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
 		goto free_buf;
 	}
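This probe hunk fixes a silent-failure path: on dma_mapping_error() the code jumped to the unwind label with rc never set to an error, so probe could report success after failing to map the command buffer. Setting the error code immediately before the goto is the usual kernel idiom; the fixed flow reads:

	if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
		rc = -ENOMEM;	/* without this, rc stays 0 and probe "succeeds" */
		dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
		goto free_buf;	/* unwind partial setup and return rc */
	}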
@@ -3693,12 +3679,9 @@ static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
 						 se_cmd);
 	struct scsi_info *vscsi = cmd->adapter;
 
-	pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags);
-
 	spin_lock_bh(&vscsi->intr_lock);
 	/* Remove from active_q */
-	list_del(&cmd->list);
-	list_add_tail(&cmd->list, &vscsi->waiting_rsp);
+	list_move_tail(&cmd->list, &vscsi->waiting_rsp);
 	ibmvscsis_send_messages(vscsi);
 	spin_unlock_bh(&vscsi->intr_lock);
 }
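list_move_tail() is the list.h helper that folds the open-coded delete/add pair into one call with identical semantics; the original pair was verbose, not wrong. Its implementation, essentially as found in include/linux/list.h (comments added here to tie it to this call site):

	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del_entry(list);		/* unlink, e.g. from active_q */
		list_add_tail(list, head);	/* append, e.g. to waiting_rsp */
	}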
@@ -3715,9 +3698,6 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
 	struct iu_entry *iue = cmd->iue;
 	int rc;
 
-	pr_debug("write_pending, se_cmd %p, length 0x%x\n",
-		 se_cmd, se_cmd->data_length);
-
 	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
 			       1, 1);
 	if (rc) {
@@ -3756,9 +3736,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
 	uint len = 0;
 	int rc;
 
-	pr_debug("queue_data_in, se_cmd %p, length 0x%x\n",
-		 se_cmd, se_cmd->data_length);
-
 	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
 			       1);
 	if (rc) {

+ 0 - 2302
drivers/scsi/in2000.c

@@ -1,2302 +0,0 @@
-/*
- *    in2000.c -  Linux device driver for the
- *                Always IN2000 ISA SCSI card.
- *
- * Copyright (c) 1996 John Shifflett, GeoLog Consulting
- *    john@geolog.com
- *    jshiffle@netcom.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * For the avoidance of doubt the "preferred form" of this code is one which
- * is in an open non patent encumbered format. Where cryptographic key signing
- * forms part of the process of creating an executable the information
- * including keys needed to generate an equivalently functional executable
- * are deemed to be part of the source code.
- *
- * Drew Eckhardt's excellent 'Generic NCR5380' sources provided
- * much of the inspiration and some of the code for this driver.
- * The Linux IN2000 driver distributed in the Linux kernels through
- * version 1.2.13 was an extremely valuable reference on the arcane
- * (and still mysterious) workings of the IN2000's fifo. It also
- * is where I lifted in2000_biosparam(), the gist of the card
- * detection scheme, and other bits of code. Many thanks to the
- * talented and courageous people who wrote, contributed to, and
- * maintained that driver (including Brad McLean, Shaun Savage,
- * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
- * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
- * Youngdale). I should also mention the driver written by
- * Hamish Macdonald for the (GASP!) Amiga A2091 card, included
- * in the Linux-m68k distribution; it gave me a good initial
- * understanding of the proper way to run a WD33c93 chip, and I
- * ended up stealing lots of code from it.
- *
- * _This_ driver is (I feel) an improvement over the old one in
- * several respects:
- *    -  All problems relating to the data size of a SCSI request are
- *          gone (as far as I know). The old driver couldn't handle
- *          swapping to partitions because that involved 4k blocks, nor
- *          could it deal with the st.c tape driver unmodified, because
- *          that usually involved 4k - 32k blocks. The old driver never
- *          quite got away from a morbid dependence on 2k block sizes -
- *          which of course is the size of the card's fifo.
- *
- *    -  Target Disconnection/Reconnection is now supported. Any
- *          system with more than one device active on the SCSI bus
- *          will benefit from this. The driver defaults to what I'm
- *          calling 'adaptive disconnect' - meaning that each command
- *          is evaluated individually as to whether or not it should
- *          be run with the option to disconnect/reselect (if the
- *          device chooses), or as a "SCSI-bus-hog".
- *
- *    -  Synchronous data transfers are now supported. Because there
- *          are a few devices (and many improperly terminated systems)
- *          that choke when doing sync, the default is sync DISABLED
- *          for all devices. This faster protocol can (and should!)
- *          be enabled on selected devices via the command-line.
- *
- *    -  Runtime operating parameters can now be specified through
- *       either the LILO or the 'insmod' command line. For LILO do:
- *          "in2000=blah,blah,blah"
- *       and with insmod go like:
- *          "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
- *       The defaults should be good for most people. See the comment
- *       for 'setup_strings' below for more details.
- *
- *    -  The old driver relied exclusively on what the Western Digital
- *          docs call "Combination Level 2 Commands", which are a great
- *          idea in that the CPU is relieved of a lot of interrupt
- *          overhead. However, by accepting a certain (user-settable)
- *          amount of additional interrupts, this driver achieves
- *          better control over the SCSI bus, and data transfers are
- *          almost as fast while being much easier to define, track,
- *          and debug.
- *
- *    -  You can force detection of a card whose BIOS has been disabled.
- *
- *    -  Multiple IN2000 cards might almost be supported. I've tried to
- *       keep it in mind, but have no way to test...
- *
- *
- * TODO:
- *       tagged queuing. multiple cards.
- *
- *
- * NOTE:
- *       When using this or any other SCSI driver as a module, you'll
- *       find that with the stock kernel, at most _two_ SCSI hard
- *       drives will be linked into the device list (ie, usable).
- *       If your IN2000 card has more than 2 disks on its bus, you
- *       might want to change the define of 'SD_EXTRA_DEVS' in the
- *       'hosts.h' file from 2 to whatever is appropriate. It took
- *       me a while to track down this surprisingly obscure and
- *       undocumented little "feature".
- *
- *
- * People with bug reports, wish-lists, complaints, comments,
- * or improvements are asked to pah-leeez email me (John Shifflett)
- * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
- * this thing into as good a shape as possible, and I'm positive
- * there are lots of lurking bugs and "Stupid Places".
- *
- * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
- *	- Using new_eh handler
- *	- Hopefully got all the locking right again
- *	See "FIXME" notes for items that could do with more work
- */
-
-#include <linux/module.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/ioport.h>
-#include <linux/stat.h>
-
-#include <asm/io.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-
-#define IN2000_VERSION    "1.33-2.5"
-#define IN2000_DATE       "2002/11/03"
-
-#include "in2000.h"
-
-
-/*
- * 'setup_strings' is a single string used to pass operating parameters and
- * settings from the kernel/module command-line to the driver. 'setup_args[]'
- * is an array of strings that define the compile-time default values for
- * these settings. If Linux boots with a LILO or insmod command-line, those
- * settings are combined with 'setup_args[]'. Note that LILO command-lines
- * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
- * The driver recognizes the following keywords (lower case required) and
- * arguments:
- *
- * -  ioport:addr    -Where addr is IO address of a (usually ROM-less) card.
- * -  noreset        -No optional args. Prevents SCSI bus reset at boot time.
- * -  nosync:x       -x is a bitmask where the 1st 7 bits correspond with
- *                    the 7 possible SCSI devices (bit 0 for device #0, etc).
- *                    Set a bit to PREVENT sync negotiation on that device.
- *                    The driver default is sync DISABLED on all devices.
- * -  period:ns      -ns is the minimum # of nanoseconds in a SCSI data transfer
- *                    period. Default is 500; acceptable values are 250 - 1000.
- * -  disconnect:x   -x = 0 to never allow disconnects, 2 to always allow them.
- *                    x = 1 does 'adaptive' disconnects, which is the default
- *                    and generally the best choice.
- * -  debug:x        -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
- *                    various types of debug output to printed - see the DB_xxx
- *                    defines in in2000.h
- * -  proc:x         -If 'PROC_INTERFACE' is defined, x is a bitmask that
- *                    determines how the /proc interface works and what it
- *                    does - see the PR_xxx defines in in2000.h
- *
- * Syntax Notes:
- * -  Numeric arguments can be decimal or the '0x' form of hex notation. There
- *    _must_ be a colon between a keyword and its numeric argument, with no
- *    spaces.
- * -  Keywords are separated by commas, no spaces, in the standard kernel
- *    command-line manner.
- * -  A keyword in the 'nth' comma-separated command-line member will overwrite
- *    the 'nth' element of setup_args[]. A blank command-line member (in
- *    other words, a comma with no preceding keyword) will _not_ overwrite
- *    the corresponding setup_args[] element.
- *
- * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
- * -  in2000=ioport:0x220,noreset
- * -  in2000=period:250,disconnect:2,nosync:0x03
- * -  in2000=debug:0x1e
- * -  in2000=proc:3
- */
-
-/* Normally, no defaults are specified... */
-static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
-
-/* filled in by 'insmod' */
-static char *setup_strings;
-
-module_param(setup_strings, charp, 0);
-
-static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
-{
-	write1_io(reg_num, IO_WD_ADDR);
-	return read1_io(IO_WD_DATA);
-}
-
-
-#define READ_AUX_STAT() read1_io(IO_WD_ASR)
-
-
-static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
-{
-	write1_io(reg_num, IO_WD_ADDR);
-	write1_io(value, IO_WD_DATA);
-}
-
-
-static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
-{
-/*   while (READ_AUX_STAT() & ASR_CIP)
-      printk("|");*/
-	write1_io(WD_COMMAND, IO_WD_ADDR);
-	write1_io(cmd, IO_WD_DATA);
-}
-
-
-static uchar read_1_byte(struct IN2000_hostdata *hostdata)
-{
-	uchar asr, x = 0;
-
-	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
-	write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80);
-	do {
-		asr = READ_AUX_STAT();
-		if (asr & ASR_DBR)
-			x = read_3393(hostdata, WD_DATA);
-	} while (!(asr & ASR_INT));
-	return x;
-}
-
-
-static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
-{
-	write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
-	write1_io((value >> 16), IO_WD_DATA);
-	write1_io((value >> 8), IO_WD_DATA);
-	write1_io(value, IO_WD_DATA);
-}
-
-
-static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
-{
-	unsigned long value;
-
-	write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
-	value = read1_io(IO_WD_DATA) << 16;
-	value |= read1_io(IO_WD_DATA) << 8;
-	value |= read1_io(IO_WD_DATA);
-	return value;
-}
-
-
-/* The 33c93 needs to be told which direction a command transfers its
- * data; we use this function to figure it out. Returns true if there
- * will be a DATA_OUT phase with this command, false otherwise.
- * (Thanks to Joerg Dorchain for the research and suggestion.)
- */
-static int is_dir_out(Scsi_Cmnd * cmd)
-{
-	switch (cmd->cmnd[0]) {
-	case WRITE_6:
-	case WRITE_10:
-	case WRITE_12:
-	case WRITE_LONG:
-	case WRITE_SAME:
-	case WRITE_BUFFER:
-	case WRITE_VERIFY:
-	case WRITE_VERIFY_12:
-	case COMPARE:
-	case COPY:
-	case COPY_VERIFY:
-	case SEARCH_EQUAL:
-	case SEARCH_HIGH:
-	case SEARCH_LOW:
-	case SEARCH_EQUAL_12:
-	case SEARCH_HIGH_12:
-	case SEARCH_LOW_12:
-	case FORMAT_UNIT:
-	case REASSIGN_BLOCKS:
-	case RESERVE:
-	case MODE_SELECT:
-	case MODE_SELECT_10:
-	case LOG_SELECT:
-	case SEND_DIAGNOSTIC:
-	case CHANGE_DEFINITION:
-	case UPDATE_BLOCK:
-	case SET_WINDOW:
-	case MEDIUM_SCAN:
-	case SEND_VOLUME_TAG:
-	case 0xea:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
-
-
-static struct sx_period sx_table[] = {
-	{1, 0x20},
-	{252, 0x20},
-	{376, 0x30},
-	{500, 0x40},
-	{624, 0x50},
-	{752, 0x60},
-	{876, 0x70},
-	{1000, 0x00},
-	{0, 0}
-};
-
-static int round_period(unsigned int period)
-{
-	int x;
-
-	for (x = 1; sx_table[x].period_ns; x++) {
-		if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) {
-			return x;
-		}
-	}
-	return 7;
-}
-
-static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
-{
-	uchar result;
-
-	period *= 4;		/* convert SDTR code to ns */
-	result = sx_table[round_period(period)].reg_value;
-	result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF;
-	return result;
-}
-
-
-
-static void in2000_execute(struct Scsi_Host *instance);
-
-static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
-{
-	struct Scsi_Host *instance;
-	struct IN2000_hostdata *hostdata;
-	Scsi_Cmnd *tmp;
-
-	instance = cmd->device->host;
-	hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
-	DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))
-
-/* Set up a few fields in the Scsi_Cmnd structure for our own use:
- *  - host_scribble is the pointer to the next cmd in the input queue
- *  - scsi_done points to the routine we call when a cmd is finished
- *  - result is what you'd expect
- */
-	    cmd->host_scribble = NULL;
-	cmd->scsi_done = done;
-	cmd->result = 0;
-
-/* We use the Scsi_Pointer structure that's included with each command
- * as a scratchpad (as it's intended to be used!). The handy thing about
- * the SCp.xxx fields is that they're always associated with a given
- * cmd, and are preserved across disconnect-reselect. This means we
- * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
- * if we keep all the critical pointers and counters in SCp:
- *  - SCp.ptr is the pointer into the RAM buffer
- *  - SCp.this_residual is the size of that buffer
- *  - SCp.buffer points to the current scatter-gather buffer
- *  - SCp.buffers_residual tells us how many S.G. buffers there are
- *  - SCp.have_data_in helps keep track of >2048 byte transfers
- *  - SCp.sent_command is not used
- *  - SCp.phase records this command's SRCID_ER bit setting
- */
-
-	if (scsi_bufflen(cmd)) {
-		cmd->SCp.buffer = scsi_sglist(cmd);
-		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
-		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-		cmd->SCp.this_residual = cmd->SCp.buffer->length;
-	} else {
-		cmd->SCp.buffer = NULL;
-		cmd->SCp.buffers_residual = 0;
-		cmd->SCp.ptr = NULL;
-		cmd->SCp.this_residual = 0;
-	}
-	cmd->SCp.have_data_in = 0;
-
-/* We don't set SCp.phase here - that's done in in2000_execute() */
-
-/* WD docs state that at the conclusion of a "LEVEL2" command, the
- * status byte can be retrieved from the LUN register. Apparently,
- * this is the case only for *uninterrupted* LEVEL2 commands! If
- * there are any unexpected phases entered, even if they are 100%
- * legal (different devices may choose to do things differently),
- * the LEVEL2 command sequence is exited. This often occurs prior
- * to receiving the status byte, in which case the driver does a
- * status phase interrupt and gets the status byte on its own.
- * While such a command can then be "resumed" (ie restarted to
- * finish up as a LEVEL2 command), the LUN register will NOT be
- * a valid status byte at the command's conclusion, and we must
- * use the byte obtained during the earlier interrupt. Here, we
- * preset SCp.Status to an illegal value (0xff) so that when
- * this command finally completes, we can tell where the actual
- * status byte is stored.
- */
-
-	cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
-
-/* We need to disable interrupts before messing with the input
- * queue and calling in2000_execute().
- */
-
-	/*
-	 * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
-	 * commands are added to the head of the queue so that the desired
-	 * sense data is not lost before REQUEST_SENSE executes.
-	 */
-
-	if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
-		cmd->host_scribble = (uchar *) hostdata->input_Q;
-		hostdata->input_Q = cmd;
-	} else {		/* find the end of the queue */
-		for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
-		tmp->host_scribble = (uchar *) cmd;
-	}
-
-/* We know that there's at least one command in 'input_Q' now.
- * Go see if any of them are runnable!
- */
-
-	in2000_execute(cmd->device->host);
-
-	DB(DB_QUEUE_COMMAND, printk(")Q "))
-	    return 0;
-}
-
-static DEF_SCSI_QCMD(in2000_queuecommand)
-
-
-
-/*
- * This routine attempts to start a scsi command. If the host_card is
- * already connected, we give up immediately. Otherwise, look through
- * the input_Q, using the first command we find that's intended
- * for a currently non-busy target/lun.
- * Note that this function is always called with interrupts already
- * disabled (either from in2000_queuecommand() or in2000_intr()).
- */
-static void in2000_execute(struct Scsi_Host *instance)
-{
-	struct IN2000_hostdata *hostdata;
-	Scsi_Cmnd *cmd, *prev;
-	int i;
-	unsigned short *sp;
-	unsigned short f;
-	unsigned short flushbuf[16];
-
-
-	hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
-	DB(DB_EXECUTE, printk("EX("))
-
-	    if (hostdata->selecting || hostdata->connected) {
-
-		DB(DB_EXECUTE, printk(")EX-0 "))
-
-		    return;
-	}
-
-	/*
-	 * Search through the input_Q for a command destined
-	 * for an idle target/lun.
-	 */
-
-	cmd = (Scsi_Cmnd *) hostdata->input_Q;
-	prev = NULL;
-	while (cmd) {
-		if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
-			break;
-		prev = cmd;
-		cmd = (Scsi_Cmnd *) cmd->host_scribble;
-	}
-
-	/* quit if queue empty or all possible targets are busy */
-
-	if (!cmd) {
-
-		DB(DB_EXECUTE, printk(")EX-1 "))
-
-		    return;
-	}
-
-	/*  remove command from queue */
-
-	if (prev)
-		prev->host_scribble = cmd->host_scribble;
-	else
-		hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble;
-
-#ifdef PROC_STATISTICS
-	hostdata->cmd_cnt[cmd->device->id]++;
-#endif
-
-/*
- * Start the selection process
- */
-
-	if (is_dir_out(cmd))
-		write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
-	else
-		write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
-
-/* Now we need to figure out whether or not this command is a good
- * candidate for disconnect/reselect. We guess to the best of our
- * ability, based on a set of hierarchical rules. When several
- * devices are operating simultaneously, disconnects are usually
- * an advantage. In a single device system, or if only 1 device
- * is being accessed, transfers usually go faster if disconnects
- * are not allowed:
- *
- * + Commands should NEVER disconnect if hostdata->disconnect =
- *   DIS_NEVER (this holds for tape drives also), and ALWAYS
- *   disconnect if hostdata->disconnect = DIS_ALWAYS.
- * + Tape drive commands should always be allowed to disconnect.
- * + Disconnect should be allowed if disconnected_Q isn't empty.
- * + Commands should NOT disconnect if input_Q is empty.
- * + Disconnect should be allowed if there are commands in input_Q
- *   for a different target/lun. In this case, the other commands
- *   should be made disconnect-able, if not already.
- *
- * I know, I know - this code would flunk me out of any
- * "C Programming 101" class ever offered. But it's easy
- * to change around and experiment with for now.
- */
-
-	cmd->SCp.phase = 0;	/* assume no disconnect */
-	if (hostdata->disconnect == DIS_NEVER)
-		goto no;
-	if (hostdata->disconnect == DIS_ALWAYS)
-		goto yes;
-	if (cmd->device->type == 1)	/* tape drive? */
-		goto yes;
-	if (hostdata->disconnected_Q)	/* other commands disconnected? */
-		goto yes;
-	if (!(hostdata->input_Q))	/* input_Q empty? */
-		goto no;
-	for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) {
-		if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) {
-			for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble)
-				prev->SCp.phase = 1;
-			goto yes;
-		}
-	}
-	goto no;
-
-      yes:
-	cmd->SCp.phase = 1;
-
-#ifdef PROC_STATISTICS
-	hostdata->disc_allowed_cnt[cmd->device->id]++;
-#endif
-
-      no:
-	write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
-
-	write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun);
-	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
-	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-
-	if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {
-
-		/*
-		 * Do a 'Select-With-ATN' command. This will end with
-		 * one of the following interrupts:
-		 *    CSR_RESEL_AM:  failure - can try again later.
-		 *    CSR_TIMEOUT:   failure - give up.
-		 *    CSR_SELECT:    success - proceed.
-		 */
-
-		hostdata->selecting = cmd;
-
-/* Every target has its own synchronous transfer setting, kept in
- * the sync_xfer array, and a corresponding status byte in sync_stat[].
- * Each target's sync_stat[] entry is initialized to SS_UNSET, and its
- * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
- * means that the parameters are undetermined as yet, and that we
- * need to send an SDTR message to this device after selection is
- * complete. We set SS_FIRST to tell the interrupt routine to do so,
- * unless we don't want to even _try_ synchronous transfers: In this
- * case we set SS_SET to make the defaults final.
- */
-		if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) {
-			if (hostdata->sync_off & (1 << cmd->device->id))
-				hostdata->sync_stat[cmd->device->id] = SS_SET;
-			else
-				hostdata->sync_stat[cmd->device->id] = SS_FIRST;
-		}
-		hostdata->state = S_SELECTING;
-		write_3393_count(hostdata, 0);	/* this guarantees a DATA_PHASE interrupt */
-		write_3393_cmd(hostdata, WD_CMD_SEL_ATN);
-	}
-
-	else {
-
-		/*
-		 * Do a 'Select-With-ATN-Xfer' command. This will end with
-		 * one of the following interrupts:
-		 *    CSR_RESEL_AM:  failure - can try again later.
-		 *    CSR_TIMEOUT:   failure - give up.
-		 *    anything else: success - proceed.
-		 */
-
-		hostdata->connected = cmd;
-		write_3393(hostdata, WD_COMMAND_PHASE, 0);
-
-		/* copy command_descriptor_block into WD chip
-		 * (take advantage of auto-incrementing)
-		 */
-
-		write1_io(WD_CDB_1, IO_WD_ADDR);
-		for (i = 0; i < cmd->cmd_len; i++)
-			write1_io(cmd->cmnd[i], IO_WD_DATA);
-
-		/* The wd33c93 only knows about Group 0, 1, and 5 commands when
-		 * it's doing a 'select-and-transfer'. To be safe, we write the
-		 * size of the CDB into the OWN_ID register for every case. This
-		 * way there won't be problems with vendor-unique, audio, etc.
-		 */
-
-		write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);
-
-		/* When doing a non-disconnect command, we can save ourselves a DATA
-		 * phase interrupt later by setting everything up now. With writes we
-		 * need to pre-fill the fifo; if there's room for the 32 flush bytes,
-		 * put them in there too - that'll avoid a fifo interrupt. Reads are
-		 * somewhat simpler.
-		 * KLUDGE NOTE: It seems that you can't completely fill the fifo here:
-		 * This results in the IO_FIFO_COUNT register rolling over to zero,
-		 * and apparently the gate array logic sees this as empty, not full,
-		 * so the 3393 chip is never signalled to start reading from the
-		 * fifo. Or maybe it's seen as a permanent fifo interrupt condition.
-		 * Regardless, we fix this by temporarily pretending that the fifo
-		 * is 16 bytes smaller. (I see now that the old driver has a comment
-		 * about "don't fill completely" in an analogous place - must be the
-		 * same deal.) This results in CDROM, swap partitions, and tape drives
-		 * needing an extra interrupt per write command - I think we can live
-		 * with that!
-		 */
-
-		if (!(cmd->SCp.phase)) {
-			write_3393_count(hostdata, cmd->SCp.this_residual);
-			write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
-			write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter, write mode */
-
-			if (is_dir_out(cmd)) {
-				hostdata->fifo = FI_FIFO_WRITING;
-				if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16))
-					i = IN2000_FIFO_SIZE - 16;
-				cmd->SCp.have_data_in = i;	/* this much data in fifo */
-				i >>= 1;	/* Gulp. Assuming modulo 2. */
-				sp = (unsigned short *) cmd->SCp.ptr;
-				f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_WRITE_IO
-
-				FAST_WRITE2_IO();
-#else
-				while (i--)
-					write2_io(*sp++, IO_FIFO);
-
-#endif
-
-				/* Is there room for the flush bytes? */
-
-				if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
-					sp = flushbuf;
-					i = 16;
-
-#ifdef FAST_WRITE_IO
-
-					FAST_WRITE2_IO();
-#else
-					while (i--)
-						write2_io(0, IO_FIFO);
-
-#endif
-
-				}
-			}
-
-			else {
-				write1_io(0, IO_FIFO_READ);	/* put fifo in read mode */
-				hostdata->fifo = FI_FIFO_READING;
-				cmd->SCp.have_data_in = 0;	/* nothing transferred yet */
-			}
-
-		} else {
-			write_3393_count(hostdata, 0);	/* this guarantees a DATA_PHASE interrupt */
-		}
-		hostdata->state = S_RUNNING_LEVEL2;
-		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
-	}
-
-	/*
-	 * Since the SCSI bus can handle only 1 connection at a time,
-	 * we get out of here now. If the selection fails, or when
-	 * the command disconnects, we'll come back to this routine
-	 * to search the input_Q again...
-	 */
-
-	DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
-
-}
-
-
-
-static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata)
-{
-	uchar asr;
-
-	DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? "in" : "out"))
-
-	    write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
-	write_3393_count(hostdata, cnt);
-	write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
-	if (data_in_dir) {
-		do {
-			asr = READ_AUX_STAT();
-			if (asr & ASR_DBR)
-				*buf++ = read_3393(hostdata, WD_DATA);
-		} while (!(asr & ASR_INT));
-	} else {
-		do {
-			asr = READ_AUX_STAT();
-			if (asr & ASR_DBR)
-				write_3393(hostdata, WD_DATA, *buf++);
-		} while (!(asr & ASR_INT));
-	}
-
-	/* Note: we are returning with the interrupt UN-cleared.
-	 * Since (presumably) an entire I/O operation has
-	 * completed, the bus phase is probably different, and
-	 * the interrupt routine will discover this when it
-	 * responds to the uncleared int.
-	 */
-
-}
-
-
-
-static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
-{
-	struct IN2000_hostdata *hostdata;
-	unsigned short *sp;
-	unsigned short f;
-	int i;
-
-	hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata;
-
-/* Normally, you'd expect 'this_residual' to be non-zero here.
- * In a series of scatter-gather transfers, however, this
- * routine will usually be called with 'this_residual' equal
- * to 0 and 'buffers_residual' non-zero. This means that a
- * previous transfer completed, clearing 'this_residual', and
- * now we need to setup the next scatter-gather buffer as the
- * source or destination for THIS transfer.
- */
-	if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
-		++cmd->SCp.buffer;
-		--cmd->SCp.buffers_residual;
-		cmd->SCp.this_residual = cmd->SCp.buffer->length;
-		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-	}
-
-/* Set up hardware registers */
-
-	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
-	write_3393_count(hostdata, cmd->SCp.this_residual);
-	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
-	write1_io(0, IO_FIFO_WRITE);	/* zero counter, assume write */
-
-/* Reading is easy. Just issue the command and return - we'll
- * get an interrupt later when we have actual data to worry about.
- */
-
-	if (data_in_dir) {
-		write1_io(0, IO_FIFO_READ);
-		if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
-			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
-			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
-			hostdata->state = S_RUNNING_LEVEL2;
-		} else
-			write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
-		hostdata->fifo = FI_FIFO_READING;
-		cmd->SCp.have_data_in = 0;
-		return;
-	}
-
-/* Writing is more involved - we'll start the WD chip and write as
- * much data to the fifo as we can right now. Later interrupts will
- * write any bytes that don't make it at this stage.
- */
-
-	if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
-		write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
-		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
-		hostdata->state = S_RUNNING_LEVEL2;
-	} else
-		write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
-	hostdata->fifo = FI_FIFO_WRITING;
-	sp = (unsigned short *) cmd->SCp.ptr;
-
-	if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
-		i = IN2000_FIFO_SIZE;
-	cmd->SCp.have_data_in = i;
-	i >>= 1;		/* Gulp. We assume this_residual is modulo 2 */
-	f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_WRITE_IO
-
-	FAST_WRITE2_IO();
-#else
-	while (i--)
-		write2_io(*sp++, IO_FIFO);
-
-#endif
-
-}
-
-
-/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
- * function in order to work in an SMP environment. (I'd be surprised
- * if the driver is ever used by anyone on a real multi-CPU motherboard,
- * but it _does_ need to be able to compile and run in an SMP kernel.)
- */
-
-static irqreturn_t in2000_intr(int irqnum, void *dev_id)
-{
-	struct Scsi_Host *instance = dev_id;
-	struct IN2000_hostdata *hostdata;
-	Scsi_Cmnd *patch, *cmd;
-	uchar asr, sr, phs, id, lun, *ucp, msg;
-	int i, j;
-	unsigned long length;
-	unsigned short *sp;
-	unsigned short f;
-	unsigned long flags;
-
-	hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
-/* Get the spin_lock and disable further ints, for SMP */
-
-	spin_lock_irqsave(instance->host_lock, flags);
-
-#ifdef PROC_STATISTICS
-	hostdata->int_cnt++;
-#endif
-
-/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
- * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
- * with a big logic array, so it's a little different than what you might
- * expect). As far as I know, there's no reason that BOTH can't be active
- * at the same time, but there's a problem: while we can read the 3393
- * to tell if _it_ wants an interrupt, I don't know of a way to ask the
- * fifo the same question. The best we can do is check the 3393 and if
- * it _isn't_ the source of the interrupt, then we can be pretty sure
- * that the fifo is the culprit.
- *  UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
- *          IO_FIFO_COUNT register mirrors the fifo interrupt state. I
- *          assume that bit clear means interrupt active. As it turns
- *          out, the driver really doesn't need to check for this after
- *          all, so my remarks above about a 'problem' can safely be
- *          ignored. The way the logic is set up, there's no advantage
- *          (that I can see) to worrying about it.
- *
- * It seems that the fifo interrupt signal is negated when we extract
- * bytes during read or write bytes during write.
- *  - fifo will interrupt when data is moving from it to the 3393, and
- *    there are 31 (or less?) bytes left to go. This is sort of short-
- *    sighted: what if you don't WANT to do more? In any case, our
- *    response is to push more into the fifo - either actual data or
- *    dummy bytes if need be. Note that we apparently have to write at
- *    least 32 additional bytes to the fifo after an interrupt in order
- *    to get it to release the ones it was holding on to - writing fewer
- *    than 32 will result in another fifo int.
- *  UPDATE: Again, info from Bill Earnest makes this more understandable:
- *          32 bytes = two counts of the fifo counter register. He tells
- *          me that the fifo interrupt is a non-latching signal derived
- *          from a straightforward boolean interpretation of the 7
- *          highest bits of the fifo counter and the fifo-read/fifo-write
- *          state. Who'd a thought?
- */
-
-	write1_io(0, IO_LED_ON);
-	asr = READ_AUX_STAT();
-	if (!(asr & ASR_INT)) {	/* no WD33c93 interrupt? */
-
-/* Ok. This is definitely a FIFO-only interrupt.
- *
- * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
- * maybe more to come from the SCSI bus. Read as many as we can out of the
- * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
- * update have_data_in afterwards.
- *
- * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
- * into the WD3393 chip (I think the interrupt happens when there are 31
- * bytes left, but it may be fewer...). The 3393 is still waiting, so we
- * shove some more into the fifo, which gets things moving again. If the
- * original SCSI command specified more than 2048 bytes, there may still
- * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
- * Don't forget to update have_data_in. If we've already written out the
- * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
- * push out the remaining real data.
- *    (Big thanks to Bill Earnest for getting me out of the mud in here.)
- */
-
-		cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
-		CHECK_NULL(cmd, "fifo_int")
-
-		    if (hostdata->fifo == FI_FIFO_READING) {
-
-			DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))
-
-			    sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
-			i = read1_io(IO_FIFO_COUNT) & 0xfe;
-			i <<= 2;	/* # of words waiting in the fifo */
-			f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_READ_IO
-
-			FAST_READ2_IO();
-#else
-			while (i--)
-				*sp++ = read2_io(IO_FIFO);
-
-#endif
-
-			i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
-			i <<= 1;
-			cmd->SCp.have_data_in += i;
-		}
-
-		else if (hostdata->fifo == FI_FIFO_WRITING) {
-
-			DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))
-
-/* If all bytes have been written to the fifo, flush out the stragglers.
- * Note that while writing 16 dummy words seems arbitrary, we don't
- * have another choice that I can see. What we really want is to read
- * the 3393 transfer count register (that would tell us how many bytes
- * needed flushing), but the TRANSFER_INFO command hasn't completed
- * yet (not enough bytes!) and that register won't be accessible. So,
- * we use 16 words - a number obtained through trial and error.
- *  UPDATE: Bill says this is exactly what Always does, so there.
- *          More thanks due him for help in this section.
- */
-			    if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
-				i = 16;
-				while (i--)	/* write 32 dummy bytes */
-					write2_io(0, IO_FIFO);
-			}
-
-/* If there are still bytes left in the SCSI buffer, write as many as we
- * can out to the fifo.
- */
-
-			else {
-				sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
-				i = cmd->SCp.this_residual - cmd->SCp.have_data_in;	/* bytes yet to go */
-				j = read1_io(IO_FIFO_COUNT) & 0xfe;
-				j <<= 2;	/* how many words the fifo has room for */
-				if ((j << 1) > i)
-					j = (i >> 1);
-				while (j--)
-					write2_io(*sp++, IO_FIFO);
-
-				i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
-				i <<= 1;
-				cmd->SCp.have_data_in += i;
-			}
-		}
-
-		else {
-			printk("*** Spurious FIFO interrupt ***");
-		}
-
-		write1_io(0, IO_LED_OFF);
-
-/* release the SMP spin_lock and restore irq state */
-		spin_unlock_irqrestore(instance->host_lock, flags);
-		return IRQ_HANDLED;
-	}
-
-/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
- * may also be asserted, but we don't bother to check it: we get more
- * detailed info from FIFO_READING and FIFO_WRITING (see below).
- */
-
-	cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
-	sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear the interrupt */
-	phs = read_3393(hostdata, WD_COMMAND_PHASE);
-
-	if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
-		printk("\nNR:wd-intr-1\n");
-		write1_io(0, IO_LED_OFF);
-
-/* release the SMP spin_lock and restore irq state */
-		spin_unlock_irqrestore(instance->host_lock, flags);
-		return IRQ_HANDLED;
-	}
-
-	DB(DB_INTR, printk("{%02x:%02x-", asr, sr))
-
-/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
- * guaranteed to be in response to the completion of the transfer.
- * If we were reading, there's probably data in the fifo that needs
- * to be copied into RAM - do that here. Also, we have to update
- * 'this_residual' and 'ptr' based on the contents of the
- * TRANSFER_COUNT register, in case the device decided to do an
- * intermediate disconnect (a device may do this if it has to
- * do a seek,  or just to be nice and let other devices have
- * some bus time during long transfers).
- * After doing whatever is necessary with the fifo, we go on and
- * service the WD3393 interrupt normally.
- */
-	    if (hostdata->fifo == FI_FIFO_READING) {
-
-/* buffer index = start-of-buffer + #-of-bytes-already-read */
-
-		sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
-
-/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */
-
-		i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
-		i >>= 1;	/* Gulp. We assume this will always be modulo 2 */
-		f = hostdata->io_base + IO_FIFO;
-
-#ifdef FAST_READ_IO
-
-		FAST_READ2_IO();
-#else
-		while (i--)
-			*sp++ = read2_io(IO_FIFO);
-
-#endif
-
-		hostdata->fifo = FI_FIFO_UNUSED;
-		length = cmd->SCp.this_residual;
-		cmd->SCp.this_residual = read_3393_count(hostdata);
-		cmd->SCp.ptr += (length - cmd->SCp.this_residual);
-
-		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
-
-	}
-
-	else if (hostdata->fifo == FI_FIFO_WRITING) {
-		hostdata->fifo = FI_FIFO_UNUSED;
-		length = cmd->SCp.this_residual;
-		cmd->SCp.this_residual = read_3393_count(hostdata);
-		cmd->SCp.ptr += (length - cmd->SCp.this_residual);
-
-		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))
-
-	}
-
-/* Respond to the specific WD3393 interrupt - there are quite a few! */
-
-	switch (sr) {
-
-	case CSR_TIMEOUT:
-		DB(DB_INTR, printk("TIMEOUT"))
-
-		    if (hostdata->state == S_RUNNING_LEVEL2)
-			hostdata->connected = NULL;
-		else {
-			cmd = (Scsi_Cmnd *) hostdata->selecting;	/* get a valid cmd */
-			CHECK_NULL(cmd, "csr_timeout")
-			    hostdata->selecting = NULL;
-		}
-
-		cmd->result = DID_NO_CONNECT << 16;
-		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-		hostdata->state = S_UNCONNECTED;
-		cmd->scsi_done(cmd);
-
-/* We are not connected to a target - check to see if there
- * are commands waiting to be executed.
- */
-
-		in2000_execute(instance);
-		break;
-
-
-/* Note: this interrupt should not occur in a LEVEL2 command */
-
-	case CSR_SELECT:
-		DB(DB_INTR, printk("SELECT"))
-		    hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
-		CHECK_NULL(cmd, "csr_select")
-		    hostdata->selecting = NULL;
-
-		/* construct an IDENTIFY message with correct disconnect bit */
-
-		hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
-		if (cmd->SCp.phase)
-			hostdata->outgoing_msg[0] |= 0x40;
-
-		if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
-#ifdef SYNC_DEBUG
-			printk(" sending SDTR ");
-#endif
-
-			hostdata->sync_stat[cmd->device->id] = SS_WAITING;
-
-			/* tack on a 2nd message to ask about synchronous transfers */
-
-			hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
-			hostdata->outgoing_msg[2] = 3;
-			hostdata->outgoing_msg[3] = EXTENDED_SDTR;
-			hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
-			hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
-			hostdata->outgoing_len = 6;
-		} else
-			hostdata->outgoing_len = 1;
-
-		hostdata->state = S_CONNECTED;
-		break;
-
-
-	case CSR_XFER_DONE | PHS_DATA_IN:
-	case CSR_UNEXP | PHS_DATA_IN:
-	case CSR_SRV_REQ | PHS_DATA_IN:
-		DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
-		    transfer_bytes(cmd, DATA_IN_DIR);
-		if (hostdata->state != S_RUNNING_LEVEL2)
-			hostdata->state = S_CONNECTED;
-		break;
-
-
-	case CSR_XFER_DONE | PHS_DATA_OUT:
-	case CSR_UNEXP | PHS_DATA_OUT:
-	case CSR_SRV_REQ | PHS_DATA_OUT:
-		DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
-		    transfer_bytes(cmd, DATA_OUT_DIR);
-		if (hostdata->state != S_RUNNING_LEVEL2)
-			hostdata->state = S_CONNECTED;
-		break;
-
-
-/* Note: this interrupt should not occur in a LEVEL2 command */
-
-	case CSR_XFER_DONE | PHS_COMMAND:
-	case CSR_UNEXP | PHS_COMMAND:
-	case CSR_SRV_REQ | PHS_COMMAND:
-		DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
-		    transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
-		hostdata->state = S_CONNECTED;
-		break;
-
-
-	case CSR_XFER_DONE | PHS_STATUS:
-	case CSR_UNEXP | PHS_STATUS:
-	case CSR_SRV_REQ | PHS_STATUS:
-		DB(DB_INTR, printk("STATUS="))
-
-		    cmd->SCp.Status = read_1_byte(hostdata);
-		DB(DB_INTR, printk("%02x", cmd->SCp.Status))
-		    if (hostdata->level2 >= L2_BASIC) {
-			sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */
-			hostdata->state = S_RUNNING_LEVEL2;
-			write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
-			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
-		} else {
-			hostdata->state = S_CONNECTED;
-		}
-		break;
-
-
-	case CSR_XFER_DONE | PHS_MESS_IN:
-	case CSR_UNEXP | PHS_MESS_IN:
-	case CSR_SRV_REQ | PHS_MESS_IN:
-		DB(DB_INTR, printk("MSG_IN="))
-
-		    msg = read_1_byte(hostdata);
-		sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */
-
-		hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
-		if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
-			msg = EXTENDED_MESSAGE;
-		else
-			hostdata->incoming_ptr = 0;
-
-		cmd->SCp.Message = msg;
-		switch (msg) {
-
-		case COMMAND_COMPLETE:
-			DB(DB_INTR, printk("CCMP"))
-			    write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-			hostdata->state = S_PRE_CMP_DISC;
-			break;
-
-		case SAVE_POINTERS:
-			DB(DB_INTR, printk("SDP"))
-			    write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-			hostdata->state = S_CONNECTED;
-			break;
-
-		case RESTORE_POINTERS:
-			DB(DB_INTR, printk("RDP"))
-			    if (hostdata->level2 >= L2_BASIC) {
-				write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
-				write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
-				hostdata->state = S_RUNNING_LEVEL2;
-			} else {
-				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-				hostdata->state = S_CONNECTED;
-			}
-			break;
-
-		case DISCONNECT:
-			DB(DB_INTR, printk("DIS"))
-			    cmd->device->disconnect = 1;
-			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-			hostdata->state = S_PRE_TMP_DISC;
-			break;
-
-		case MESSAGE_REJECT:
-			DB(DB_INTR, printk("REJ"))
-#ifdef SYNC_DEBUG
-			    printk("-REJ-");
-#endif
-			if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
-				hostdata->sync_stat[cmd->device->id] = SS_SET;
-			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-			hostdata->state = S_CONNECTED;
-			break;
-
-		case EXTENDED_MESSAGE:
-			DB(DB_INTR, printk("EXT"))
-
-			    ucp = hostdata->incoming_msg;
-
-#ifdef SYNC_DEBUG
-			printk("%02x", ucp[hostdata->incoming_ptr]);
-#endif
-			/* Is this the last byte of the extended message? */
-
-			if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {
-
-				switch (ucp[2]) {	/* what's the EXTENDED code? */
-				case EXTENDED_SDTR:
-					id = calc_sync_xfer(ucp[3], ucp[4]);
-					if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {
-
-/* A device has sent an unsolicited SDTR message; rather than go
- * through the effort of decoding it and then figuring out what
- * our reply should be, we're just gonna say that we have a
- * synchronous fifo depth of 0. This will result in asynchronous
- * transfers - not ideal but so much easier.
- * Actually, this is OK because it assures us that if we don't
- * specifically ask for sync transfers, we won't do any.
- */
-
-						write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
-						hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
-						hostdata->outgoing_msg[1] = 3;
-						hostdata->outgoing_msg[2] = EXTENDED_SDTR;
-						hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
-						hostdata->outgoing_msg[4] = 0;
-						hostdata->outgoing_len = 5;
-						hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
-					} else {
-						hostdata->sync_xfer[cmd->device->id] = id;
-					}
-#ifdef SYNC_DEBUG
-					printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
-#endif
-					hostdata->sync_stat[cmd->device->id] = SS_SET;
-					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-					hostdata->state = S_CONNECTED;
-					break;
-				case EXTENDED_WDTR:
-					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
-					printk("sending WDTR ");
-					hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
-					hostdata->outgoing_msg[1] = 2;
-					hostdata->outgoing_msg[2] = EXTENDED_WDTR;
-					hostdata->outgoing_msg[3] = 0;	/* 8 bit transfer width */
-					hostdata->outgoing_len = 4;
-					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-					hostdata->state = S_CONNECTED;
-					break;
-				default:
-					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
-					printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
-					hostdata->outgoing_msg[0] = MESSAGE_REJECT;
-					hostdata->outgoing_len = 1;
-					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-					hostdata->state = S_CONNECTED;
-					break;
-				}
-				hostdata->incoming_ptr = 0;
-			}
-
-			/* We need to read more MESS_IN bytes for the extended message */
-
-			else {
-				hostdata->incoming_ptr++;
-				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-				hostdata->state = S_CONNECTED;
-			}
-			break;
-
-		default:
-			printk("Rejecting Unknown Message(%02x) ", msg);
-			write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
-			hostdata->outgoing_msg[0] = MESSAGE_REJECT;
-			hostdata->outgoing_len = 1;
-			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-			hostdata->state = S_CONNECTED;
-		}
-		break;
-
-
-/* Note: this interrupt will occur only after a LEVEL2 command */
-
-	case CSR_SEL_XFER_DONE:
-
-/* Make sure that reselection is enabled at this point - it may
- * have been turned off for the command that just completed.
- */
-
-		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
-		if (phs == 0x60) {
-			DB(DB_INTR, printk("SX-DONE"))
-			    cmd->SCp.Message = COMMAND_COMPLETE;
-			lun = read_3393(hostdata, WD_TARGET_LUN);
-			DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
-			    hostdata->connected = NULL;
-			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-			hostdata->state = S_UNCONNECTED;
-			if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
-				cmd->SCp.Status = lun;
-			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
-				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
-			else
-				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-			cmd->scsi_done(cmd);
-
-/* We are no longer connected to a target - check to see if
- * there are commands waiting to be executed.
- */
-
-			in2000_execute(instance);
-		} else {
-			printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
-		}
-		break;
-
-
-/* Note: this interrupt will occur only after a LEVEL2 command */
-
-	case CSR_SDP:
-		DB(DB_INTR, printk("SDP"))
-		    hostdata->state = S_RUNNING_LEVEL2;
-		write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
-		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
-		break;
-
-
-	case CSR_XFER_DONE | PHS_MESS_OUT:
-	case CSR_UNEXP | PHS_MESS_OUT:
-	case CSR_SRV_REQ | PHS_MESS_OUT:
-		DB(DB_INTR, printk("MSG_OUT="))
-
-/* To get here, we've probably requested MESSAGE_OUT and have
- * already put the correct bytes in outgoing_msg[] and filled
- * in outgoing_len. We simply send them out to the SCSI bus.
- * Sometimes we get MESSAGE_OUT phase when we're not expecting
- * it - like when our SDTR message is rejected by a target. Some
- * targets send the REJECT before receiving all of the extended
- * message, and then seem to go back to MESSAGE_OUT for a byte
- * or two. Not sure why, or if I'm doing something wrong to
- * cause this to happen. Regardless, it seems that sending
- * NOP messages in these situations results in no harm and
- * makes everyone happy.
- */
-		    if (hostdata->outgoing_len == 0) {
-			hostdata->outgoing_len = 1;
-			hostdata->outgoing_msg[0] = NOP;
-		}
-		transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
-		DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
-		    hostdata->outgoing_len = 0;
-		hostdata->state = S_CONNECTED;
-		break;
-
-
-	case CSR_UNEXP_DISC:
-
-/* I think I've seen this after a request-sense that was in response
- * to an error condition, but not sure. We certainly need to do
- * something when we get this interrupt - the question is 'what?'.
- * Let's think positively, and assume some command has finished
- * in a legal manner (like a command that provokes a request-sense),
- * so we treat it as a normal command-complete-disconnect.
- */
-
-
-/* Make sure that reselection is enabled at this point - it may
- * have been turned off for the command that just completed.
- */
-
-		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
-		if (cmd == NULL) {
-			printk(" - Already disconnected! ");
-			hostdata->state = S_UNCONNECTED;
-
-/* release the SMP spin_lock and restore irq state */
-			spin_unlock_irqrestore(instance->host_lock, flags);
-			return IRQ_HANDLED;
-		}
-		DB(DB_INTR, printk("UNEXP_DISC"))
-		    hostdata->connected = NULL;
-		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-		hostdata->state = S_UNCONNECTED;
-		if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
-			cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
-		else
-			cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-		cmd->scsi_done(cmd);
-
-/* We are no longer connected to a target - check to see if
- * there are commands waiting to be executed.
- */
-
-		in2000_execute(instance);
-		break;
-
-
-	case CSR_DISC:
-
-/* Make sure that reselection is enabled at this point - it may
- * have been turned off for the command that just completed.
- */
-
-		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
-		DB(DB_INTR, printk("DISC"))
-		    if (cmd == NULL) {
-			printk(" - Already disconnected! ");
-			hostdata->state = S_UNCONNECTED;
-		}
-		switch (hostdata->state) {
-		case S_PRE_CMP_DISC:
-			hostdata->connected = NULL;
-			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-			hostdata->state = S_UNCONNECTED;
-			DB(DB_INTR, printk(":%d", cmd->SCp.Status))
-			    if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
-				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
-			else
-				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-			cmd->scsi_done(cmd);
-			break;
-		case S_PRE_TMP_DISC:
-		case S_RUNNING_LEVEL2:
-			cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
-			hostdata->disconnected_Q = cmd;
-			hostdata->connected = NULL;
-			hostdata->state = S_UNCONNECTED;
-
-#ifdef PROC_STATISTICS
-			hostdata->disc_done_cnt[cmd->device->id]++;
-#endif
-
-			break;
-		default:
-			printk("*** Unexpected DISCONNECT interrupt! ***");
-			hostdata->state = S_UNCONNECTED;
-		}
-
-/* We are no longer connected to a target - check to see if
- * there are commands waiting to be executed.
- */
-
-		in2000_execute(instance);
-		break;
-
-
-	case CSR_RESEL_AM:
-		DB(DB_INTR, printk("RESEL"))
-
-		    /* First we have to make sure this reselection didn't */
-		    /* happen during Arbitration/Selection of some other device. */
-		    /* If yes, put losing command back on top of input_Q. */
-		    if (hostdata->level2 <= L2_NONE) {
-
-			if (hostdata->selecting) {
-				cmd = (Scsi_Cmnd *) hostdata->selecting;
-				hostdata->selecting = NULL;
-				hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-				cmd->host_scribble = (uchar *) hostdata->input_Q;
-				hostdata->input_Q = cmd;
-			}
-		}
-
-		else {
-
-			if (cmd) {
-				if (phs == 0x00) {
-					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-					cmd->host_scribble = (uchar *) hostdata->input_Q;
-					hostdata->input_Q = cmd;
-				} else {
-					printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
-					while (1)
-						printk("\r");
-				}
-			}
-
-		}
-
-		/* OK - find out which device reselected us. */
-
-		id = read_3393(hostdata, WD_SOURCE_ID);
-		id &= SRCID_MASK;
-
-		/* and extract the lun from the ID message. (Note that we don't
-		 * bother to check for a valid message here - I guess this is
-		 * not the right way to go, but....)
-		 */
-
-		lun = read_3393(hostdata, WD_DATA);
-		if (hostdata->level2 < L2_RESELECT)
-			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
-		lun &= 7;
-
-		/* Now we look for the command that's reconnecting. */
-
-		cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
-		patch = NULL;
-		while (cmd) {
-			if (id == cmd->device->id && lun == cmd->device->lun)
-				break;
-			patch = cmd;
-			cmd = (Scsi_Cmnd *) cmd->host_scribble;
-		}
-
-		/* Hmm. Couldn't find a valid command.... What to do? */
-
-		if (!cmd) {
-			printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
-			break;
-		}
-
-		/* Ok, found the command - now start it up again. */
-
-		if (patch)
-			patch->host_scribble = cmd->host_scribble;
-		else
-			hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
-		hostdata->connected = cmd;
-
-		/* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
-		 * because these things are preserved over a disconnect.
-		 * But we DO need to fix the DPD bit so it's correct for this command.
-		 */
-
-		if (is_dir_out(cmd))
-			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
-		else
-			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
-		if (hostdata->level2 >= L2_RESELECT) {
-			write_3393_count(hostdata, 0);	/* we want a DATA_PHASE interrupt */
-			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
-			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
-			hostdata->state = S_RUNNING_LEVEL2;
-		} else
-			hostdata->state = S_CONNECTED;
-
-		    break;
-
-	default:
-		printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
-	}
-
-	write1_io(0, IO_LED_OFF);
-
-	DB(DB_INTR, printk("} "))
-
-/* release the SMP spin_lock and restore irq state */
-	    spin_unlock_irqrestore(instance->host_lock, flags);
-	return IRQ_HANDLED;
-}
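
Note on the reselection path in the handler above: it recovers the reconnecting target's command by walking disconnected_Q, a singly linked list threaded through host_scribble, carrying a trailing "patch" pointer so the match can be unlinked whether it sits at the head or in the middle. A standalone sketch of that unlink pattern (names are illustrative, not the driver's):

    /* Sketch of the disconnected_Q unlink done in CSR_RESEL_AM above;
     * "next" stands in for the host_scribble chaining. */
    #include <stdio.h>

    struct cmd {
    	int id, lun;
    	struct cmd *next;
    };

    static struct cmd *unlink_cmd(struct cmd **head, int id, int lun)
    {
    	struct cmd *cmd = *head, *patch = NULL;

    	while (cmd && !(cmd->id == id && cmd->lun == lun)) {
    		patch = cmd;              /* remember the predecessor */
    		cmd = cmd->next;
    	}
    	if (!cmd)
    		return NULL;              /* "not in disconnect queue" case */
    	if (patch)
    		patch->next = cmd->next;  /* unlink from middle or end */
    	else
    		*head = cmd->next;        /* unlink from the head */
    	return cmd;
    }

    int main(void)
    {
    	struct cmd c2 = { 2, 0, NULL }, c1 = { 1, 3, &c2 };
    	struct cmd *q = &c1, *found = unlink_cmd(&q, 2, 0);

    	printf("found %d:%d, head now %d:%d\n",
    	       found->id, found->lun, q->id, q->lun);  /* 2:0, head 1:3 */
    	return 0;
    }
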
-
-
-
-#define RESET_CARD         0
-#define RESET_CARD_AND_BUS 1
-#define B_FLAG 0x80
-
-/*
- *	Caller must hold instance lock!
- */
-
-static int reset_hardware(struct Scsi_Host *instance, int type)
-{
-	struct IN2000_hostdata *hostdata;
-	int qt, x;
-
-	hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
-	write1_io(0, IO_LED_ON);
-	if (type == RESET_CARD_AND_BUS) {
-		write1_io(0, IO_CARD_RESET);
-		x = read1_io(IO_HARDWARE);
-	}
-	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear any WD intrpt */
-	write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
-	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
-	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));
-
-	write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
-	write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
-	write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
-	/* FIXME: timeout ?? */
-	while (!(READ_AUX_STAT() & ASR_INT))
-		cpu_relax();	/* wait for RESET to complete */
-
-	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */
-
-	write_3393(hostdata, WD_QUEUE_TAG, 0xa5);	/* any random number */
-	qt = read_3393(hostdata, WD_QUEUE_TAG);
-	if (qt == 0xa5) {
-		x |= B_FLAG;
-		write_3393(hostdata, WD_QUEUE_TAG, 0);
-	}
-	write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
-	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
-	write1_io(0, IO_LED_OFF);
-	return x;
-}
-
-
-
-static int in2000_bus_reset(Scsi_Cmnd * cmd)
-{
-	struct Scsi_Host *instance;
-	struct IN2000_hostdata *hostdata;
-	int x;
-	unsigned long flags;
-
-	instance = cmd->device->host;
-	hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
-	printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);
-
-	spin_lock_irqsave(instance->host_lock, flags);
-
-	/* do scsi-reset here */
-	reset_hardware(instance, RESET_CARD_AND_BUS);
-	for (x = 0; x < 8; x++) {
-		hostdata->busy[x] = 0;
-		hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
-		hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
-	}
-	hostdata->input_Q = NULL;
-	hostdata->selecting = NULL;
-	hostdata->connected = NULL;
-	hostdata->disconnected_Q = NULL;
-	hostdata->state = S_UNCONNECTED;
-	hostdata->fifo = FI_FIFO_UNUSED;
-	hostdata->incoming_ptr = 0;
-	hostdata->outgoing_len = 0;
-
-	cmd->result = DID_RESET << 16;
-
-	spin_unlock_irqrestore(instance->host_lock, flags);
-	return SUCCESS;
-}
-
-static int __in2000_abort(Scsi_Cmnd * cmd)
-{
-	struct Scsi_Host *instance;
-	struct IN2000_hostdata *hostdata;
-	Scsi_Cmnd *tmp, *prev;
-	uchar sr, asr;
-	unsigned long timeout;
-
-	instance = cmd->device->host;
-	hostdata = (struct IN2000_hostdata *) instance->hostdata;
-
-	printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
-	printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));
-
-/*
- * Case 1 : If the command hasn't been issued yet, we simply remove it
- *     from the inout_Q.
- */
-
-	tmp = (Scsi_Cmnd *) hostdata->input_Q;
-	prev = NULL;
-	while (tmp) {
-		if (tmp == cmd) {
-			if (prev)
-				prev->host_scribble = cmd->host_scribble;
-			cmd->host_scribble = NULL;
-			cmd->result = DID_ABORT << 16;
-			printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
-			cmd->scsi_done(cmd);
-			return SUCCESS;
-		}
-		prev = tmp;
-		tmp = (Scsi_Cmnd *) tmp->host_scribble;
-	}
-
-/*
- * Case 2 : If the command is connected, we're going to fail the abort
- *     and let the high level SCSI driver retry at a later time or
- *     issue a reset.
- *
- *     Timeouts, and therefore aborted commands, will be highly unlikely
- *     and handling them cleanly in this situation would make the common
- *     case of noresets less efficient, and would pollute our code.  So,
- *     we fail.
- */
-
-	if (hostdata->connected == cmd) {
-
-		printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
-
-		printk("sending wd33c93 ABORT command - ");
-		write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
-		write_3393_cmd(hostdata, WD_CMD_ABORT);
-
-/* Now we have to attempt to flush out the FIFO... */
-
-		printk("flushing fifo - ");
-		timeout = 1000000;
-		do {
-			asr = READ_AUX_STAT();
-			if (asr & ASR_DBR)
-				read_3393(hostdata, WD_DATA);
-		} while (!(asr & ASR_INT) && timeout-- > 0);
-		sr = read_3393(hostdata, WD_SCSI_STATUS);
-		printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);
-
-		/*
-		 * Abort command processed.
-		 * Still connected.
-		 * We must disconnect.
-		 */
-
-		printk("sending wd33c93 DISCONNECT command - ");
-		write_3393_cmd(hostdata, WD_CMD_DISCONNECT);
-
-		timeout = 1000000;
-		asr = READ_AUX_STAT();
-		while ((asr & ASR_CIP) && timeout-- > 0)
-			asr = READ_AUX_STAT();
-		sr = read_3393(hostdata, WD_SCSI_STATUS);
-		printk("asr=%02x, sr=%02x.", asr, sr);
-
-		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-		hostdata->connected = NULL;
-		hostdata->state = S_UNCONNECTED;
-		cmd->result = DID_ABORT << 16;
-		cmd->scsi_done(cmd);
-
-		in2000_execute(instance);
-
-		return SUCCESS;
-	}
-
-/*
- * Case 3: If the command is currently disconnected from the bus,
- * we're not going to expend much effort here: Let's just return
- * an ABORT_SNOOZE and hope for the best...
- */
-
-	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
-		if (cmd == tmp) {
-			printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
-			return FAILED;
-		}
-
-/*
- * Case 4 : If we reached this point, the command was not found in any of
- *     the queues.
- *
- * We probably reached this point because of an unlikely race condition
- * between the command completing successfully and the abortion code,
- * so we won't panic, but we will notify the user in case something really
- * broke.
- */
-
-	in2000_execute(instance);
-
-	printk("scsi%d: warning : SCSI command probably completed successfully" "         before abortion. ", instance->host_no);
-	return SUCCESS;
-}
-
-static int in2000_abort(Scsi_Cmnd * cmd)
-{
-	int rc;
-
-	spin_lock_irq(cmd->device->host->host_lock);
-	rc = __in2000_abort(cmd);
-	spin_unlock_irq(cmd->device->host->host_lock);
-
-	return rc;
-}
-
-
-#define MAX_IN2000_HOSTS 3
-#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
-#define SETUP_BUFFER_SIZE 200
-static char setup_buffer[SETUP_BUFFER_SIZE];
-static char setup_used[MAX_SETUP_ARGS];
-static int done_setup = 0;
-
-static void __init in2000_setup(char *str, int *ints)
-{
-	int i;
-	char *p1, *p2;
-
-	strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);
-	p1 = setup_buffer;
-	i = 0;
-	while (*p1 && (i < MAX_SETUP_ARGS)) {
-		p2 = strchr(p1, ',');
-		if (p2) {
-			*p2 = '\0';
-			if (p1 != p2)
-				setup_args[i] = p1;
-			p1 = p2 + 1;
-			i++;
-		} else {
-			setup_args[i] = p1;
-			break;
-		}
-	}
-	for (i = 0; i < MAX_SETUP_ARGS; i++)
-		setup_used[i] = 0;
-	done_setup = 1;
-}
-
-
-/* check_setup_args() returns index if key found, 0 if not
- */
-
-static int __init check_setup_args(char *key, int *val, char *buf)
-{
-	int x;
-	char *cp;
-
-	for (x = 0; x < MAX_SETUP_ARGS; x++) {
-		if (setup_used[x])
-			continue;
-		if (!strncmp(setup_args[x], key, strlen(key)))
-			break;
-	}
-	if (x == MAX_SETUP_ARGS)
-		return 0;
-	setup_used[x] = 1;
-	cp = setup_args[x] + strlen(key);
-	*val = -1;
-	if (*cp != ':')
-		return ++x;
-	cp++;
-	if ((*cp >= '0') && (*cp <= '9')) {
-		*val = simple_strtoul(cp, NULL, 0);
-	}
-	return ++x;
-}
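
in2000_setup() above splits the comma-separated boot string into setup_args[], and check_setup_args() then matches either a bare flag ("noreset") or a key:value pair ("ioport:0x200", "period:250"), returning a 1-based index so that 0 can mean "not found". A minimal user-space sketch of the same matching, using a hypothetical option string:

    /* Sketch of the check_setup_args() key[:value] matching, standalone.
     * The option list mimics a boot line such as "in2000=ioport:0x200,noreset". */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const char *setup_args[] = { "ioport:0x200", "noreset" };
    #define NARGS (sizeof(setup_args) / sizeof(setup_args[0]))

    static int check_arg(const char *key, int *val)
    {
    	for (unsigned x = 0; x < NARGS; x++) {
    		if (strncmp(setup_args[x], key, strlen(key)))
    			continue;
    		const char *cp = setup_args[x] + strlen(key);
    		/* a value only follows a ':'; -1 marks a bare flag */
    		*val = (*cp == ':') ? (int)strtoul(cp + 1, NULL, 0) : -1;
    		return (int)x + 1;	/* 1-based index; 0 means not found */
    	}
    	return 0;
    }

    int main(void)
    {
    	int val;

    	if (check_arg("ioport", &val))
    		printf("ioport -> 0x%x\n", val);	/* prints 0x200 */
    	if (check_arg("noreset", &val))
    		printf("noreset set (val=%d)\n", val);	/* val = -1 */
    	return 0;
    }
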
-
-
-
-/* The "correct" (ie portable) way to access memory-mapped hardware
- * such as the IN2000 EPROM and dip switch is through the use of
- * special macros declared in 'asm/io.h'. We use readb() and readl()
- * when reading from the card's BIOS area in in2000_detect().
- */
-static u32 bios_tab[] in2000__INITDATA = {
-	0xc8000,
-	0xd0000,
-	0xd8000,
-	0
-};
-
-static unsigned short base_tab[] in2000__INITDATA = {
-	0x220,
-	0x200,
-	0x110,
-	0x100,
-};
-
-static int int_tab[] in2000__INITDATA = {
-	15,
-	14,
-	11,
-	10
-};
-
-static int probe_bios(u32 addr, u32 *s1, uchar *switches)
-{
-	void __iomem *p = ioremap(addr, 0x34);
-	if (!p)
-		return 0;
-	*s1 = readl(p + 0x10);
-	if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) {
-		/* Read the switch image that's mapped into EPROM space */
-		*switches = ~readb(p + 0x20);
-		iounmap(p);
-		return 1;
-	}
-	iounmap(p);
-	return 0;
-}
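
The two magic words probe_bios() tests are ASCII signatures read little-endian out of the EPROM: 0x41564f4e decodes to "NOVA" and 0x61776c41 to "Alwa", presumably the starts of two BIOS ID strings, which fits the "couple of BIOS versions with different layouts" comment in in2000_detect() below. A quick decoding sketch:

    /* Decode the BIOS signature words as little-endian ASCII. */
    #include <stdio.h>
    #include <stdint.h>

    static void show(uint32_t sig)
    {
    	char s[5];

    	for (int i = 0; i < 4; i++)
    		s[i] = (char)((sig >> (8 * i)) & 0xff);	/* low byte first */
    	s[4] = '\0';
    	printf("0x%08x -> \"%s\"\n", sig, s);
    }

    int main(void)
    {
    	show(0x41564f4e);	/* prints "NOVA" */
    	show(0x61776c41);	/* prints "Alwa" */
    	return 0;
    }
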
-
-static int __init in2000_detect(struct scsi_host_template * tpnt)
-{
-	struct Scsi_Host *instance;
-	struct IN2000_hostdata *hostdata;
-	int detect_count;
-	int bios;
-	int x;
-	unsigned short base;
-	uchar switches;
-	uchar hrev;
-	unsigned long flags;
-	int val;
-	char buf[32];
-
-/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
- * pretty straightforward and fool-proof operation. There are 3
- * possible locations for the IN2000 EPROM in memory space - if we
- * find a BIOS signature, we can read the dip switch settings from
- * the byte at BIOS+32 (shadowed in by logic on the card). From 2
- * of the switch bits we get the card's address in IO space. There's
- * an image of the dip switch there, also, so we have a way to back-
- * check that this really is an IN2000 card. Very nifty. Use the
- * 'ioport:xx' command-line parameter if your BIOS EPROM is absent
- * or disabled.
- */
-
-	if (!done_setup && setup_strings)
-		in2000_setup(setup_strings, NULL);
-
-	detect_count = 0;
-	for (bios = 0; bios_tab[bios]; bios++) {
-		u32 s1 = 0;
-		if (check_setup_args("ioport", &val, buf)) {
-			base = val;
-			switches = ~inb(base + IO_SWITCHES) & 0xff;
-			printk("Forcing IN2000 detection at IOport 0x%x ", base);
-			bios = 2;
-		}
-/*
- * There have been a couple of BIOS versions with different layouts
- * for the obvious ID strings. We look for the 2 most common ones and
- * hope that they cover all the cases...
- */
-		else if (probe_bios(bios_tab[bios], &s1, &switches)) {
-			printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]);
-
-/* Find out where the IO space is */
-
-			x = switches & (SW_ADDR0 | SW_ADDR1);
-			base = base_tab[x];
-
-/* Check for the IN2000 signature in IO space. */
-
-			x = ~inb(base + IO_SWITCHES) & 0xff;
-			if (x != switches) {
-				printk("Bad IO signature: %02x vs %02x.\n", x, switches);
-				continue;
-			}
-		} else
-			continue;
-
-/* OK. We have a base address for the IO ports - run a few safety checks */
-
-		if (!(switches & SW_BIT7)) {	/* I _think_ all cards do this */
-			printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base);
-			continue;
-		}
-
-/* Let's assume any hardware version will work, although the driver
- * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
- * print out the rev number for reference later, but accept them all.
- */
-
-		hrev = inb(base + IO_HARDWARE);
-
-		/* Bit 2 tells us if interrupts are disabled */
-		if (switches & SW_DISINT) {
-			printk("The IN-2000 SCSI card at IOport 0x%03x ", base);
-			printk("is not configured for interrupt operation!\n");
-			printk("This driver requires an interrupt: cancelling detection.\n");
-			continue;
-		}
-
-/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
- * initialize it.
- */
-
-		tpnt->proc_name = "in2000";
-		instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
-		if (instance == NULL)
-			continue;
-		detect_count++;
-		hostdata = (struct IN2000_hostdata *) instance->hostdata;
-		instance->io_port = hostdata->io_base = base;
-		hostdata->dip_switch = switches;
-		hostdata->hrev = hrev;
-
-		write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
-		write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
-		write1_io(0, IO_INTR_MASK);	/* allow all ints */
-		x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
-		if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
-			printk("in2000_detect: Unable to allocate IRQ.\n");
-			detect_count--;
-			continue;
-		}
-		instance->irq = x;
-		instance->n_io_port = 13;
-		request_region(base, 13, "in2000");	/* lock in this IO space for our use */
-
-		for (x = 0; x < 8; x++) {
-			hostdata->busy[x] = 0;
-			hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
-			hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
-#ifdef PROC_STATISTICS
-			hostdata->cmd_cnt[x] = 0;
-			hostdata->disc_allowed_cnt[x] = 0;
-			hostdata->disc_done_cnt[x] = 0;
-#endif
-		}
-		hostdata->input_Q = NULL;
-		hostdata->selecting = NULL;
-		hostdata->connected = NULL;
-		hostdata->disconnected_Q = NULL;
-		hostdata->state = S_UNCONNECTED;
-		hostdata->fifo = FI_FIFO_UNUSED;
-		hostdata->level2 = L2_BASIC;
-		hostdata->disconnect = DIS_ADAPTIVE;
-		hostdata->args = DEBUG_DEFAULTS;
-		hostdata->incoming_ptr = 0;
-		hostdata->outgoing_len = 0;
-		hostdata->default_sx_per = DEFAULT_SX_PER;
-
-/* Older BIOS's had a 'sync on/off' switch - use its setting */
-
-		if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5))
-			hostdata->sync_off = 0x00;	/* sync defaults to on */
-		else
-			hostdata->sync_off = 0xff;	/* sync defaults to off */
-
-#ifdef PROC_INTERFACE
-		hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
-#ifdef PROC_STATISTICS
-		hostdata->int_cnt = 0;
-#endif
-#endif
-
-		if (check_setup_args("nosync", &val, buf))
-			hostdata->sync_off = val;
-
-		if (check_setup_args("period", &val, buf))
-			hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns;
-
-		if (check_setup_args("disconnect", &val, buf)) {
-			if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
-				hostdata->disconnect = val;
-			else
-				hostdata->disconnect = DIS_ADAPTIVE;
-		}
-
-		if (check_setup_args("noreset", &val, buf))
-			hostdata->args ^= A_NO_SCSI_RESET;
-
-		if (check_setup_args("level2", &val, buf))
-			hostdata->level2 = val;
-
-		if (check_setup_args("debug", &val, buf))
-			hostdata->args = (val & DB_MASK);
-
-#ifdef PROC_INTERFACE
-		if (check_setup_args("proc", &val, buf))
-			hostdata->proc = val;
-#endif
-
-
-		/* FIXME: not strictly needed I think but the called code expects
-		   to be locked */
-		spin_lock_irqsave(instance->host_lock, flags);
-		x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS);
-		spin_unlock_irqrestore(instance->host_lock, flags);
-
-		hostdata->microcode = read_3393(hostdata, WD_CDB_1);
-		if (x & 0x01) {
-			if (x & B_FLAG)
-				hostdata->chip = C_WD33C93B;
-			else
-				hostdata->chip = C_WD33C93A;
-		} else
-			hostdata->chip = C_WD33C93;
-
-		printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No");
-		printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode);
-#ifdef DEBUGGING_ON
-		printk("setup_args = ");
-		for (x = 0; x < MAX_SETUP_ARGS; x++)
-			printk("%s,", setup_args[x]);
-		printk("\n");
-#endif
-		if (hostdata->sync_off == 0xff)
-			printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
-		printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE);
-	}
-
-	return detect_count;
-}
-
-static int in2000_release(struct Scsi_Host *shost)
-{
-	if (shost->irq)
-		free_irq(shost->irq, shost);
-	if (shost->io_port && shost->n_io_port)
-		release_region(shost->io_port, shost->n_io_port);
-	return 0;
-}
-
-/* NOTE: I lifted this function straight out of the old driver,
- *       and have not tested it. Presumably it does what it's
- *       supposed to do...
- */
-
-static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo)
-{
-	int size;
-
-	size = capacity;
-	iinfo[0] = 64;
-	iinfo[1] = 32;
-	iinfo[2] = size >> 11;
-
-/* This should approximate the large drive handling that the DOS ASPI manager
-   uses.  Drives very near the boundaries may not be handled correctly (i.e.
-   near 2.0 Gb and 4.0 Gb) */
-
-	if (iinfo[2] > 1024) {
-		iinfo[0] = 64;
-		iinfo[1] = 63;
-		iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
-	}
-	if (iinfo[2] > 1024) {
-		iinfo[0] = 128;
-		iinfo[1] = 63;
-		iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
-	}
-	if (iinfo[2] > 1024) {
-		iinfo[0] = 255;
-		iinfo[1] = 63;
-		iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
-	}
-	return 0;
-}
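
The fallback ladder above keeps the reported cylinder count under the 1024-cylinder BIOS limit by stepping the fake geometry from 64/32 through 64/63 and 128/63 up to 255/63. Worked example for an 8388608-sector disk (4 GB with 512-byte sectors): 64*32 gives 4096 cylinders, 64*63 gives 2080, 128*63 gives 1040, and 255*63 finally fits at 522. A standalone sketch of the same computation:

    /* Sketch of the in2000_biosparam() CHS fallback, outside the kernel. */
    #include <stdio.h>

    static void biosparam(unsigned long capacity, int *iinfo)
    {
    	iinfo[0] = 64;			/* heads */
    	iinfo[1] = 32;			/* sectors per track */
    	iinfo[2] = capacity >> 11;	/* cylinders = capacity / (64 * 32) */

    	if (iinfo[2] > 1024) { iinfo[0] = 64;  iinfo[1] = 63; iinfo[2] = capacity / (64UL * 63); }
    	if (iinfo[2] > 1024) { iinfo[0] = 128; iinfo[1] = 63; iinfo[2] = capacity / (128UL * 63); }
    	if (iinfo[2] > 1024) { iinfo[0] = 255; iinfo[1] = 63; iinfo[2] = capacity / (255UL * 63); }
    }

    int main(void)
    {
    	int g[3];

    	biosparam(8388608UL, g);	/* 4 GB of 512-byte sectors */
    	printf("C/H/S = %d/%d/%d\n", g[2], g[0], g[1]);	/* 522/255/63 */
    	return 0;
    }
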
-
-
-static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len)
-{
-
-#ifdef PROC_INTERFACE
-
-	char *bp;
-	struct IN2000_hostdata *hd;
-	int x, i;
-
-	hd = (struct IN2000_hostdata *) instance->hostdata;
-
-	buf[len] = '\0';
-	bp = buf;
-	if (!strncmp(bp, "debug:", 6)) {
-		bp += 6;
-		hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
-	} else if (!strncmp(bp, "disconnect:", 11)) {
-		bp += 11;
-		x = simple_strtoul(bp, NULL, 0);
-		if (x < DIS_NEVER || x > DIS_ALWAYS)
-			x = DIS_ADAPTIVE;
-		hd->disconnect = x;
-	} else if (!strncmp(bp, "period:", 7)) {
-		bp += 7;
-		x = simple_strtoul(bp, NULL, 0);
-		hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
-	} else if (!strncmp(bp, "resync:", 7)) {
-		bp += 7;
-		x = simple_strtoul(bp, NULL, 0);
-		for (i = 0; i < 7; i++)
-			if (x & (1 << i))
-				hd->sync_stat[i] = SS_UNSET;
-	} else if (!strncmp(bp, "proc:", 5)) {
-		bp += 5;
-		hd->proc = simple_strtoul(bp, NULL, 0);
-	} else if (!strncmp(bp, "level2:", 7)) {
-		bp += 7;
-		hd->level2 = simple_strtoul(bp, NULL, 0);
-	}
-#endif
-	return len;
-}
-
-static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
-{
-
-#ifdef PROC_INTERFACE
-	unsigned long flags;
-	struct IN2000_hostdata *hd;
-	Scsi_Cmnd *cmd;
-	int x;
-
-	hd = (struct IN2000_hostdata *) instance->hostdata;
-
-	spin_lock_irqsave(instance->host_lock, flags);
-	if (hd->proc & PR_VERSION)
-		seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
-
-	if (hd->proc & PR_INFO) {
-		seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
-		seq_puts(m, "\nsync_xfer[] =       ");
-		for (x = 0; x < 7; x++)
-			seq_printf(m, "\t%02x", hd->sync_xfer[x]);
-		seq_puts(m, "\nsync_stat[] =       ");
-		for (x = 0; x < 7; x++)
-			seq_printf(m, "\t%02x", hd->sync_stat[x]);
-	}
-#ifdef PROC_STATISTICS
-	if (hd->proc & PR_STATISTICS) {
-		seq_puts(m, "\ncommands issued:    ");
-		for (x = 0; x < 7; x++)
-			seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
-		seq_puts(m, "\ndisconnects allowed:");
-		for (x = 0; x < 7; x++)
-			seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
-		seq_puts(m, "\ndisconnects done:   ");
-		for (x = 0; x < 7; x++)
-			seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
-		seq_printf(m, "\ninterrupts:      \t%ld", hd->int_cnt);
-	}
-#endif
-	if (hd->proc & PR_CONNECTED) {
-		seq_puts(m, "\nconnected:     ");
-		if (hd->connected) {
-			cmd = (Scsi_Cmnd *) hd->connected;
-			seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
-		}
-	}
-	if (hd->proc & PR_INPUTQ) {
-		seq_puts(m, "\ninput_Q:       ");
-		cmd = (Scsi_Cmnd *) hd->input_Q;
-		while (cmd) {
-			seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
-			cmd = (Scsi_Cmnd *) cmd->host_scribble;
-		}
-	}
-	if (hd->proc & PR_DISCQ) {
-		seq_puts(m, "\ndisconnected_Q:");
-		cmd = (Scsi_Cmnd *) hd->disconnected_Q;
-		while (cmd) {
-			seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
-			cmd = (Scsi_Cmnd *) cmd->host_scribble;
-		}
-	}
-	if (hd->proc & PR_TEST) {
-		;		/* insert your own custom function here */
-	}
-	seq_putc(m, '\n');
-	spin_unlock_irqrestore(instance->host_lock, flags);
-#endif				/* PROC_INTERFACE */
-	return 0;
-}
-
-MODULE_LICENSE("GPL");
-
-
-static struct scsi_host_template driver_template = {
-	.proc_name       		= "in2000",
-	.write_info       		= in2000_write_info,
-	.show_info       		= in2000_show_info,
-	.name            		= "Always IN2000",
-	.detect          		= in2000_detect, 
-	.release			= in2000_release,
-	.queuecommand    		= in2000_queuecommand,
-	.eh_abort_handler		= in2000_abort,
-	.eh_bus_reset_handler		= in2000_bus_reset,
-	.bios_param      		= in2000_biosparam, 
-	.can_queue       		= IN2000_CAN_Q,
-	.this_id         		= IN2000_HOST_ID,
-	.sg_tablesize    		= IN2000_SG,
-	.cmd_per_lun     		= IN2000_CPL,
-	.use_clustering  		= DISABLE_CLUSTERING,
-};
-#include "scsi_module.c"

+ 0 - 412
drivers/scsi/in2000.h

@@ -1,412 +0,0 @@
-/*
- *    in2000.h -  Linux device driver definitions for the
- *                Always IN2000 ISA SCSI card.
- *
- *    IMPORTANT: This file is for version 1.33 - 26/Aug/1998
- *
- * Copyright (c) 1996 John Shifflett, GeoLog Consulting
- *    john@geolog.com
- *    jshiffle@netcom.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef IN2000_H
-#define IN2000_H
-
-#include <asm/io.h>
-
-#define PROC_INTERFACE     /* add code for /proc/scsi/in2000/xxx interface */
-#ifdef  PROC_INTERFACE
-#define PROC_STATISTICS    /* add code for keeping various real time stats */
-#endif
-
-#define SYNC_DEBUG         /* extra info on sync negotiation printed */
-#define DEBUGGING_ON       /* enable command-line debugging bitmask */
-#define DEBUG_DEFAULTS 0   /* default bitmask - change from command-line */
-
-#ifdef __i386__
-#define FAST_READ_IO       /* No problems with these on my machine */
-#define FAST_WRITE_IO
-#endif
-
-#ifdef DEBUGGING_ON
-#define DB(f,a) if (hostdata->args & (f)) a;
-#define CHECK_NULL(p,s) /* if (!(p)) {printk("\n"); while (1) printk("NP:%s\r",(s));} */
-#else
-#define DB(f,a)
-#define CHECK_NULL(p,s)
-#endif
-
-#define uchar unsigned char
-
-#define read1_io(a)     (inb(hostdata->io_base+(a)))
-#define read2_io(a)     (inw(hostdata->io_base+(a)))
-#define write1_io(b,a)  (outb((b),hostdata->io_base+(a)))
-#define write2_io(w,a)  (outw((w),hostdata->io_base+(a)))
-
-#ifdef __i386__
-/* These inline assembly defines are derived from a patch
- * sent to me by Bill Earnest. He's done a lot of very
- * valuable thinking, testing, and coding during his effort
- * to squeeze more speed out of this driver. I really think
- * that we are doing IO at close to the maximum now with
- * the fifo. (And yes, insw uses 'edi' while outsw uses
- * 'esi'. Thanks Bill!)
- */
-
-#define FAST_READ2_IO()    \
-({ \
-int __dummy_1,__dummy_2; \
-   __asm__ __volatile__ ("\n \
-   cld                    \n \
-   orl %%ecx, %%ecx       \n \
-   jz 1f                  \n \
-   rep                    \n \
-   insw (%%dx),%%es:(%%edi) \n \
-1: "                       \
-   : "=D" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)  /* output */   \
-   : "2" (f), "0" (sp), "1" (i)  /* input */    \
-   );       /* trashed */ \
-})
-
-#define FAST_WRITE2_IO()   \
-({ \
-int __dummy_1,__dummy_2; \
-   __asm__ __volatile__ ("\n \
-   cld                    \n \
-   orl %%ecx, %%ecx       \n \
-   jz 1f                  \n \
-   rep                    \n \
-   outsw %%ds:(%%esi),(%%dx) \n \
-1: "                       \
-   : "=S" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)/* output */   \
-   : "2" (f), "0" (sp), "1" (i)  /* input */    \
-   );       /* trashed */ \
-})
-#endif
-
-/* IN2000 io_port offsets */
-#define IO_WD_ASR       0x00     /* R - 3393 auxstat reg */
-#define     ASR_INT        0x80
-#define     ASR_LCI        0x40
-#define     ASR_BSY        0x20
-#define     ASR_CIP        0x10
-#define     ASR_PE         0x02
-#define     ASR_DBR        0x01
-#define IO_WD_ADDR      0x00     /* W - 3393 address reg */
-#define IO_WD_DATA      0x01     /* R/W - rest of 3393 regs */
-#define IO_FIFO         0x02     /* R/W - in2000 dual-port fifo (16 bits) */
-#define IN2000_FIFO_SIZE   2048  /*    fifo capacity in bytes */
-#define IO_CARD_RESET   0x03     /* W - in2000 start master reset */
-#define IO_FIFO_COUNT   0x04     /* R - in2000 fifo counter */
-#define IO_FIFO_WRITE   0x05     /* W - clear fifo counter, start write */
-#define IO_FIFO_READ    0x07     /* W - start fifo read */
-#define IO_LED_OFF      0x08     /* W - turn off in2000 activity LED */
-#define IO_SWITCHES     0x08     /* R - read in2000 dip switch */
-#define     SW_ADDR0       0x01  /*    bit 0 = bit 0 of index to io addr */
-#define     SW_ADDR1       0x02  /*    bit 1 = bit 1 of index io addr */
-#define     SW_DISINT      0x04  /*    bit 2 true if ints disabled */
-#define     SW_INT0        0x08  /*    bit 3 = bit 0 of index to interrupt */
-#define     SW_INT1        0x10  /*    bit 4 = bit 1 of index to interrupt */
-#define     SW_INT_SHIFT   3     /*    shift right this amount to right justify int bits */
-#define     SW_SYNC_DOS5   0x20  /*    bit 5 used by Always BIOS */
-#define     SW_FLOPPY      0x40  /*    bit 6 true if floppy enabled */
-#define     SW_BIT7        0x80  /*    bit 7 hardwired true (ground) */
-#define IO_LED_ON       0x09     /* W - turn on in2000 activity LED */
-#define IO_HARDWARE     0x0a     /* R - read in2000 hardware rev, stop reset */
-#define IO_INTR_MASK    0x0c     /* W - in2000 interrupt mask reg */
-#define     IMASK_WD       0x01  /*    WD33c93 interrupt mask */
-#define     IMASK_FIFO     0x02  /*    FIFO interrupt mask */
-
-/* wd register names */
-#define WD_OWN_ID    0x00
-#define WD_CONTROL   0x01
-#define WD_TIMEOUT_PERIOD  0x02
-#define WD_CDB_1     0x03
-#define WD_CDB_2     0x04
-#define WD_CDB_3     0x05
-#define WD_CDB_4     0x06
-#define WD_CDB_5     0x07
-#define WD_CDB_6     0x08
-#define WD_CDB_7     0x09
-#define WD_CDB_8     0x0a
-#define WD_CDB_9     0x0b
-#define WD_CDB_10    0x0c
-#define WD_CDB_11    0x0d
-#define WD_CDB_12    0x0e
-#define WD_TARGET_LUN      0x0f
-#define WD_COMMAND_PHASE   0x10
-#define WD_SYNCHRONOUS_TRANSFER  0x11
-#define WD_TRANSFER_COUNT_MSB 0x12
-#define WD_TRANSFER_COUNT  0x13
-#define WD_TRANSFER_COUNT_LSB 0x14
-#define WD_DESTINATION_ID  0x15
-#define WD_SOURCE_ID    0x16
-#define WD_SCSI_STATUS     0x17
-#define WD_COMMAND      0x18
-#define WD_DATA      0x19
-#define WD_QUEUE_TAG    0x1a
-#define WD_AUXILIARY_STATUS   0x1f
-
-/* WD commands */
-#define WD_CMD_RESET    0x00
-#define WD_CMD_ABORT    0x01
-#define WD_CMD_ASSERT_ATN  0x02
-#define WD_CMD_NEGATE_ACK  0x03
-#define WD_CMD_DISCONNECT  0x04
-#define WD_CMD_RESELECT    0x05
-#define WD_CMD_SEL_ATN     0x06
-#define WD_CMD_SEL      0x07
-#define WD_CMD_SEL_ATN_XFER   0x08
-#define WD_CMD_SEL_XFER    0x09
-#define WD_CMD_RESEL_RECEIVE  0x0a
-#define WD_CMD_RESEL_SEND  0x0b
-#define WD_CMD_WAIT_SEL_RECEIVE 0x0c
-#define WD_CMD_TRANS_ADDR  0x18
-#define WD_CMD_TRANS_INFO  0x20
-#define WD_CMD_TRANSFER_PAD   0x21
-#define WD_CMD_SBT_MODE    0x80
-
-/* SCSI Bus Phases */
-#define PHS_DATA_OUT    0x00
-#define PHS_DATA_IN     0x01
-#define PHS_COMMAND     0x02
-#define PHS_STATUS      0x03
-#define PHS_MESS_OUT    0x06
-#define PHS_MESS_IN     0x07
-
-/* Command Status Register definitions */
-
-  /* reset state interrupts */
-#define CSR_RESET    0x00
-#define CSR_RESET_AF    0x01
-
-  /* successful completion interrupts */
-#define CSR_RESELECT    0x10
-#define CSR_SELECT      0x11
-#define CSR_SEL_XFER_DONE  0x16
-#define CSR_XFER_DONE      0x18
-
-  /* paused or aborted interrupts */
-#define CSR_MSGIN    0x20
-#define CSR_SDP         0x21
-#define CSR_SEL_ABORT      0x22
-#define CSR_RESEL_ABORT    0x25
-#define CSR_RESEL_ABORT_AM 0x27
-#define CSR_ABORT    0x28
-
-  /* terminated interrupts */
-#define CSR_INVALID     0x40
-#define CSR_UNEXP_DISC     0x41
-#define CSR_TIMEOUT     0x42
-#define CSR_PARITY      0x43
-#define CSR_PARITY_ATN     0x44
-#define CSR_BAD_STATUS     0x45
-#define CSR_UNEXP    0x48
-
-  /* service required interrupts */
-#define CSR_RESEL    0x80
-#define CSR_RESEL_AM    0x81
-#define CSR_DISC     0x85
-#define CSR_SRV_REQ     0x88
-
-   /* Own ID/CDB Size register */
-#define OWNID_EAF    0x08
-#define OWNID_EHP    0x10
-#define OWNID_RAF    0x20
-#define OWNID_FS_8   0x00
-#define OWNID_FS_12  0x40
-#define OWNID_FS_16  0x80
-
-   /* Control register */
-#define CTRL_HSP     0x01
-#define CTRL_HA      0x02
-#define CTRL_IDI     0x04
-#define CTRL_EDI     0x08
-#define CTRL_HHP     0x10
-#define CTRL_POLLED  0x00
-#define CTRL_BURST   0x20
-#define CTRL_BUS     0x40
-#define CTRL_DMA     0x80
-
-   /* Timeout Period register */
-#define TIMEOUT_PERIOD_VALUE  20    /* results in 200 ms. */
-
-   /* Synchronous Transfer Register */
-#define STR_FSS      0x80
-
-   /* Destination ID register */
-#define DSTID_DPD    0x40
-#define DATA_OUT_DIR 0
-#define DATA_IN_DIR  1
-#define DSTID_SCC    0x80
-
-   /* Source ID register */
-#define SRCID_MASK   0x07
-#define SRCID_SIV    0x08
-#define SRCID_DSP    0x20
-#define SRCID_ES     0x40
-#define SRCID_ER     0x80
-
-
-
-#define ILLEGAL_STATUS_BYTE   0xff
-
-
-#define DEFAULT_SX_PER     500   /* (ns) fairly safe */
-#define DEFAULT_SX_OFF     0     /* aka async */
-
-#define OPTIMUM_SX_PER     252   /* (ns) best we can do (mult-of-4) */
-#define OPTIMUM_SX_OFF     12    /* size of in2000 fifo */
-
-struct sx_period {
-   unsigned int   period_ns;
-   uchar          reg_value;
-   };
-
-
-struct IN2000_hostdata {
-    struct Scsi_Host *next;
-    uchar            chip;             /* what kind of wd33c93 chip? */
-    uchar            microcode;        /* microcode rev if 'B' */
-    unsigned short   io_base;          /* IO port base */
-    unsigned int     dip_switch;       /* dip switch settings */
-    unsigned int     hrev;             /* hardware revision of card */
-    volatile uchar   busy[8];          /* index = target, bit = lun */
-    volatile Scsi_Cmnd *input_Q;       /* commands waiting to be started */
-    volatile Scsi_Cmnd *selecting;     /* trying to select this command */
-    volatile Scsi_Cmnd *connected;     /* currently connected command */
-    volatile Scsi_Cmnd *disconnected_Q;/* commands waiting for reconnect */
-    uchar            state;            /* what we are currently doing */
-    uchar            fifo;             /* what the FIFO is up to */
-    uchar            level2;           /* extent to which Level-2 commands are used */
-    uchar            disconnect;       /* disconnect/reselect policy */
-    unsigned int     args;             /* set from command-line argument */
-    uchar            incoming_msg[8];  /* filled during message_in phase */
-    int              incoming_ptr;     /* mainly used with EXTENDED messages */
-    uchar            outgoing_msg[8];  /* send this during next message_out */
-    int              outgoing_len;     /* length of outgoing message */
-    unsigned int     default_sx_per;   /* default transfer period for SCSI bus */
-    uchar            sync_xfer[8];     /* sync_xfer reg settings per target */
-    uchar            sync_stat[8];     /* status of sync negotiation per target */
-    uchar            sync_off;         /* bit mask: don't use sync with these targets */
-#ifdef PROC_INTERFACE
-    uchar            proc;             /* bit mask: what's in proc output */
-#ifdef PROC_STATISTICS
-    unsigned long    cmd_cnt[8];       /* # of commands issued per target */
-    unsigned long    int_cnt;          /* # of interrupts serviced */
-    unsigned long    disc_allowed_cnt[8]; /* # of disconnects allowed per target */
-    unsigned long    disc_done_cnt[8]; /* # of disconnects done per target*/
-#endif
-#endif
-    };
-
-
-/* defines for hostdata->chip */
-
-#define C_WD33C93       0
-#define C_WD33C93A      1
-#define C_WD33C93B      2
-#define C_UNKNOWN_CHIP  100
-
-/* defines for hostdata->state */
-
-#define S_UNCONNECTED         0
-#define S_SELECTING           1
-#define S_RUNNING_LEVEL2      2
-#define S_CONNECTED           3
-#define S_PRE_TMP_DISC        4
-#define S_PRE_CMP_DISC        5
-
-/* defines for hostdata->fifo */
-
-#define FI_FIFO_UNUSED        0
-#define FI_FIFO_READING       1
-#define FI_FIFO_WRITING       2
-
-/* defines for hostdata->level2 */
-/* NOTE: only the first 3 are trustworthy at this point -
- * having trouble when more than 1 device is reading/writing
- * at the same time...
- */
-
-#define L2_NONE      0  /* no combination commands - we get lots of ints */
-#define L2_SELECT    1  /* start with SEL_ATN_XFER, but never resume it */
-#define L2_BASIC     2  /* resume after STATUS ints & RDP messages */
-#define L2_DATA      3  /* resume after DATA_IN/OUT ints */
-#define L2_MOST      4  /* resume after anything except a RESELECT int */
-#define L2_RESELECT  5  /* resume after everything, including RESELECT ints */
-#define L2_ALL       6  /* always resume */
-
-/* defines for hostdata->disconnect */
-
-#define DIS_NEVER    0
-#define DIS_ADAPTIVE 1
-#define DIS_ALWAYS   2
-
-/* defines for hostdata->args */
-
-#define DB_TEST               1<<0
-#define DB_FIFO               1<<1
-#define DB_QUEUE_COMMAND      1<<2
-#define DB_EXECUTE            1<<3
-#define DB_INTR               1<<4
-#define DB_TRANSFER           1<<5
-#define DB_MASK               0x3f
-
-#define A_NO_SCSI_RESET       1<<15
-
-
-/* defines for hostdata->sync_xfer[] */
-
-#define SS_UNSET     0
-#define SS_FIRST     1
-#define SS_WAITING   2
-#define SS_SET       3
-
-/* defines for hostdata->proc */
-
-#define PR_VERSION   1<<0
-#define PR_INFO      1<<1
-#define PR_STATISTICS 1<<2
-#define PR_CONNECTED 1<<3
-#define PR_INPUTQ    1<<4
-#define PR_DISCQ     1<<5
-#define PR_TEST      1<<6
-#define PR_STOP      1<<7
-
-
-# include <linux/init.h>
-# include <linux/spinlock.h>
-# define in2000__INITFUNC(function) __initfunc(function)
-# define in2000__INIT __init
-# define in2000__INITDATA __initdata
-# define CLISPIN_LOCK(host,flags)   spin_lock_irqsave(host->host_lock, flags)
-# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(host->host_lock, \
-							   flags)
-
-static int in2000_detect(struct scsi_host_template *) in2000__INIT;
-static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
-static int in2000_abort(Scsi_Cmnd *);
-static void in2000_setup(char *, int *) in2000__INIT;
-static int in2000_biosparam(struct scsi_device *, struct block_device *,
-		sector_t, int *);
-static int in2000_bus_reset(Scsi_Cmnd *);
-
-
-#define IN2000_CAN_Q    16
-#define IN2000_SG       SG_ALL
-#define IN2000_CPL      2
-#define IN2000_HOST_ID  7
-
-#endif /* IN2000_H */
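
One convention from the removed header worth spelling out: busy[] packs per-LUN state into one byte per target ("index = target, bit = lun"), which is why the interrupt handler clears lines like busy[cmd->device->id] &= ~(1 << cmd->device->lun). A tiny sketch of that bookkeeping:

    /* Sketch of the busy[] bitmap from IN2000_hostdata. */
    #include <stdio.h>

    static unsigned char busy[8];	/* index = target, bit = lun */

    static void set_busy(int id, int lun)   { busy[id] |=  (1u << lun); }
    static void clear_busy(int id, int lun) { busy[id] &= ~(1u << lun); }
    static int  is_busy(int id, int lun)    { return busy[id] & (1u << lun); }

    int main(void)
    {
    	set_busy(2, 3);
    	printf("%d %d\n", is_busy(2, 3) != 0, is_busy(2, 0) != 0);	/* 1 0 */
    	clear_busy(2, 3);
    	printf("%d\n", is_busy(2, 3) != 0);				/* 0 */
    	return 0;
    }
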

+ 118 - 16
drivers/scsi/ipr.c

@@ -493,15 +493,15 @@ struct ipr_error_table_t ipr_error_table[] = {
 	"9072: Link not operational transition"},
 	"9072: Link not operational transition"},
 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9032: Array exposed but still protected"},
 	"9032: Array exposed but still protected"},
-	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
+	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
 	"70DD: Device forced failed by disrupt device command"},
 	"70DD: Device forced failed by disrupt device command"},
 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4061: Multipath redundancy level got better"},
 	"4061: Multipath redundancy level got better"},
 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4060: Multipath redundancy level got worse"},
 	"4060: Multipath redundancy level got worse"},
-	{0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
+	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
 	"9083: Device raw mode enabled"},
 	"9083: Device raw mode enabled"},
-	{0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
+	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
 	"9084: Device raw mode disabled"},
 	"9084: Device raw mode disabled"},
 	{0x07270000, 0, 0,
 	{0x07270000, 0, 0,
 	"Failure due to other device"},
 	"Failure due to other device"},
@@ -1473,7 +1473,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
-	list_del(&hostrcb->queue);
+	list_del_init(&hostrcb->queue);
 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
 	if (ioasc) {
@@ -2552,6 +2552,23 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
 	}
 }
 
+static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
+{
+	struct ipr_hostrcb *hostrcb;
+
+	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
+					struct ipr_hostrcb, queue);
+
+	if (unlikely(!hostrcb)) {
+		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
+		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
+						struct ipr_hostrcb, queue);
+	}
+
+	list_del_init(&hostrcb->queue);
+	return hostrcb;
+}
+
 /**
 /**
  * ipr_process_error - Op done function for an adapter error log.
  * @ipr_cmd:	ipr command struct
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 	u32 fd_ioasc;
+	char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
 
 	if (ioa_cfg->sis64)
 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
 	else
 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
-	list_del(&hostrcb->queue);
+	list_del_init(&hostrcb->queue);
 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 
 	if (!ioasc) {
@@ -2588,6 +2606,10 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
 	}
 	}
 
 
+	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+	hostrcb = ipr_get_free_hostrcb(ioa_cfg);
+	kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
+
 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
 }
 }
 
 
@@ -4095,6 +4117,64 @@ static struct device_attribute ipr_ioa_fw_type_attr = {
 	.show = ipr_show_fw_type
 };
 
+static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
+				struct bin_attribute *bin_attr, char *buf,
+				loff_t off, size_t count)
+{
+	struct device *cdev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	struct ipr_hostrcb *hostrcb;
+	unsigned long lock_flags = 0;
+	int ret;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
+					struct ipr_hostrcb, queue);
+	if (!hostrcb) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		return 0;
+	}
+	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
+				sizeof(hostrcb->hcam));
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	return ret;
+}
+
+static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
+				struct bin_attribute *bin_attr, char *buf,
+				loff_t off, size_t count)
+{
+	struct device *cdev = container_of(kobj, struct device, kobj);
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+	struct ipr_hostrcb *hostrcb;
+	unsigned long lock_flags = 0;
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
+					struct ipr_hostrcb, queue);
+	if (!hostrcb) {
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		return count;
+	}
+
+	/* Reclaim hostrcb before exit */
+	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	return count;
+}
+
+static struct bin_attribute ipr_ioa_async_err_log = {
+	.attr = {
+		.name =		"async_err_log",
+		.mode =		S_IRUGO | S_IWUSR,
+	},
+	.size = 0,
+	.read = ipr_read_async_err_log,
+	.write = ipr_next_async_err_log
+};
+
 static struct device_attribute *ipr_ioa_attrs[] = {
 	&ipr_fw_version_attr,
 	&ipr_log_level_attr,
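
The bin attribute added in this hunk gives userspace a small read/acknowledge protocol: a read of async_err_log returns the oldest HCAM on hostrcb_report_q, and any write pops that entry back onto the free queue; the KOBJ_CHANGE uevent with ASYNC_ERR_LOG=1 added to ipr_process_error() above signals that a new record has been queued. A consumer sketch; the sysfs path is an assumption (shost_dev attributes normally appear under /sys/class/scsi_host/hostN) and needs the right host number:

    /* Drain the adapter's async error log via the new sysfs attribute. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	const char *path = "/sys/class/scsi_host/host0/async_err_log";
    	char buf[4096];

    	for (;;) {
    		int fd = open(path, O_RDWR);	/* write access needs root */
    		if (fd < 0)
    			return 1;
    		ssize_t n = read(fd, buf, sizeof(buf));	/* oldest hcam, 0 if empty */
    		if (n <= 0) {
    			close(fd);
    			break;
    		}
    		/* ... parse or store the hcam bytes here ... */
    		write(fd, "1", 1);	/* ack: hostrcb moves back to the free queue */
    		close(fd);
    		printf("consumed %zd bytes\n", n);
    	}
    	return 0;
    }
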
@@ -7026,8 +7106,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_resource_entry *res;
-	struct ipr_hostrcb *hostrcb, *temp;
-	int i = 0, j;
+	int j;
 
 	ENTER;
 	ioa_cfg->in_reset_reload = 0;
@@ -7048,12 +7127,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
 	}
 	schedule_work(&ioa_cfg->work_q);
 
-	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
-		list_del(&hostrcb->queue);
-		if (i++ < IPR_NUM_LOG_HCAMS)
-			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
+	for (j = 0; j < IPR_NUM_HCAMS; j++) {
+		list_del_init(&ioa_cfg->hostrcb[j]->queue);
+		if (j < IPR_NUM_LOG_HCAMS)
+			ipr_send_hcam(ioa_cfg,
+				IPR_HCAM_CDB_OP_CODE_LOG_DATA,
+				ioa_cfg->hostrcb[j]);
 		else
-			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+			ipr_send_hcam(ioa_cfg,
+				IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
+				ioa_cfg->hostrcb[j]);
 	}
 
 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
@@ -7966,7 +8049,8 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
 
 	ENTER;
 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
-	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
+	if (ioa_cfg->identify_hrrq_index == 0)
+		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
 
 	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
 		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
@@ -8335,7 +8419,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
 
 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
 			     struct ipr_hostrcb, queue);
-	list_del(&hostrcb->queue);
+	list_del_init(&hostrcb->queue);
 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
 
 	rc = ipr_get_ldump_data_section(ioa_cfg,
@@ -9332,7 +9416,7 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
 	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
 			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
 
-	for (i = 0; i < IPR_NUM_HCAMS; i++) {
+	for (i = 0; i < IPR_MAX_HCAMS; i++) {
 		dma_free_coherent(&ioa_cfg->pdev->dev,
 				  sizeof(struct ipr_hostrcb),
 				  ioa_cfg->hostrcb[i],
@@ -9572,7 +9656,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
 	if (!ioa_cfg->u.cfg_table)
 		goto out_free_host_rrq;
 
-	for (i = 0; i < IPR_NUM_HCAMS; i++) {
+	for (i = 0; i < IPR_MAX_HCAMS; i++) {
 		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
 							 sizeof(struct ipr_hostrcb),
 							 &ioa_cfg->hostrcb_dma[i],
@@ -9714,6 +9798,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
 
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
+	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
@@ -10352,6 +10437,8 @@ static void ipr_remove(struct pci_dev *pdev)
 			      &ipr_trace_attr);
 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
 			     &ipr_dump_attr);
+	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
+			&ipr_ioa_async_err_log);
 	scsi_remove_host(ioa_cfg->host);
 
 	__ipr_remove(pdev);
@@ -10400,10 +10487,25 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 		return rc;
 	}
 
+	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
+			&ipr_ioa_async_err_log);
+
+	if (rc) {
+		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
+				&ipr_dump_attr);
+		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
+				&ipr_trace_attr);
+		scsi_remove_host(ioa_cfg->host);
+		__ipr_remove(pdev);
+		return rc;
+	}
+
 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
 				   &ipr_dump_attr);
 
 	if (rc) {
+		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
+				      &ipr_ioa_async_err_log);
 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
 				      &ipr_trace_attr);
 		scsi_remove_host(ioa_cfg->host);

+ 6 - 2
drivers/scsi/ipr.h

@@ -154,7 +154,9 @@
 #define IPR_DEFAULT_MAX_ERROR_DUMP			984
 #define IPR_NUM_LOG_HCAMS				2
 #define IPR_NUM_CFG_CHG_HCAMS				2
+#define IPR_NUM_HCAM_QUEUE				12
 #define IPR_NUM_HCAMS	(IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
+#define IPR_MAX_HCAMS	(IPR_NUM_HCAMS + IPR_NUM_HCAM_QUEUE)
 
 #define IPR_MAX_SIS64_TARGETS_PER_BUS			1024
 #define IPR_MAX_SIS64_LUNS_PER_TARGET			0xffffffff
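
The sizing arithmetic in this hunk: IPR_NUM_HCAMS stays at 2 + 2 = 4 HCAMs kept posted to the adapter, and the twelve new queue buffers bring IPR_MAX_HCAMS to 16, the length now used for the hostrcb[] and hostrcb_dma[] arrays in a later hunk. Restated with compile-time checks:

    /* The HCAM buffer sizing from ipr.h, checked at compile time (C11). */
    #define IPR_NUM_LOG_HCAMS	2
    #define IPR_NUM_CFG_CHG_HCAMS	2
    #define IPR_NUM_HCAM_QUEUE	12
    #define IPR_NUM_HCAMS	(IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
    #define IPR_MAX_HCAMS	(IPR_NUM_HCAMS + IPR_NUM_HCAM_QUEUE)

    _Static_assert(IPR_NUM_HCAMS == 4, "four HCAMs posted to the adapter");
    _Static_assert(IPR_MAX_HCAMS == 16, "sixteen DMA buffers allocated in total");
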
@@ -1504,6 +1506,7 @@ struct ipr_ioa_cfg {
 	u8 log_level;
 #define IPR_MAX_LOG_LEVEL			4
 #define IPR_DEFAULT_LOG_LEVEL		2
+#define IPR_DEBUG_LOG_LEVEL		3
 
 #define IPR_NUM_TRACE_INDEX_BITS	8
 #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
@@ -1532,10 +1535,11 @@ struct ipr_ioa_cfg {
 
 	char ipr_hcam_label[8];
 #define IPR_HCAM_LABEL			"hcams"
-	struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS];
-	dma_addr_t hostrcb_dma[IPR_NUM_HCAMS];
+	struct ipr_hostrcb *hostrcb[IPR_MAX_HCAMS];
+	dma_addr_t hostrcb_dma[IPR_MAX_HCAMS];
 	struct list_head hostrcb_free_q;
 	struct list_head hostrcb_pending_q;
+	struct list_head hostrcb_report_q;
 
 	struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
 	u32 hrrq_num;

+ 0 - 1
drivers/scsi/libfc/fc_exch.c

@@ -1837,7 +1837,6 @@ static void fc_exch_reset(struct fc_exch *ep)
 	int rc = 1;
 
 	spin_lock_bh(&ep->ex_lock);
-	fc_exch_abort_locked(ep, 0);
 	ep->state |= FC_EX_RST_CLEANUP;
 	fc_exch_timer_cancel(ep);
 	if (ep->esb_stat & ESB_ST_REC_QUAL)

+ 23 - 3
drivers/scsi/libfc/fc_rport.c

@@ -457,6 +457,9 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
  */
 static int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
+	struct fc_lport *lport = rdata->local_port;
+	u32 port_id = rdata->ids.port_id;
+
 	mutex_lock(&rdata->rp_mutex);
 
 	FC_RPORT_DBG(rdata, "Remove port\n");
@@ -466,6 +469,15 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata)
 		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
 		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
 		goto out;
 		goto out;
 	}
 	}
+	/*
+	 * FC-LS states:
+	 * To explicitly Logout, the initiating Nx_Port shall terminate
+	 * other open Sequences that it initiated with the destination
+	 * Nx_Port prior to performing Logout.
+	 */
+	lport->tt.exch_mgr_reset(lport, 0, port_id);
+	lport->tt.exch_mgr_reset(lport, port_id, 0);
+
 	fc_rport_enter_logo(rdata);
 	fc_rport_enter_logo(rdata);
 
 
 	/*
 	/*
@@ -547,16 +559,24 @@ static void fc_rport_timeout(struct work_struct *work)
  */
  */
 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 {
 {
+	struct fc_lport *lport = rdata->local_port;
+
 	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
 	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
 		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
 		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
 		     fc_rport_state(rdata), rdata->retries);
 		     fc_rport_state(rdata), rdata->retries);
 
 
 	switch (rdata->rp_state) {
 	switch (rdata->rp_state) {
 	case RPORT_ST_FLOGI:
 	case RPORT_ST_FLOGI:
-	case RPORT_ST_PLOGI:
 		rdata->flags &= ~FC_RP_STARTED;
 		rdata->flags &= ~FC_RP_STARTED;
 		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
 		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
 		break;
 		break;
+	case RPORT_ST_PLOGI:
+		if (lport->point_to_multipoint) {
+			rdata->flags &= ~FC_RP_STARTED;
+			fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
+		} else
+			fc_rport_enter_logo(rdata);
+		break;
 	case RPORT_ST_RTV:
 	case RPORT_ST_RTV:
 		fc_rport_enter_ready(rdata);
 		fc_rport_enter_ready(rdata);
 		break;
 		break;
@@ -1877,7 +1897,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
 	spp->spp_type_ext = rspp->spp_type_ext;
 	spp->spp_type_ext = rspp->spp_type_ext;
 	spp->spp_flags = FC_SPP_RESP_ACK;
 	spp->spp_flags = FC_SPP_RESP_ACK;
 
 
-	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+	fc_rport_enter_prli(rdata);
 
 
 	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
 	fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
 	lport->tt.frame_send(lport, fp);
 	lport->tt.frame_send(lport, fp);
@@ -1915,7 +1935,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
 			     fc_rport_state(rdata));
 			     fc_rport_state(rdata));
 
 
-		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+		fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 		mutex_unlock(&rdata->rp_mutex);
 		mutex_unlock(&rdata->rp_mutex);
 		kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 		kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 	} else
 	} else

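The fc_rport_logoff() hunks above drain open exchanges in both directions before the LOGO is sent, per the FC-LS requirement quoted in the new comment: exch_mgr_reset() takes (lport, sid, did) with 0 acting as a wildcard, so the (0, port_id) call aborts exchanges the remote port initiated and the (port_id, 0) call aborts the ones the local port initiated toward it. A minimal user-space sketch of that calling pattern (the struct and helpers below are illustrative stand-ins, not libfc code):

    /* Model of the two-direction exchange reset performed before LOGO. */
    #include <stdio.h>

    struct fc_lport {
    	const char *name;
    };

    /* Stand-in for lport->tt.exch_mgr_reset(); 0 acts as a wildcard ID. */
    static void exch_mgr_reset(struct fc_lport *lp, unsigned int sid,
    			   unsigned int did)
    {
    	printf("%s: reset exchanges sid=%06x did=%06x\n", lp->name, sid, did);
    }

    static void rport_logoff(struct fc_lport *lp, unsigned int port_id)
    {
    	/* Abort sequences the remote port opened toward us... */
    	exch_mgr_reset(lp, 0, port_id);
    	/* ...and the ones we opened toward it, then send LOGO. */
    	exch_mgr_reset(lp, port_id, 0);
    }

    int main(void)
    {
    	struct fc_lport lp = { "host1" };

    	rport_logoff(&lp, 0x0100e8);
    	return 0;
    }
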
+ 41 - 41
drivers/scsi/lpfc/lpfc_ct.c

@@ -1535,7 +1535,7 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
 }
 
 /* Routines for all individual HBA attributes */
-int
+static int
 lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
 {
 	struct lpfc_fdmi_attr_entry *ae;
@@ -1551,7 +1551,7 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
 	ad->AttrType = cpu_to_be16(RHBA_NODENAME);
 	return size;
 }
-int
+static int
 lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
 				struct lpfc_fdmi_attr_def *ad)
 {
@@ -1573,7 +1573,7 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
 {
 	struct lpfc_hba *phba = vport->phba;
@@ -1594,7 +1594,7 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
 			 struct lpfc_fdmi_attr_def *ad)
 {
@@ -1615,7 +1615,7 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
 			       struct lpfc_fdmi_attr_def *ad)
 {
@@ -1637,7 +1637,7 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
 			   struct lpfc_fdmi_attr_def *ad)
 {
@@ -1669,7 +1669,7 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
 			    struct lpfc_fdmi_attr_def *ad)
 {
@@ -1690,7 +1690,7 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
 			   struct lpfc_fdmi_attr_def *ad)
 {
@@ -1715,7 +1715,7 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
 			   struct lpfc_fdmi_attr_def *ad)
 {
@@ -1736,7 +1736,7 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
 			  struct lpfc_fdmi_attr_def *ad)
 {
@@ -1759,7 +1759,7 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
 			  struct lpfc_fdmi_attr_def *ad)
 {
@@ -1775,7 +1775,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
 				 struct lpfc_fdmi_attr_def *ad)
 {
@@ -1794,7 +1794,7 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
 			       struct lpfc_fdmi_attr_def *ad)
 {
@@ -1811,7 +1811,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
 			     struct lpfc_fdmi_attr_def *ad)
 {
@@ -1828,7 +1828,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
 			       struct lpfc_fdmi_attr_def *ad)
 {
@@ -1846,7 +1846,7 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
 			    struct lpfc_fdmi_attr_def *ad)
 {
@@ -1867,7 +1867,7 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
 			      struct lpfc_fdmi_attr_def *ad)
 {
@@ -1884,7 +1884,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
 			     struct lpfc_fdmi_attr_def *ad)
 {
@@ -1906,7 +1906,7 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
 }
 
 /* Routines for all individual PORT attributes */
-int
+static int
 lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
 			    struct lpfc_fdmi_attr_def *ad)
 {
@@ -1925,7 +1925,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
 				  struct lpfc_fdmi_attr_def *ad)
 {
@@ -1975,7 +1975,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
 			  struct lpfc_fdmi_attr_def *ad)
 {
@@ -2039,7 +2039,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
 			      struct lpfc_fdmi_attr_def *ad)
 {
@@ -2059,7 +2059,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
 			       struct lpfc_fdmi_attr_def *ad)
 {
@@ -2081,7 +2081,7 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
 			      struct lpfc_fdmi_attr_def *ad)
 {
@@ -2102,7 +2102,7 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
 			 struct lpfc_fdmi_attr_def *ad)
 {
@@ -2120,7 +2120,7 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
 			 struct lpfc_fdmi_attr_def *ad)
 {
@@ -2138,7 +2138,7 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
 				  struct lpfc_fdmi_attr_def *ad)
 {
@@ -2156,7 +2156,7 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
 			      struct lpfc_fdmi_attr_def *ad)
 {
@@ -2175,7 +2175,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
 			  struct lpfc_fdmi_attr_def *ad)
 {
@@ -2190,7 +2190,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
 				struct lpfc_fdmi_attr_def *ad)
 {
@@ -2208,7 +2208,7 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
 				   struct lpfc_fdmi_attr_def *ad)
 {
@@ -2227,7 +2227,7 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
 			       struct lpfc_fdmi_attr_def *ad)
 {
@@ -2243,7 +2243,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
 			     struct lpfc_fdmi_attr_def *ad)
 {
@@ -2259,7 +2259,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
 			    struct lpfc_fdmi_attr_def *ad)
 {
@@ -2274,7 +2274,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
 			     struct lpfc_fdmi_attr_def *ad)
 {
@@ -2295,7 +2295,7 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
 			  struct lpfc_fdmi_attr_def *ad)
 {
@@ -2316,7 +2316,7 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
 			     struct lpfc_fdmi_attr_def *ad)
 {
@@ -2337,7 +2337,7 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
 			   struct lpfc_fdmi_attr_def *ad)
 {
@@ -2358,7 +2358,7 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
 			       struct lpfc_fdmi_attr_def *ad)
 {
@@ -2378,7 +2378,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
 			 struct lpfc_fdmi_attr_def *ad)
 {
@@ -2393,7 +2393,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
 	return size;
 }
 
-int
+static int
 lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
 			      struct lpfc_fdmi_attr_def *ad)
 {

+ 17 - 17
drivers/scsi/lpfc/lpfc_els.c

@@ -4617,7 +4617,7 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
 	return sentplogi;
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
 		uint32_t word0)
 {
@@ -4629,7 +4629,7 @@ lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
 	return sizeof(struct fc_rdp_link_service_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
 		uint8_t *page_a0, uint8_t *page_a2)
 {
@@ -4694,7 +4694,7 @@ lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
 	return sizeof(struct fc_rdp_sfp_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
 		READ_LNK_VAR *stat)
 {
@@ -4723,7 +4723,7 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
 	return sizeof(struct fc_rdp_link_error_status_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
 		      struct lpfc_vport *vport)
 {
@@ -4748,7 +4748,7 @@ lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
 	return sizeof(struct fc_rdp_bbc_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
 			   struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
 {
@@ -4776,7 +4776,7 @@ lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
 	return sizeof(struct fc_rdp_oed_sfp_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
 			      struct fc_rdp_oed_sfp_desc *desc,
 			      uint8_t *page_a2)
@@ -4805,7 +4805,7 @@ lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
 	return sizeof(struct fc_rdp_oed_sfp_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
 			     struct fc_rdp_oed_sfp_desc *desc,
 			     uint8_t *page_a2)
@@ -4834,7 +4834,7 @@ lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
 	return sizeof(struct fc_rdp_oed_sfp_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
 			      struct fc_rdp_oed_sfp_desc *desc,
 			      uint8_t *page_a2)
@@ -4864,7 +4864,7 @@ lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
 }
 
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
 			      struct fc_rdp_oed_sfp_desc *desc,
 			      uint8_t *page_a2)
@@ -4893,7 +4893,7 @@ lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
 	return sizeof(struct fc_rdp_oed_sfp_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
 		      uint8_t *page_a0, struct lpfc_vport *vport)
 {
@@ -4907,7 +4907,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
 	return sizeof(struct fc_rdp_opd_sfp_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
 {
 	if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
@@ -4924,7 +4924,7 @@ lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
 	return sizeof(struct fc_fec_rdp_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 {
 	uint16_t rdp_cap = 0;
@@ -4986,7 +4986,7 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 	return sizeof(struct fc_rdp_port_speed_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
 		struct lpfc_hba *phba)
 {
@@ -5003,7 +5003,7 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
 	return sizeof(struct fc_rdp_port_name_desc);
 }
 
-uint32_t
+static uint32_t
 lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
 		struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
@@ -5027,7 +5027,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
 	return sizeof(struct fc_rdp_port_name_desc);
 }
 
-void
+static void
 lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
 		int status)
 {
@@ -5165,7 +5165,7 @@ free_rdp_context:
 	kfree(rdp_context);
 }
 
-int
+static int
 lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
 {
 	LPFC_MBOXQ_t *mbox = NULL;
@@ -7995,7 +7995,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	}
 }
 
-void
+static void
 lpfc_start_fdmi(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;

+ 2 - 2
drivers/scsi/lpfc/lpfc_mbox.c

@@ -2260,7 +2260,7 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
 	return 0;
 }
 
-void
+static void
 lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
 	MAILBOX_t *mb;
@@ -2281,7 +2281,7 @@ mbx_failed:
 	rdp_context->cmpl(phba, rdp_context, rc);
 }
 
-void
+static void
 lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 {
 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;

+ 2 - 2
drivers/scsi/lpfc/lpfc_sli.c

@@ -5689,7 +5689,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
 	return rc;
 }
 
-void
+static void
 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
 		  uint32_t feature)
 {
@@ -8968,7 +8968,7 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 * Since ABORTS must go on the same WQ of the command they are
 * aborting, we use command's fcp_wqidx.
 */
-int
+static int
 lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
 		    struct lpfc_iocbq *piocb)
 {

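The long run of int/uint32_t/void to static conversions in the lpfc files above is the conventional fix for functions used only inside one translation unit: giving them internal linkage takes them out of the global namespace, silences missing-prototype warnings, and lets the compiler optimize them more freely. A trivial standalone illustration (hypothetical file, not lpfc code):

    /* helper.c - a file-local function needs no header prototype */
    #include <stdio.h>

    static int add_one(int x)	/* internal linkage: invisible to other files */
    {
    	return x + 1;
    }

    int main(void)
    {
    	printf("%d\n", add_one(41));
    	return 0;
    }
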
+ 5 - 23
drivers/scsi/megaraid/megaraid_sas_base.c

@@ -189,25 +189,12 @@ u32
 megasas_build_and_issue_cmd(struct megasas_instance *instance,
 			    struct scsi_cmnd *scmd);
 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
-void
-megasas_release_fusion(struct megasas_instance *instance);
-int
-megasas_ioc_init_fusion(struct megasas_instance *instance);
-void
-megasas_free_cmds_fusion(struct megasas_instance *instance);
-u8
-megasas_get_map_info(struct megasas_instance *instance);
-int
-megasas_sync_map_info(struct megasas_instance *instance);
 int
 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
 	int seconds);
-void megasas_reset_reply_desc(struct megasas_instance *instance);
 void megasas_fusion_ocr_wq(struct work_struct *work);
 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
 					 int initial);
-int megasas_check_mpio_paths(struct megasas_instance *instance,
-			     struct scsi_cmnd *scmd);
 
 int
 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -5036,7 +5023,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
 	/* Find first memory bar */
 	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
-	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
+	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
 	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
 					 "megasas: LSI")) {
 		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
@@ -5782,7 +5769,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
 					     &instance->consumer_h);
 
 		if (!instance->producer || !instance->consumer) {
-			dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate"
+			dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
 			       "memory for producer, consumer\n");
 			goto fail_alloc_dma_buf;
 		}
@@ -6711,14 +6698,9 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
 	unsigned long flags;
 	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
 
-	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
-	if (!ioc)
-		return -ENOMEM;
-
-	if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
-		error = -EFAULT;
-		goto out_kfree_ioc;
-	}
+	ioc = memdup_user(user_ioc, sizeof(*ioc));
+	if (IS_ERR(ioc))
+		return PTR_ERR(ioc);
 
 	instance = megasas_lookup_instance(ioc->host_no);
 	if (!instance) {

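The ioctl hunk above is a straight conversion to memdup_user(), which folds the kmalloc() + copy_from_user() pair and its unwind label into a single call that returns either the populated buffer or an ERR_PTR-encoded errno. A user-space model of the idiom (ERR_PTR/IS_ERR/memdup_user are re-implemented below as stubs for illustration; they are not the kernel definitions):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
    	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for the kernel helper: duplicate len bytes of "user" memory. */
    static void *memdup_user(const void *src, size_t len)
    {
    	void *p = malloc(len);

    	if (!p)
    		return ERR_PTR(-ENOMEM);
    	memcpy(p, src, len);	/* the kernel version uses copy_from_user() */
    	return p;
    }

    int main(void)
    {
    	const char req[] = "ioctl payload";
    	char *ioc = memdup_user(req, sizeof(req));

    	if (IS_ERR(ioc))
    		return (int)-PTR_ERR(ioc);
    	printf("copied: %s\n", ioc);
    	free(ioc);
    	return 0;
    }
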
+ 9 - 0
drivers/scsi/megaraid/megaraid_sas_fusion.h

@@ -991,5 +991,14 @@ union desc_value {
 	} u;
 };
 
+void megasas_free_cmds_fusion(struct megasas_instance *instance);
+int megasas_ioc_init_fusion(struct megasas_instance *instance);
+u8 megasas_get_map_info(struct megasas_instance *instance);
+int megasas_sync_map_info(struct megasas_instance *instance);
+void megasas_release_fusion(struct megasas_instance *instance);
+void megasas_reset_reply_desc(struct megasas_instance *instance);
+int megasas_check_mpio_paths(struct megasas_instance *instance,
+			      struct scsi_cmnd *scmd);
+void megasas_fusion_ocr_wq(struct work_struct *work);
 
 #endif /* _MEGARAID_SAS_FUSION_H_ */

+ 107 - 152
drivers/scsi/mpt3sas/mpt3sas_base.c

@@ -98,7 +98,7 @@ MODULE_PARM_DESC(mpt3sas_fwfault_debug,
 	" enable detection of firmware fault and halt firmware - (default=0)");
 	" enable detection of firmware fault and halt firmware - (default=0)");
 
 
 static int
 static int
-_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
 
 
 /**
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -218,8 +218,7 @@ _base_fault_reset_work(struct work_struct *work)
 	ioc->non_operational_loop = 0;
 	ioc->non_operational_loop = 0;
 
 
 	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
 	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
-		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
 		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
 		    __func__, (rc == 0) ? "success" : "failed");
 		    __func__, (rc == 0) ? "success" : "failed");
 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
 		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
@@ -2040,7 +2039,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
  * mpt3sas_base_unmap_resources - free controller resources
  * mpt3sas_base_unmap_resources - free controller resources
  * @ioc: per adapter object
  * @ioc: per adapter object
  */
  */
-void
+static void
 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
 {
 {
 	struct pci_dev *pdev = ioc->pdev;
 	struct pci_dev *pdev = ioc->pdev;
@@ -2145,7 +2144,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 
 
 	_base_mask_interrupts(ioc);
 	_base_mask_interrupts(ioc);
 
 
-	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+	r = _base_get_ioc_facts(ioc);
 	if (r)
 	if (r)
 		goto out_fail;
 		goto out_fail;
 
 
@@ -3183,12 +3182,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 /**
 /**
  * _base_allocate_memory_pools - allocate start of day memory pools
  * _base_allocate_memory_pools - allocate start of day memory pools
  * @ioc: per adapter object
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 success, anything else error
  * Returns 0 success, anything else error
  */
  */
 static int
 static int
-_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc,  int sleep_flag)
+_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 {
 {
 	struct mpt3sas_facts *facts;
 	struct mpt3sas_facts *facts;
 	u16 max_sge_elements;
 	u16 max_sge_elements;
@@ -3658,29 +3656,25 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
  * _base_wait_on_iocstate - waiting on a particular ioc state
  * _base_wait_on_iocstate - waiting on a particular ioc state
  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
  * @timeout: timeout in second
  * @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  */
  */
 static int
 static int
-_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
-	int sleep_flag)
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
 {
 {
 	u32 count, cntdn;
 	u32 count, cntdn;
 	u32 current_state;
 	u32 current_state;
 
 
 	count = 0;
 	count = 0;
-	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+	cntdn = 1000 * timeout;
 	do {
 	do {
 		current_state = mpt3sas_base_get_iocstate(ioc, 1);
 		current_state = mpt3sas_base_get_iocstate(ioc, 1);
 		if (current_state == ioc_state)
 		if (current_state == ioc_state)
 			return 0;
 			return 0;
 		if (count && current_state == MPI2_IOC_STATE_FAULT)
 		if (count && current_state == MPI2_IOC_STATE_FAULT)
 			break;
 			break;
-		if (sleep_flag == CAN_SLEEP)
-			usleep_range(1000, 1500);
-		else
-			udelay(500);
+
+		usleep_range(1000, 1500);
 		count++;
 		count++;
 	} while (--cntdn);
 	} while (--cntdn);
 
 
@@ -3692,24 +3686,22 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
  * a write to the doorbell)
  * a write to the doorbell)
  * @ioc: per adapter object
  * @ioc: per adapter object
  * @timeout: timeout in second
  * @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  *
  *
  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
  */
  */
 static int
 static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
 
 
 static int
 static int
-_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
-	int sleep_flag)
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
 {
 	u32 cntdn, count;
 	u32 cntdn, count;
 	u32 int_status;
 	u32 int_status;
 
 
 	count = 0;
 	count = 0;
-	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+	cntdn = 1000 * timeout;
 	do {
 	do {
 		int_status = readl(&ioc->chip->HostInterruptStatus);
 		int_status = readl(&ioc->chip->HostInterruptStatus);
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
 		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
@@ -3718,10 +3710,35 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
 				ioc->name, __func__, count, timeout));
 				ioc->name, __func__, count, timeout));
 			return 0;
 			return 0;
 		}
 		}
-		if (sleep_flag == CAN_SLEEP)
-			usleep_range(1000, 1500);
-		else
-			udelay(500);
+
+		usleep_range(1000, 1500);
+		count++;
+	} while (--cntdn);
+
+	pr_err(MPT3SAS_FMT
+		"%s: failed due to timeout count(%d), int_status(%x)!\n",
+		ioc->name, __func__, count, int_status);
+	return -EFAULT;
+}
+
+static int
+_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
+{
+	u32 cntdn, count;
+	u32 int_status;
+
+	count = 0;
+	cntdn = 2000 * timeout;
+	do {
+		int_status = readl(&ioc->chip->HostInterruptStatus);
+		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+			dhsprintk(ioc, pr_info(MPT3SAS_FMT
+				"%s: successful count(%d), timeout(%d)\n",
+				ioc->name, __func__, count, timeout));
+			return 0;
+		}
+
+		udelay(500);
 		count++;
 		count++;
 	} while (--cntdn);
 	} while (--cntdn);
 
 
@@ -3729,13 +3746,13 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
 		"%s: failed due to timeout count(%d), int_status(%x)!\n",
 		"%s: failed due to timeout count(%d), int_status(%x)!\n",
 		ioc->name, __func__, count, int_status);
 		ioc->name, __func__, count, int_status);
 	return -EFAULT;
 	return -EFAULT;
+
 }
 }
 
 
 /**
 /**
  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
  * @ioc: per adapter object
  * @ioc: per adapter object
  * @timeout: timeout in second
  * @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  *
  *
@@ -3743,15 +3760,14 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
  * doorbell.
  * doorbell.
  */
  */
 static int
 static int
-_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
-	int sleep_flag)
+_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
 {
 	u32 cntdn, count;
 	u32 cntdn, count;
 	u32 int_status;
 	u32 int_status;
 	u32 doorbell;
 	u32 doorbell;
 
 
 	count = 0;
 	count = 0;
-	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+	cntdn = 1000 * timeout;
 	do {
 	do {
 		int_status = readl(&ioc->chip->HostInterruptStatus);
 		int_status = readl(&ioc->chip->HostInterruptStatus);
 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
 		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
@@ -3769,10 +3785,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
 		} else if (int_status == 0xFFFFFFFF)
 		} else if (int_status == 0xFFFFFFFF)
 			goto out;
 			goto out;
 
 
-		if (sleep_flag == CAN_SLEEP)
-			usleep_range(1000, 1500);
-		else
-			udelay(500);
+		usleep_range(1000, 1500);
 		count++;
 		count++;
 	} while (--cntdn);
 	} while (--cntdn);
 
 
@@ -3787,20 +3800,18 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
  * @ioc: per adapter object
  * @ioc: per adapter object
  * @timeout: timeout in second
  * @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  *
  *
  */
  */
 static int
 static int
-_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
-	int sleep_flag)
+_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
 {
 	u32 cntdn, count;
 	u32 cntdn, count;
 	u32 doorbell_reg;
 	u32 doorbell_reg;
 
 
 	count = 0;
 	count = 0;
-	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+	cntdn = 1000 * timeout;
 	do {
 	do {
 		doorbell_reg = readl(&ioc->chip->Doorbell);
 		doorbell_reg = readl(&ioc->chip->Doorbell);
 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
 		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
@@ -3809,10 +3820,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
 				ioc->name, __func__, count, timeout));
 				ioc->name, __func__, count, timeout));
 			return 0;
 			return 0;
 		}
 		}
-		if (sleep_flag == CAN_SLEEP)
-			usleep_range(1000, 1500);
-		else
-			udelay(500);
+
+		usleep_range(1000, 1500);
 		count++;
 		count++;
 	} while (--cntdn);
 	} while (--cntdn);
 
 
@@ -3827,13 +3836,11 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
  * @ioc: per adapter object
  * @ioc: per adapter object
  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
  * @timeout: timeout in second
  * @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  */
  */
 static int
 static int
-_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
-	int sleep_flag)
+_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
 {
 {
 	u32 ioc_state;
 	u32 ioc_state;
 	int r = 0;
 	int r = 0;
@@ -3852,12 +3859,11 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
 
 
 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
 	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
 	    &ioc->chip->Doorbell);
 	    &ioc->chip->Doorbell);
-	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
 		r = -EFAULT;
 		r = -EFAULT;
 		goto out;
 		goto out;
 	}
 	}
-	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
-	    timeout, sleep_flag);
+	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
 	if (ioc_state) {
 		pr_err(MPT3SAS_FMT
 		pr_err(MPT3SAS_FMT
 			"%s: failed going to ready state (ioc_state=0x%x)\n",
 			"%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -3879,18 +3885,16 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
  * @reply_bytes: reply length
  * @reply_bytes: reply length
  * @reply: pointer to reply payload
  * @reply: pointer to reply payload
  * @timeout: timeout in second
  * @timeout: timeout in second
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  */
  */
 static int
 static int
 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
-	u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+	u32 *request, int reply_bytes, u16 *reply, int timeout)
 {
 {
 	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
 	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
 	int i;
 	int i;
 	u8 failed;
 	u8 failed;
-	u16 dummy;
 	__le32 *mfp;
 	__le32 *mfp;
 
 
 	/* make sure doorbell is not in use */
 	/* make sure doorbell is not in use */
@@ -3911,7 +3915,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
 	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
 	    &ioc->chip->Doorbell);
 	    &ioc->chip->Doorbell);
 
 
-	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+	if ((_base_spin_on_doorbell_int(ioc, 5))) {
 		pr_err(MPT3SAS_FMT
 		pr_err(MPT3SAS_FMT
 			"doorbell handshake int failed (line=%d)\n",
 			"doorbell handshake int failed (line=%d)\n",
 			ioc->name, __LINE__);
 			ioc->name, __LINE__);
@@ -3919,7 +3923,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	}
 	}
 	writel(0, &ioc->chip->HostInterruptStatus);
 	writel(0, &ioc->chip->HostInterruptStatus);
 
 
-	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
 		pr_err(MPT3SAS_FMT
 		pr_err(MPT3SAS_FMT
 			"doorbell handshake ack failed (line=%d)\n",
 			"doorbell handshake ack failed (line=%d)\n",
 			ioc->name, __LINE__);
 			ioc->name, __LINE__);
@@ -3929,7 +3933,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	/* send message 32-bits at a time */
 	/* send message 32-bits at a time */
 	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
 	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
 		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
 		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
-		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+		if ((_base_wait_for_doorbell_ack(ioc, 5)))
 			failed = 1;
 			failed = 1;
 	}
 	}
 
 
@@ -3941,7 +3945,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	}
 	}
 
 
 	/* now wait for the reply */
 	/* now wait for the reply */
-	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
 		pr_err(MPT3SAS_FMT
 		pr_err(MPT3SAS_FMT
 			"doorbell handshake int failed (line=%d)\n",
 			"doorbell handshake int failed (line=%d)\n",
 			ioc->name, __LINE__);
 			ioc->name, __LINE__);
@@ -3952,7 +3956,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
 	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
 	    & MPI2_DOORBELL_DATA_MASK);
 	    & MPI2_DOORBELL_DATA_MASK);
 	writel(0, &ioc->chip->HostInterruptStatus);
 	writel(0, &ioc->chip->HostInterruptStatus);
-	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+	if ((_base_wait_for_doorbell_int(ioc, 5))) {
 		pr_err(MPT3SAS_FMT
 		pr_err(MPT3SAS_FMT
 			"doorbell handshake int failed (line=%d)\n",
 			"doorbell handshake int failed (line=%d)\n",
 			ioc->name, __LINE__);
 			ioc->name, __LINE__);
@@ -3963,22 +3967,22 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 	writel(0, &ioc->chip->HostInterruptStatus);
 	writel(0, &ioc->chip->HostInterruptStatus);
 
 
 	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
 	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
-		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+		if ((_base_wait_for_doorbell_int(ioc, 5))) {
 			pr_err(MPT3SAS_FMT
 			pr_err(MPT3SAS_FMT
 				"doorbell handshake int failed (line=%d)\n",
 				"doorbell handshake int failed (line=%d)\n",
 				ioc->name, __LINE__);
 				ioc->name, __LINE__);
 			return -EFAULT;
 			return -EFAULT;
 		}
 		}
 		if (i >=  reply_bytes/2) /* overflow case */
 		if (i >=  reply_bytes/2) /* overflow case */
-			dummy = readl(&ioc->chip->Doorbell);
+			readl(&ioc->chip->Doorbell);
 		else
 		else
 			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
 			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
 			    & MPI2_DOORBELL_DATA_MASK);
 			    & MPI2_DOORBELL_DATA_MASK);
 		writel(0, &ioc->chip->HostInterruptStatus);
 		writel(0, &ioc->chip->HostInterruptStatus);
 	}
 	}
 
 
-	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
-	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+	_base_wait_for_doorbell_int(ioc, 5);
+	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
 			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
 			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
 	}
 	}
@@ -4015,7 +4019,6 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 {
 {
 	u16 smid;
 	u16 smid;
 	u32 ioc_state;
 	u32 ioc_state;
-	unsigned long timeleft;
 	bool issue_reset = false;
 	bool issue_reset = false;
 	int rc;
 	int rc;
 	void *request;
 	void *request;
@@ -4068,7 +4071,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 		ioc->ioc_link_reset_in_progress = 1;
 		ioc->ioc_link_reset_in_progress = 1;
 	init_completion(&ioc->base_cmds.done);
 	init_completion(&ioc->base_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	    msecs_to_jiffies(10000));
 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
 	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
 	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
@@ -4093,8 +4096,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
 
 
  issue_host_reset:
  issue_host_reset:
 	if (issue_reset)
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 	rc = -EFAULT;
 	rc = -EFAULT;
  out:
  out:
@@ -4119,7 +4121,6 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 {
 {
 	u16 smid;
 	u16 smid;
 	u32 ioc_state;
 	u32 ioc_state;
-	unsigned long timeleft;
 	bool issue_reset = false;
 	bool issue_reset = false;
 	int rc;
 	int rc;
 	void *request;
 	void *request;
@@ -4170,7 +4171,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
 	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
 	init_completion(&ioc->base_cmds.done);
 	init_completion(&ioc->base_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+	wait_for_completion_timeout(&ioc->base_cmds.done,
 	    msecs_to_jiffies(10000));
 	    msecs_to_jiffies(10000));
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4191,8 +4192,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 
 
  issue_host_reset:
  issue_host_reset:
 	if (issue_reset)
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
 	rc = -EFAULT;
 	rc = -EFAULT;
  out:
  out:
@@ -4203,12 +4203,11 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
 /**
 /**
  * _base_get_port_facts - obtain port facts reply and save in ioc
  * _base_get_port_facts - obtain port facts reply and save in ioc
  * @ioc: per adapter object
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  */
  */
 static int
 static int
-_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
+_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
 {
 {
 	Mpi2PortFactsRequest_t mpi_request;
 	Mpi2PortFactsRequest_t mpi_request;
 	Mpi2PortFactsReply_t mpi_reply;
 	Mpi2PortFactsReply_t mpi_reply;
@@ -4224,7 +4223,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
 	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
 	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
 	mpi_request.PortNumber = port;
 	mpi_request.PortNumber = port;
 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
-	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 
 	if (r != 0) {
 	if (r != 0) {
 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4247,13 +4246,11 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
  * @ioc: per adapter object
  * @ioc: per adapter object
  * @timeout:
  * @timeout:
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  */
  */
 static int
 static int
-_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
-	int sleep_flag)
+_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
 {
 {
 	u32 ioc_state;
 	u32 ioc_state;
 	int rc;
 	int rc;
@@ -4287,8 +4284,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
 		goto issue_diag_reset;
 		goto issue_diag_reset;
 	}
 	}
 
 
-	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
-	    timeout, sleep_flag);
+	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
 	if (ioc_state) {
 	if (ioc_state) {
 		dfailprintk(ioc, printk(MPT3SAS_FMT
 		dfailprintk(ioc, printk(MPT3SAS_FMT
 		    "%s: failed going to ready state (ioc_state=0x%x)\n",
 		    "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4297,19 +4293,18 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
 	}
 	}
 
 
  issue_diag_reset:
  issue_diag_reset:
-	rc = _base_diag_reset(ioc, sleep_flag);
+	rc = _base_diag_reset(ioc);
 	return rc;
 	return rc;
 }
 }
 
 
 /**
 /**
  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
  * @ioc: per adapter object
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  *
  * Returns 0 for success, non-zero for failure.
  * Returns 0 for success, non-zero for failure.
  */
  */
 static int
 static int
-_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
 {
 	Mpi2IOCFactsRequest_t mpi_request;
 	Mpi2IOCFactsReply_t mpi_reply;
@@ -4319,7 +4314,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
 
-	r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+	r = _base_wait_for_iocstate(ioc, 10);
 	if (r) {
 		dfailprintk(ioc, printk(MPT3SAS_FMT
 		    "%s: failed getting to correct state\n",
@@ -4331,7 +4326,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 	memset(&mpi_request, 0, mpi_request_sz);
 	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
 	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
-	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
 
 	if (r != 0) {
 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4391,12 +4386,11 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 /**
  * _base_send_ioc_init - send ioc_init to firmware
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
 {
 	Mpi2IOCInitRequest_t mpi_request;
 	Mpi2IOCInitReply_t mpi_reply;
@@ -4479,8 +4473,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 
 	r = _base_handshake_req_reply_wait(ioc,
 	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
-	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
-	    sleep_flag);
+	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
 
 	if (r != 0) {
 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4555,16 +4548,14 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
 /**
  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
 {
 	Mpi2PortEnableRequest_t *mpi_request;
 	Mpi2PortEnableReply_t *mpi_reply;
-	unsigned long timeleft;
 	int r = 0;
 	u16 smid;
 	u16 ioc_status;
@@ -4592,8 +4583,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 
 	init_completion(&ioc->port_enable_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
-	    300*HZ);
+	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
 	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
 		    ioc->name, __func__);
@@ -4737,15 +4727,13 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
 /**
  * _base_event_notification - send event notification
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
 {
 	Mpi2EventNotificationRequest_t *mpi_request;
-	unsigned long timeleft;
 	u16 smid;
 	int r = 0;
 	int i;
@@ -4777,7 +4765,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 		    cpu_to_le32(ioc->event_masks[i]);
 	init_completion(&ioc->base_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
 	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
 		    ioc->name, __func__);
@@ -4827,19 +4815,18 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
 		return;
 
 	mutex_lock(&ioc->base_cmds.mutex);
-	_base_event_notification(ioc, CAN_SLEEP);
+	_base_event_notification(ioc);
 	mutex_unlock(&ioc->base_cmds.mutex);
 }
 
 /**
  * _base_diag_reset - the "big hammer" start of day reset
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 {
 	u32 host_diagnostic;
 	u32 ioc_state;
@@ -4867,10 +4854,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
 
 		/* wait 100 msec */
-		if (sleep_flag == CAN_SLEEP)
-			msleep(100);
-		else
-			mdelay(100);
+		msleep(100);
 
 		if (count++ > 20)
 			goto out;
@@ -4890,10 +4874,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 	     &ioc->chip->HostDiagnostic);
 
 	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
-	if (sleep_flag == CAN_SLEEP)
-		msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
-	else
-		mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
+	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
 
 	/* Approximately 300 second max wait */
 	for (count = 0; count < (300000000 /
@@ -4906,13 +4887,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
 			break;
 
-		/* Wait to pass the second read delay window */
-		if (sleep_flag == CAN_SLEEP)
-			msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
-								/ 1000);
-		else
-			mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
-								/ 1000);
+		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
 	}
 
 	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
@@ -4941,8 +4916,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 
 	drsprintk(ioc, pr_info(MPT3SAS_FMT
 		"Wait for FW to go to the READY state\n", ioc->name));
-	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
-	    sleep_flag);
+	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
 	if (ioc_state) {
 		pr_err(MPT3SAS_FMT
 			"%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4961,14 +4935,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 /**
  * _base_make_ioc_ready - put controller in READY state
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
-	enum reset_type type)
+_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
 {
 	u32 ioc_state;
 	int rc;
@@ -4995,10 +4967,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 				    ioc->name, __func__, ioc_state);
 				return -EFAULT;
 			}
-			if (sleep_flag == CAN_SLEEP)
-				ssleep(1);
-			else
-				mdelay(1000);
+			ssleep(1);
 			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
 		}
 	}
@@ -5024,24 +4993,23 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
 		if (!(_base_send_ioc_reset(ioc,
-		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
 			return 0;
 	}
 
  issue_diag_reset:
-	rc = _base_diag_reset(ioc, CAN_SLEEP);
+	rc = _base_diag_reset(ioc);
 	return rc;
 }
 
 /**
  * _base_make_ioc_operational - put controller in OPERATIONAL state
  * @ioc: per adapter object
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * Returns 0 for success, non-zero for failure.
  */
 static int
-_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
 {
 	int r, i, index;
 	unsigned long	flags;
@@ -5160,7 +5128,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 	}
  skip_init_reply_post_free_queue:
 
-	r = _base_send_ioc_init(ioc, sleep_flag);
+	r = _base_send_ioc_init(ioc);
 	if (r)
 		return r;
 
@@ -5186,13 +5154,11 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 skip_init_reply_post_host_index:
 
 	_base_unmask_interrupts(ioc);
-	r = _base_event_notification(ioc, sleep_flag);
+	r = _base_event_notification(ioc);
 	if (r)
 		return r;
 
-	if (sleep_flag == CAN_SLEEP)
-		_base_static_config_pages(ioc);
-
+	_base_static_config_pages(ioc);
 
 	if (ioc->is_driver_loading) {
 
@@ -5211,7 +5177,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 		return r; /* scan_start and scan_finished support */
 	}
 
-	r = _base_send_port_enable(ioc, sleep_flag);
+	r = _base_send_port_enable(ioc);
 	if (r)
 		return r;
 
@@ -5235,7 +5201,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc->chip_phys && ioc->chip) {
 		_base_mask_interrupts(ioc);
 		ioc->shost_recovery = 1;
-		_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+		_base_make_ioc_ready(ioc, SOFT_RESET);
 		ioc->shost_recovery = 0;
 	}
 
@@ -5292,7 +5258,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 		goto out_free_resources;
 
 	pci_set_drvdata(ioc->pdev, ioc->shost);
-	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+	r = _base_get_ioc_facts(ioc);
 	if (r)
 		goto out_free_resources;
 
@@ -5326,7 +5292,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	ioc->build_sg_mpi = &_base_build_sg;
 	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
 
-	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+	r = _base_make_ioc_ready(ioc, SOFT_RESET);
 	if (r)
 		goto out_free_resources;
 
@@ -5338,12 +5304,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	}
 
 	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
-		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+		r = _base_get_port_facts(ioc, i);
 		if (r)
 			goto out_free_resources;
 	}
 
-	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+	r = _base_allocate_memory_pools(ioc);
 	if (r)
 		goto out_free_resources;
 
@@ -5429,7 +5395,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
 		_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
 
-	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+	r = _base_make_ioc_operational(ioc);
 	if (r)
 		goto out_free_resources;
 
@@ -5565,21 +5531,18 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
 /**
  * _wait_for_commands_to_complete - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  *
  * This function waiting(3s) for all pending commands to complete
 * prior to putting controller in reset.
  */
 static void
-_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
 {
 	u32 ioc_state;
 	unsigned long flags;
 	u16 i;
 
 	ioc->pending_io_count = 0;
-	if (sleep_flag != CAN_SLEEP)
-		return;
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
 	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
@@ -5602,13 +5565,12 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 /**
  * mpt3sas_base_hard_reset_handler - reset controller
  * @ioc: Pointer to MPT_ADAPTER structure
- * @sleep_flag: CAN_SLEEP or NO_SLEEP
  * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
 int
-mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	enum reset_type type)
 {
 	int r;
@@ -5629,13 +5591,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 	if (mpt3sas_fwfault_debug)
 		mpt3sas_halt_firmware(ioc);
 
-	/* TODO - What we really should be doing is pulling
-	 * out all the code associated with NO_SLEEP; its never used.
-	 * That is legacy code from mpt fusion driver, ported over.
-	 * I will leave this BUG_ON here for now till its been resolved.
-	 */
-	BUG_ON(sleep_flag == NO_SLEEP);
-
 	/* wait for an active reset in progress to complete */
 	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
 		do {
@@ -5660,9 +5615,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 			is_fault = 1;
 	}
 	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
-	_wait_for_commands_to_complete(ioc, sleep_flag);
+	_wait_for_commands_to_complete(ioc);
 	_base_mask_interrupts(ioc);
-	r = _base_make_ioc_ready(ioc, sleep_flag, type);
+	r = _base_make_ioc_ready(ioc, type);
 	if (r)
 		goto out;
 	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
@@ -5675,7 +5630,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 		r = -EFAULT;
 		goto out;
 	}
-	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+	r = _base_get_ioc_facts(ioc);
 	if (r)
 		goto out;
 
@@ -5684,7 +5639,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
 		      "Please reboot the system and ensure that the correct"
 		      " firmware version is running\n", ioc->name);
 
-	r = _base_make_ioc_operational(ioc, sleep_flag);
+	r = _base_make_ioc_operational(ioc);
 	if (!r)
 		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
 

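The mpt3sas_base.c hunks above all serve one cleanup: every caller of these
helpers runs in process context, so the CAN_SLEEP/NO_SLEEP parameter and the
msleep()/mdelay() dual paths collapse into the sleeping variant. A minimal
userspace sketch of the same shape, not the driver's code (delay_ms() and its
former flag are hypothetical):

    #include <time.h>

    /* Before: delay_ms(ms, can_sleep) picked a sleeping wait (msleep-style)
     * or a busy wait (mdelay-style). Once no caller can be in atomic
     * context, the flag and the busy path disappear. */
    static void delay_ms(unsigned int ms)
    {
            struct timespec ts = {
                    .tv_sec  = ms / 1000,
                    .tv_nsec = (long)(ms % 1000) * 1000000L,
            };

            nanosleep(&ts, NULL);   /* always allowed to sleep here */
    }
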
+ 7 - 17
drivers/scsi/mpt3sas/mpt3sas_base.h

@@ -119,10 +119,6 @@
 
 #define MPT_MAX_CALLBACKS		32
 
-
-#define	 CAN_SLEEP			1
-#define  NO_SLEEP			0
-
 #define INTERNAL_CMDS_COUNT		10	/* reserved cmds */
 /* reserved for issuing internally framed scsi io cmds */
 #define INTERNAL_SCSIIO_CMDS_COUNT	3
@@ -478,7 +474,7 @@ struct _sas_device {
 	u8	pfa_led_on;
 	u8	pend_sas_rphy_add;
 	u8	enclosure_level;
-	u8	connector_name[4];
+	u8	connector_name[5];
 	struct kref refcount;
 };
 
@@ -794,16 +790,6 @@ struct reply_post_struct {
 	dma_addr_t			reply_post_free_dma;
 };
 
-/**
- * enum mutex_type - task management mutex type
- * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
- * @TM_MUTEX_ON: mutex is required
- */
-enum mutex_type {
-	TM_MUTEX_OFF = 0,
-	TM_MUTEX_ON = 1,
-};
-
 typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
 /**
  * struct MPT3SAS_ADAPTER - per adapter struct
@@ -1229,7 +1215,7 @@ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
 int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
-int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	enum reset_type type);
 
 void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1291,7 +1277,11 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
 
 int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
 	uint channel, uint id, uint lun, u8 type, u16 smid_task,
-	ulong timeout, enum mutex_type m_type);
+	ulong timeout);
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	uint channel, uint id, uint lun, u8 type, u16 smid_task,
+	ulong timeout);
+
 void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);

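The connector_name widening above pairs with the memcpy changes in
mpt3sas_scsih.c further down: the firmware page supplies exactly four bytes
with no terminator, so the driver-side buffer gains a fifth byte for '\0'.
A standalone sketch of the idiom, with an illustrative struct rather than
the driver's:

    #include <string.h>

    struct dev_info {
            char connector_name[5];         /* 4 firmware bytes + '\0' */
    };

    static void set_connector_name(struct dev_info *di, const char fw[4])
    {
            memcpy(di->connector_name, fw, 4);
            di->connector_name[4] = '\0';   /* now safe for "%s" formats */
    }
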
+ 2 - 5
drivers/scsi/mpt3sas/mpt3sas_config.c

@@ -285,7 +285,6 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 {
 	u16 smid;
 	u32 ioc_state;
-	unsigned long timeleft;
 	Mpi2ConfigRequest_t *config_request;
 	int r;
 	u8 retry_count, issue_host_reset = 0;
@@ -386,8 +385,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	_config_display_some_debug(ioc, smid, "config_request", NULL);
 	init_completion(&ioc->config_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
-	    timeout*HZ);
+	wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
 	if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
 		    ioc->name, __func__);
@@ -491,8 +489,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	mutex_unlock(&ioc->config_cmds.mutex);
 
 	if (issue_host_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	return r;
 }
 

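A cleanup that recurs in mpt3sas_config.c above and in the ioctl and
transport files below: the return value of wait_for_completion_timeout()
was stored in a 'timeleft' local and never read, because success is decided
by the MPT3_CMD_COMPLETE status bit that the reply handler sets. Dropping
the dead variable leaves the status bit as the single authoritative check.
A compilable sketch of that shape (cmd_wait() and CMD_COMPLETE are
stand-ins, not kernel APIs):

    #include <stdio.h>

    #define CMD_COMPLETE 0x01

    struct cmd { unsigned int status; };

    /* Stand-in for wait_for_completion_timeout(); returns time remaining. */
    static unsigned long cmd_wait(struct cmd *c, unsigned long timeout)
    {
            (void)c;
            (void)timeout;
            return 0;
    }

    static int issue_and_wait(struct cmd *c)
    {
            cmd_wait(c, 30);        /* return value intentionally unused */
            if (!(c->status & CMD_COMPLETE)) {      /* authoritative check */
                    fprintf(stderr, "timeout\n");
                    return -1;
            }
            return 0;
    }
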
+ 19 - 30
drivers/scsi/mpt3sas/mpt3sas_ctl.c

@@ -518,7 +518,7 @@ mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
  *
  * Called when application request fasyn callback handler.
  */
-int
+static int
 _ctl_fasync(int fd, struct file *filep, int mode)
 {
 	return fasync_helper(fd, filep, mode, &async_queue);
@@ -530,7 +530,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
  * @wait -
  *
  */
-unsigned int
+static unsigned int
 _ctl_poll(struct file *filep, poll_table *wait)
 {
 	struct MPT3SAS_ADAPTER *ioc;
@@ -641,9 +641,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	MPI2RequestHeader_t *mpi_request = NULL, *request;
 	MPI2DefaultReply_t *mpi_reply;
 	u32 ioc_state;
-	u16 ioc_status;
 	u16 smid;
-	unsigned long timeout, timeleft;
+	unsigned long timeout;
 	u8 issue_reset;
 	u32 sz;
 	void *psge;
@@ -914,8 +913,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 		timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
 	else
 		timeout = karg.timeout;
-	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
-	    timeout*HZ);
+	wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
 	if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
 		Mpi2SCSITaskManagementRequest_t *tm_request =
 		    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
@@ -938,7 +936,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 	}
 
 	mpi_reply = ioc->ctl_cmds.reply;
-	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
 
 	if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
 	    (ioc->logging_level & MPT_DEBUG_TM)) {
@@ -1001,13 +998,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 				ioc->name,
 				le16_to_cpu(mpi_request->FunctionDependent1));
 			mpt3sas_halt_firmware(ioc);
-			mpt3sas_scsih_issue_tm(ioc,
+			mpt3sas_scsih_issue_locked_tm(ioc,
 			    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
-			    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
-			    TM_MUTEX_ON);
+			    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
 		} else
-			mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-			    FORCE_BIG_HAMMER);
+			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	}
 
  out:
@@ -1220,8 +1215,7 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
 	    __func__));
 
-	retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-	    FORCE_BIG_HAMMER);
+	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	pr_info(MPT3SAS_FMT "host reset: %s\n",
 	    ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
 	return 0;
@@ -1381,7 +1375,6 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 	Mpi2DiagBufferPostRequest_t *mpi_request;
 	Mpi2DiagBufferPostReply_t *mpi_reply;
 	u8 buffer_type;
-	unsigned long timeleft;
 	u16 smid;
 	u16 ioc_status;
 	u32 ioc_state;
@@ -1499,7 +1492,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 
 	init_completion(&ioc->ctl_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1538,8 +1531,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
  out:
 
@@ -1800,7 +1792,6 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 	u16 ioc_status;
 	u32 ioc_state;
 	int rc;
-	unsigned long timeleft;
 
 	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
@@ -1848,7 +1839,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
 
 	init_completion(&ioc->ctl_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1974,8 +1965,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
 
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
 	return rc;
 }
@@ -1995,7 +1985,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 	Mpi2DiagBufferPostReply_t *mpi_reply;
 	int rc, i;
 	u8 buffer_type;
-	unsigned long timeleft, request_size, copy_size;
+	unsigned long request_size, copy_size;
 	u16 smid;
 	u16 ioc_status;
 	u8 issue_reset = 0;
@@ -2116,7 +2106,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
 	init_completion(&ioc->ctl_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+	wait_for_completion_timeout(&ioc->ctl_cmds.done,
 	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
 
 	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2155,8 +2145,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
 
  issue_host_reset:
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
  out:
 
@@ -2352,7 +2341,7 @@ out_unlock_pciaccess:
  * @cmd - ioctl opcode
  * @arg -
  */
-long
+static long
 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long ret;
@@ -2372,7 +2361,7 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  * @cmd - ioctl opcode
  * @arg -
  */
-long
+static long
 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	long ret;
@@ -2392,7 +2381,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 *
 * This routine handles 32 bit applications in 64bit os.
 */
-long
+static long
 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
 {
 	long ret;
@@ -2410,7 +2399,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
 *
 * This routine handles 32 bit applications in 64bit os.
 */
-long
+static long
 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
 {
 	long ret;

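The ioctl path above now calls mpt3sas_scsih_issue_locked_tm() instead of
passing TM_MUTEX_ON: rather than a flag telling the helper whether to take
its own mutex, a thin wrapper owns the locking and the core function simply
assumes the lock is held (the kernel version records that contract with
lockdep_assert_held()). A userspace analogue with pthreads, names
illustrative only:

    #include <pthread.h>

    static pthread_mutex_t tm_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Core helper: caller must hold tm_mutex. */
    static int issue_tm(int handle)
    {
            /* ... build and send the task-management request ... */
            (void)handle;
            return 0;
    }

    /* Wrapper for callers that do not already hold the mutex. */
    static int issue_locked_tm(int handle)
    {
            int ret;

            pthread_mutex_lock(&tm_mutex);
            ret = issue_tm(handle);
            pthread_mutex_unlock(&tm_mutex);
            return ret;
    }

Callers that already hold the mutex across several requests, such as the
broadcast-primitive handler in mpt3sas_scsih.c below, keep calling the
unlocked core directly.
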
+ 79 - 90
drivers/scsi/mpt3sas/mpt3sas_scsih.c

@@ -1195,7 +1195,7 @@ _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
  *
  *
  * Returns queue depth.
  * Returns queue depth.
  */
  */
-int
+static int
 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
 {
 	struct Scsi_Host *shost = sdev->host;
 	struct Scsi_Host *shost = sdev->host;
@@ -1244,7 +1244,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
  * Returns 0 if ok. Any other return is assumed to be an error and
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  * the device is ignored.
  */
  */
-int
+static int
 scsih_target_alloc(struct scsi_target *starget)
 scsih_target_alloc(struct scsi_target *starget)
 {
 {
 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1311,7 +1311,7 @@ scsih_target_alloc(struct scsi_target *starget)
  *
  *
  * Returns nothing.
  * Returns nothing.
  */
  */
-void
+static void
 scsih_target_destroy(struct scsi_target *starget)
 scsih_target_destroy(struct scsi_target *starget)
 {
 {
 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1320,7 +1320,6 @@ scsih_target_destroy(struct scsi_target *starget)
 	struct _sas_device *sas_device;
 	struct _sas_device *sas_device;
 	struct _raid_device *raid_device;
 	struct _raid_device *raid_device;
 	unsigned long flags;
 	unsigned long flags;
-	struct sas_rphy *rphy;
 
 
 	sas_target_priv_data = starget->hostdata;
 	sas_target_priv_data = starget->hostdata;
 	if (!sas_target_priv_data)
 	if (!sas_target_priv_data)
@@ -1339,7 +1338,6 @@ scsih_target_destroy(struct scsi_target *starget)
 	}
 	}
 
 
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
 	spin_lock_irqsave(&ioc->sas_device_lock, flags);
-	rphy = dev_to_rphy(starget->dev.parent);
 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
 	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
 	if (sas_device && (sas_device->starget == starget) &&
 	if (sas_device && (sas_device->starget == starget) &&
 	    (sas_device->id == starget->id) &&
 	    (sas_device->id == starget->id) &&
@@ -1369,7 +1367,7 @@ scsih_target_destroy(struct scsi_target *starget)
  * Returns 0 if ok. Any other return is assumed to be an error and
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  * the device is ignored.
  */
  */
-int
+static int
 scsih_slave_alloc(struct scsi_device *sdev)
 scsih_slave_alloc(struct scsi_device *sdev)
 {
 {
 	struct Scsi_Host *shost;
 	struct Scsi_Host *shost;
@@ -1434,7 +1432,7 @@ scsih_slave_alloc(struct scsi_device *sdev)
  *
  *
  * Returns nothing.
  * Returns nothing.
  */
  */
-void
+static void
 scsih_slave_destroy(struct scsi_device *sdev)
 scsih_slave_destroy(struct scsi_device *sdev)
 {
 {
 	struct MPT3SAS_TARGET *sas_target_priv_data;
 	struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -1527,7 +1525,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
  * scsih_is_raid - return boolean indicating device is raid volume
  * scsih_is_raid - return boolean indicating device is raid volume
  * @dev the device struct object
  * @dev the device struct object
  */
  */
-int
+static int
 scsih_is_raid(struct device *dev)
 scsih_is_raid(struct device *dev)
 {
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -1542,7 +1540,7 @@ scsih_is_raid(struct device *dev)
  * scsih_get_resync - get raid volume resync percent complete
  * scsih_get_resync - get raid volume resync percent complete
  * @dev the device struct object
  * @dev the device struct object
  */
  */
-void
+static void
 scsih_get_resync(struct device *dev)
 scsih_get_resync(struct device *dev)
 {
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -1603,7 +1601,7 @@ scsih_get_resync(struct device *dev)
  * scsih_get_state - get raid volume level
  * scsih_get_state - get raid volume level
  * @dev the device struct object
  * @dev the device struct object
  */
  */
-void
+static void
 scsih_get_state(struct device *dev)
 scsih_get_state(struct device *dev)
 {
 {
 	struct scsi_device *sdev = to_scsi_device(dev);
 	struct scsi_device *sdev = to_scsi_device(dev);
@@ -1805,7 +1803,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
  * Returns 0 if ok. Any other return is assumed to be an error and
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  * the device is ignored.
  */
  */
-int
+static int
 scsih_slave_configure(struct scsi_device *sdev)
 scsih_slave_configure(struct scsi_device *sdev)
 {
 {
 	struct Scsi_Host *shost = sdev->host;
 	struct Scsi_Host *shost = sdev->host;
@@ -2021,7 +2019,7 @@ scsih_slave_configure(struct scsi_device *sdev)
  *
  *
  * Return nothing.
  * Return nothing.
  */
  */
-int
+static int
 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 	sector_t capacity, int params[])
 	sector_t capacity, int params[])
 {
 {
@@ -2201,7 +2199,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
  * @smid_task: smid assigned to the task
  * @timeout: timeout in seconds
  * @timeout: timeout in seconds
- * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
  * Context: user
  * Context: user
  *
  *
  * A generic API for sending task management requests to firmware.
  * A generic API for sending task management requests to firmware.
@@ -2212,60 +2209,51 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  */
  */
 int
 int
 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
-	uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
-	enum mutex_type m_type)
+	uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
 {
 {
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
 	Mpi2SCSITaskManagementReply_t *mpi_reply;
 	Mpi2SCSITaskManagementReply_t *mpi_reply;
 	u16 smid = 0;
 	u16 smid = 0;
 	u32 ioc_state;
 	u32 ioc_state;
-	unsigned long timeleft;
 	struct scsiio_tracker *scsi_lookup = NULL;
 	struct scsiio_tracker *scsi_lookup = NULL;
 	int rc;
 	int rc;
 	u16 msix_task = 0;
 	u16 msix_task = 0;
 
 
-	if (m_type == TM_MUTEX_ON)
-		mutex_lock(&ioc->tm_cmds.mutex);
+	lockdep_assert_held(&ioc->tm_cmds.mutex);
+
 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
 	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
 		pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
 		pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
 		    __func__, ioc->name);
 		    __func__, ioc->name);
-		rc = FAILED;
-		goto err_out;
+		return FAILED;
 	}
 	}
 
 
 	if (ioc->shost_recovery || ioc->remove_host ||
 	if (ioc->shost_recovery || ioc->remove_host ||
 	    ioc->pci_error_recovery) {
 	    ioc->pci_error_recovery) {
 		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
 		pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
 		    __func__, ioc->name);
-		rc = FAILED;
-		goto err_out;
+		return FAILED;
 	}
 	}
 
 
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
 	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
 	if (ioc_state & MPI2_DOORBELL_USED) {
 	if (ioc_state & MPI2_DOORBELL_USED) {
 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
 		dhsprintk(ioc, pr_info(MPT3SAS_FMT
 			"unexpected doorbell active!\n", ioc->name));
 			"unexpected doorbell active!\n", ioc->name));
-		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
-		rc = (!rc) ? SUCCESS : FAILED;
-		goto err_out;
+		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+		return (!rc) ? SUCCESS : FAILED;
 	}
 	}
 
 
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
 		mpt3sas_base_fault_info(ioc, ioc_state &
 		mpt3sas_base_fault_info(ioc, ioc_state &
 		    MPI2_DOORBELL_DATA_MASK);
 		    MPI2_DOORBELL_DATA_MASK);
-		rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
-		rc = (!rc) ? SUCCESS : FAILED;
-		goto err_out;
+		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+		return (!rc) ? SUCCESS : FAILED;
 	}
 	}
 
 
 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
 	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
 	if (!smid) {
 	if (!smid) {
 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
 		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
 		    ioc->name, __func__);
-		rc = FAILED;
-		goto err_out;
+		return FAILED;
 	}
 	}
 
 
 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
 	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
@@ -2292,19 +2280,17 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
 	else
 	else
 		msix_task = 0;
 		msix_task = 0;
 	mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
 	mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
-	timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
 	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
 		    ioc->name, __func__);
 		    ioc->name, __func__);
 		_debug_dump_mf(mpi_request,
 		_debug_dump_mf(mpi_request,
 		    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
 		    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
 		if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
 		if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
-			rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-			    FORCE_BIG_HAMMER);
+			rc = mpt3sas_base_hard_reset_handler(ioc,
+					FORCE_BIG_HAMMER);
 			rc = (!rc) ? SUCCESS : FAILED;
 			rc = (!rc) ? SUCCESS : FAILED;
-			ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
-			mpt3sas_scsih_clear_tm_flag(ioc, handle);
-			goto err_out;
+			goto out;
 		}
 		}
 	}
 	}
 
 
@@ -2356,17 +2342,23 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
 		break;
 		break;
 	}
 	}
 
 
+out:
 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
 	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
-	if (m_type == TM_MUTEX_ON)
-		mutex_unlock(&ioc->tm_cmds.mutex);
-
 	return rc;
 	return rc;
+}
 
 
- err_out:
-	if (m_type == TM_MUTEX_ON)
-		mutex_unlock(&ioc->tm_cmds.mutex);
-	return rc;
+int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
+{
+	int ret;
+
+	mutex_lock(&ioc->tm_cmds.mutex);
+	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+			smid_task, timeout);
+	mutex_unlock(&ioc->tm_cmds.mutex);
+
+	return ret;
 }
 }
 
 
 /**
 /**
@@ -2439,7 +2431,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
  *
  *
  * Returns SUCCESS if command aborted else FAILED
  * Returns SUCCESS if command aborted else FAILED
  */
  */
-int
+static int
 scsih_abort(struct scsi_cmnd *scmd)
 scsih_abort(struct scsi_cmnd *scmd)
 {
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2482,9 +2474,9 @@ scsih_abort(struct scsi_cmnd *scmd)
 	mpt3sas_halt_firmware(ioc);
 	mpt3sas_halt_firmware(ioc);
 
 
 	handle = sas_device_priv_data->sas_target->handle;
 	handle = sas_device_priv_data->sas_target->handle;
-	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
 	    scmd->device->id, scmd->device->lun,
 	    scmd->device->id, scmd->device->lun,
-	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
+	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
 
 
  out:
  out:
 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
 	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2498,7 +2490,7 @@ scsih_abort(struct scsi_cmnd *scmd)
  *
  *
  * Returns SUCCESS if command aborted else FAILED
  * Returns SUCCESS if command aborted else FAILED
  */
  */
-int
+static int
 scsih_dev_reset(struct scsi_cmnd *scmd)
 scsih_dev_reset(struct scsi_cmnd *scmd)
 {
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2541,9 +2533,9 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 		goto out;
 		goto out;
 	}
 	}
 
 
-	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
 	    scmd->device->id, scmd->device->lun,
 	    scmd->device->id, scmd->device->lun,
-	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
+	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
 
 
  out:
  out:
 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
 	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2561,7 +2553,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
  *
  *
  * Returns SUCCESS if command aborted else FAILED
  * Returns SUCCESS if command aborted else FAILED
  */
  */
-int
+static int
 scsih_target_reset(struct scsi_cmnd *scmd)
 scsih_target_reset(struct scsi_cmnd *scmd)
 {
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2603,9 +2595,9 @@ scsih_target_reset(struct scsi_cmnd *scmd)
 		goto out;
 		goto out;
 	}
 	}
 
 
-	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
 	    scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
 	    scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
-	    30, TM_MUTEX_ON);
+	    30);
 
 
  out:
  out:
 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
 	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -2624,7 +2616,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
  *
  *
  * Returns SUCCESS if command aborted else FAILED
  * Returns SUCCESS if command aborted else FAILED
  */
  */
-int
+static int
 scsih_host_reset(struct scsi_cmnd *scmd)
 scsih_host_reset(struct scsi_cmnd *scmd)
 {
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2641,8 +2633,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
 		goto out;
 		goto out;
 	}
 	}
 
 
-	retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-	    FORCE_BIG_HAMMER);
+	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	r = (retval < 0) ? FAILED : SUCCESS;
 	r = (retval < 0) ? FAILED : SUCCESS;
 out:
 out:
 	pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
 	pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
@@ -3455,7 +3446,7 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  *
  *
  * Context - processed in interrupt context.
  * Context - processed in interrupt context.
  */
  */
-void
+static void
 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
 				u32 event_context)
 				u32 event_context)
 {
 {
@@ -3494,7 +3485,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
  *
  *
  * Context - processed in interrupt context.
  * Context - processed in interrupt context.
  */
  */
-void
+static void
 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
 					u16 smid, u16 handle)
 					u16 smid, u16 handle)
 	{
 	{
@@ -4032,7 +4023,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
  * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  */
  */
-int
+static int
 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -4701,7 +4692,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 			    le16_to_cpu(mpi_reply->DevHandle));
 			    le16_to_cpu(mpi_reply->DevHandle));
 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
 		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
 
 
-		if (!(ioc->logging_level & MPT_DEBUG_REPLY) &&
+		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
 		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
 		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
 		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
@@ -5380,8 +5371,9 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
 		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
 			sas_device->enclosure_level =
 			sas_device->enclosure_level =
 				le16_to_cpu(sas_device_pg0.EnclosureLevel);
 				le16_to_cpu(sas_device_pg0.EnclosureLevel);
-			memcpy(&sas_device->connector_name[0],
-				&sas_device_pg0.ConnectorName[0], 4);
+			memcpy(sas_device->connector_name,
+				sas_device_pg0.ConnectorName, 4);
+			sas_device->connector_name[4] = '\0';
 		} else {
 		} else {
 			sas_device->enclosure_level = 0;
 			sas_device->enclosure_level = 0;
 			sas_device->connector_name[0] = '\0';
 			sas_device->connector_name[0] = '\0';
@@ -5508,8 +5500,9 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
 	if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
 	if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
 		sas_device->enclosure_level =
 		sas_device->enclosure_level =
 			le16_to_cpu(sas_device_pg0.EnclosureLevel);
 			le16_to_cpu(sas_device_pg0.EnclosureLevel);
-		memcpy(&sas_device->connector_name[0],
-			&sas_device_pg0.ConnectorName[0], 4);
+		memcpy(sas_device->connector_name,
+			sas_device_pg0.ConnectorName, 4);
+		sas_device->connector_name[4] = '\0';
 	} else {
 	} else {
 		sas_device->enclosure_level = 0;
 		sas_device->enclosure_level = 0;
 		sas_device->connector_name[0] = '\0';
 		sas_device->connector_name[0] = '\0';
@@ -6087,8 +6080,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
 
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
 		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
-		    TM_MUTEX_OFF);
+		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
 		if (r == FAILED) {
 		if (r == FAILED) {
 			sdev_printk(KERN_WARNING, sdev,
 			sdev_printk(KERN_WARNING, sdev,
 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
 			    "mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -6128,8 +6120,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 			goto out_no_lock;
 			goto out_no_lock;
 
 
 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
 		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
-		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-		    TM_MUTEX_OFF);
+		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
+		    30);
 		if (r == FAILED) {
 		if (r == FAILED) {
 			sdev_printk(KERN_WARNING, sdev,
 			sdev_printk(KERN_WARNING, sdev,
 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -6297,8 +6289,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 	mutex_unlock(&ioc->scsih_cmds.mutex);
 	mutex_unlock(&ioc->scsih_cmds.mutex);
 
 
 	if (issue_reset)
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 	return rc;
 	return rc;
 }
 }
 
 
@@ -6311,11 +6302,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
 static void
 static void
 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
 {
 {
-	int rc;
 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
 	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
 	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
 	    sdev->no_uld_attach ? "hidding" : "exposing");
 	    sdev->no_uld_attach ? "hidding" : "exposing");
-	rc = scsi_device_reprobe(sdev);
+	WARN_ON(scsi_device_reprobe(sdev));
 }
 }
 
 
 /**
 /**
@@ -8137,7 +8127,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
  * Routine called when unloading the driver.
  * Routine called when unloading the driver.
  * Return nothing.
  * Return nothing.
  */
  */
-void scsih_remove(struct pci_dev *pdev)
+static void scsih_remove(struct pci_dev *pdev)
 {
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8210,7 +8200,7 @@ void scsih_remove(struct pci_dev *pdev)
  *
  *
  * Return nothing.
  * Return nothing.
  */
  */
-void
+static void
 scsih_shutdown(struct pci_dev *pdev)
 scsih_shutdown(struct pci_dev *pdev)
 {
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8451,7 +8441,7 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
  * of scanning the entire bus.  In our implemention, we will kick off
  * of scanning the entire bus.  In our implemention, we will kick off
  * firmware discovery.
  * firmware discovery.
  */
  */
-void
+static void
 scsih_scan_start(struct Scsi_Host *shost)
 scsih_scan_start(struct Scsi_Host *shost)
 {
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8478,7 +8468,7 @@ scsih_scan_start(struct Scsi_Host *shost)
  * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
  * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
  * we wait for firmware discovery to complete, then return 1.
  * we wait for firmware discovery to complete, then return 1.
  */
  */
-int
+static int
 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
 {
 {
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
 	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8608,7 +8598,7 @@ static struct raid_function_template mpt3sas_raid_functions = {
  *	MPI25_VERSION for SAS 3.0 HBA devices, and
  *	MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
  */
-u16
+static u16
 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
 {
 
@@ -8660,7 +8650,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct MPT3SAS_ADAPTER *ioc;
@@ -8869,7 +8859,7 @@ out_add_shost_fail:
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8896,7 +8886,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 scsih_resume(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8916,7 +8906,7 @@ scsih_resume(struct pci_dev *pdev)
 	if (r)
 		return r;
 
-	mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
 	scsi_unblock_requests(shost);
 	mpt3sas_base_start_watchdog(ioc);
 	return 0;
@@ -8933,7 +8923,7 @@ scsih_resume(struct pci_dev *pdev)
  * Return value:
  *      PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
  */
-pci_ers_result_t
+static pci_ers_result_t
 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8970,7 +8960,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
  * code after the PCI slot has been reset, just before we
  * should resume normal operations.
  */
-pci_ers_result_t
+static pci_ers_result_t
 scsih_pci_slot_reset(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8987,8 +8977,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
 	if (rc)
 		return PCI_ERS_RESULT_DISCONNECT;
 
-	rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-	    FORCE_BIG_HAMMER);
+	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 
 	pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
 	    (rc == 0) ? "success" : "failed");
@@ -9007,7 +8996,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
  * OK to resume normal operation. Use completion to allow
  * halted scsi ops to resume.
  */
-void
+static void
 scsih_pci_resume(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9024,7 +9013,7 @@ scsih_pci_resume(struct pci_dev *pdev)
  * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
  * @pdev: pointer to PCI device
  */
-pci_ers_result_t
+static pci_ers_result_t
 scsih_pci_mmio_enabled(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9152,7 +9141,7 @@ static struct pci_driver mpt3sas_driver = {
  *
  * Returns 0 success, anything else error.
  */
-int
+static int
 scsih_init(void)
 {
 	mpt2_ids = 0;
@@ -9202,7 +9191,7 @@ scsih_init(void)
  *
  * Returns 0 success, anything else error.
  */
-void
+static void
 scsih_exit(void)
 {
 
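Note: the recurring `-int` / `+static int` hunks above (and the similar ones in the mvsas, pm8001, pmcraid and qla4xxx diffs below) all apply one pattern: helpers that are only ever reached through a driver/ops table in the same file are given internal linkage, so sparse and -Wmissing-prototypes can verify nothing else links against them. A minimal userspace sketch of the pattern, with hypothetical names:

#include <stdio.h>

/* Helpers referenced only through an ops table in the same
 * translation unit can be static; only the table is visible. */
struct demo_ops {
	int (*probe)(int id);
	void (*remove)(int id);
};

static int demo_probe(int id)
{
	printf("probe %d\n", id);
	return 0;
}

static void demo_remove(int id)
{
	printf("remove %d\n", id);
}

static const struct demo_ops ops = {
	.probe  = demo_probe,
	.remove = demo_remove,
};

int main(void)
{
	if (ops.probe(1) == 0)
		ops.remove(1);
	return 0;
}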

+ 8 - 20
drivers/scsi/mpt3sas/mpt3sas_transport.c

@@ -300,7 +300,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
 	int rc;
 	u16 smid;
 	u32 ioc_state;
-	unsigned long timeleft;
 	void *psge;
 	u8 issue_reset = 0;
 	void *data_out = NULL;
@@ -394,8 +393,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
 		ioc->name, (unsigned long long)sas_address));
 	init_completion(&ioc->transport_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-	    10*HZ);
+	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -446,8 +444,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
@@ -1107,7 +1104,6 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 	int rc;
 	u16 smid;
 	u32 ioc_state;
-	unsigned long timeleft;
 	void *psge;
 	u8 issue_reset = 0;
 	void *data_out = NULL;
@@ -1203,8 +1199,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 		phy->number));
 	init_completion(&ioc->transport_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-	    10*HZ);
+	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1253,8 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
@@ -1421,7 +1415,6 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 	int rc;
 	u16 smid;
 	u32 ioc_state;
-	unsigned long timeleft;
 	void *psge;
 	u8 issue_reset = 0;
 	void *data_out = NULL;
@@ -1522,8 +1515,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 		phy->number, phy_operation));
 	init_completion(&ioc->transport_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-	    10*HZ);
+	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1564,8 +1556,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
 
  issue_host_reset:
 	if (issue_reset)
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  out:
 	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
 	if (data_out)
@@ -1899,7 +1890,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 	int rc;
 	u16 smid;
 	u32 ioc_state;
-	unsigned long timeleft;
 	void *psge;
 	u8 issue_reset = 0;
 	dma_addr_t dma_addr_in = 0;
@@ -2043,8 +2033,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
 	init_completion(&ioc->transport_cmds.done);
 	mpt3sas_base_put_smid_default(ioc, smid);
-	timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
-	    10*HZ);
+	wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
 
 	if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
 		pr_err(MPT3SAS_FMT "%s : timeout\n",
@@ -2103,8 +2092,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 
 issue_host_reset:
 	if (issue_reset) {
-		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
-		    FORCE_BIG_HAMMER);
+		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 		rc = -ETIMEDOUT;
 	}
 
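Note: dropping `timeleft` in the four hunks above is deliberate, not an oversight. The driver never acted on the return value of wait_for_completion_timeout(); the authoritative completion signal is MPT3_CMD_COMPLETE in transport_cmds.status, set by the reply handler and re-checked right after the wait. The CAN_SLEEP argument disappears for a related reason: judging by these call sites, mpt3sas_base_hard_reset_handler() now takes only the ioc and the reset type. A self-contained userspace sketch of the wait-then-check-flag idiom (names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define CMD_COMPLETE 0x1	/* stands in for MPT3_CMD_COMPLETE */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static unsigned int    status;

static void *reply_handler(void *arg)
{
	(void)arg;
	usleep(1000);			/* simulated firmware latency */
	pthread_mutex_lock(&lock);
	status |= CMD_COMPLETE;		/* authoritative completion flag */
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;

	pthread_create(&t, NULL, reply_handler, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 10;		/* the driver's 10*HZ wait */

	pthread_mutex_lock(&lock);
	while (!(status & CMD_COMPLETE)) {
		/* The wait's return value does not decide success;
		 * the status flag is re-checked, as in the driver. */
		if (pthread_cond_timedwait(&done, &lock, &deadline))
			break;		/* ETIMEDOUT: flag still unset */
	}
	pthread_mutex_unlock(&lock);

	puts(status & CMD_COMPLETE ? "complete" : "timeout");
	pthread_join(t, NULL);
	return 0;
}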

+ 10 - 9
drivers/scsi/mvsas/mv_64xx.c

@@ -136,7 +136,8 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
 	}
 }
 
-void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+static void
+mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
 {
 	void __iomem *regs = mvi->regs;
 	u32 tmp;
@@ -563,7 +564,7 @@ static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
 	return MVS_ID_NOT_MAPPED;
 }
 
-void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+static void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
 {
 	int i;
 	struct scatterlist *sg;
@@ -633,7 +634,7 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
 	mvs_write_port_vsr_data(mvi, i, tmp);
 }
 
-void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+static void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
 			struct sas_phy_linkrates *rates)
 {
 	u32 lrmin = 0, lrmax = 0;
@@ -668,20 +669,20 @@ static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
 }
 
 
-u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
+static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
 {
 	void __iomem *regs = mvi->regs_ex;
 	return ior32(SPI_DATA_REG_64XX);
 }
 
-void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
+static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
 {
 	void __iomem *regs = mvi->regs_ex;
 	 iow32(SPI_DATA_REG_64XX, data);
 }
 
 
-int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
+static int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
 			u32      *dwCmd,
 			u8       cmd,
 			u8       read,
@@ -705,7 +706,7 @@ int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
 }
 
 
-int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+static int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
 {
 	void __iomem *regs = mvi->regs_ex;
 	int     retry;
@@ -720,7 +721,7 @@ int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
 	return 0;
 }
 
-int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+static int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
 {
 	void __iomem *regs = mvi->regs_ex;
 	u32 i, dwTmp;
@@ -735,7 +736,7 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
 	return -1;
 }
 
-void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+static void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
 				int buf_len, int from, void *prd)
 {
 	int i;

+ 21 - 20
drivers/scsi/mvsas/mv_94xx.c

@@ -48,8 +48,8 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
 	}
 }
 
-void set_phy_tuning(struct mvs_info *mvi, int phy_id,
-			struct phy_tuning phy_tuning)
+static void set_phy_tuning(struct mvs_info *mvi, int phy_id,
+			   struct phy_tuning phy_tuning)
 {
 	u32 tmp, setting_0 = 0, setting_1 = 0;
 	u8 i;
@@ -110,8 +110,8 @@ void set_phy_tuning(struct mvs_info *mvi, int phy_id,
 	}
 }
 
-void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
-				struct ffe_control ffe)
+static void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
+			       struct ffe_control ffe)
 {
 	u32 tmp;
 
@@ -177,7 +177,7 @@ void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
 }
 
 /*Notice: this function must be called when phy is disabled*/
-void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
+static void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
 {
 	union reg_phy_cfg phy_cfg, phy_cfg_tmp;
 	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
@@ -679,7 +679,8 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
 	}
 }
 
-void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
+static void
+mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
 {
 	void __iomem *regs = mvi->regs;
 	u32 tmp;
@@ -906,8 +907,8 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
 
 }
 
-void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
-			struct sas_phy_linkrates *rates)
+static void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+				       struct sas_phy_linkrates *rates)
 {
 	u32 lrmax = 0;
 	u32 tmp;
@@ -936,25 +937,25 @@ static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
 }
 
 
-u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
+static u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
 {
 	void __iomem *regs = mvi->regs_ex - 0x10200;
 	return mr32(SPI_RD_DATA_REG_94XX);
 }
 
-void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
+static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
 {
 	void __iomem *regs = mvi->regs_ex - 0x10200;
 	 mw32(SPI_RD_DATA_REG_94XX, data);
 }
 
 
-int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
-				u32      *dwCmd,
-				u8       cmd,
-				u8       read,
-				u8       length,
-				u32      addr
+static int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
+				 u32      *dwCmd,
+				 u8       cmd,
+				 u8       read,
+				 u8       length,
+				 u32      addr
 				)
 {
 	void __iomem *regs = mvi->regs_ex - 0x10200;
@@ -974,7 +975,7 @@ int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
 }
 
 
-int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+static int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
 {
 	void __iomem *regs = mvi->regs_ex - 0x10200;
 	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
@@ -982,7 +983,7 @@ int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
 	return 0;
 }
 
-int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+static int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
 {
 	void __iomem *regs = mvi->regs_ex - 0x10200;
 	u32   i, dwTmp;
@@ -997,8 +998,8 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
 	return -1;
 }
 
-void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
-				int buf_len, int from, void *prd)
+static void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
+			     int buf_len, int from, void *prd)
 {
 	int i;
 	struct mvs_prd *buf_prd = prd;

+ 8 - 8
drivers/scsi/mvsas/mv_sas.c

@@ -74,7 +74,7 @@ void mvs_tag_init(struct mvs_info *mvi)
 		mvs_tag_clear(mvi, i);
 }
 
-struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
 {
 	unsigned long i = 0, j = 0, hi = 0;
 	struct sas_ha_struct *sha = dev->port->ha;
@@ -102,7 +102,7 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
 
 }
 
-int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
 {
 	unsigned long i = 0, j = 0, n = 0, num = 0;
 	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
@@ -1158,7 +1158,7 @@ void mvs_port_deformed(struct asd_sas_phy *sas_phy)
 	mvs_port_notify_deformed(sas_phy, 1);
 }
 
-struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
 {
 	u32 dev;
 	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
@@ -1175,7 +1175,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
 	return NULL;
 }
 
-void mvs_free_dev(struct mvs_device *mvi_dev)
+static void mvs_free_dev(struct mvs_device *mvi_dev)
 {
 	u32 id = mvi_dev->device_id;
 	memset(mvi_dev, 0, sizeof(*mvi_dev));
@@ -1185,7 +1185,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
 	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
 }
 
-int mvs_dev_found_notify(struct domain_device *dev, int lock)
+static int mvs_dev_found_notify(struct domain_device *dev, int lock)
 {
 	unsigned long flags = 0;
 	int res = 0;
@@ -1241,7 +1241,7 @@ int mvs_dev_found(struct domain_device *dev)
 	return mvs_dev_found_notify(dev, 1);
 }
 
-void mvs_dev_gone_notify(struct domain_device *dev)
+static void mvs_dev_gone_notify(struct domain_device *dev)
 {
 	unsigned long flags = 0;
 	struct mvs_device *mvi_dev = dev->lldd_dev;
@@ -1611,7 +1611,7 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
 	return stat;
 }
 
-void mvs_set_sense(u8 *buffer, int len, int d_sense,
+static void mvs_set_sense(u8 *buffer, int len, int d_sense,
 		int key, int asc, int ascq)
 {
 	memset(buffer, 0, len);
@@ -1650,7 +1650,7 @@ void mvs_set_sense(u8 *buffer, int len, int d_sense,
 	return;
 }
 
-void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
+static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
 				u8 key, u8 asc, u8 asc_q)
 {
 	iu->datapres = 2;
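Note: mvs_set_sense(), now static as well, fills a fixed-format SCSI sense buffer. As a rough illustration of that SPC fixed-format layout (response code 0x70, sense key in byte 2, ASC/ASCQ in bytes 12-13), here is a self-contained sketch; the function name and example values are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

/* Fixed-format sense data per SPC: the fields a function like
 * mvs_set_sense() has to populate. */
static void fill_fixed_sense(unsigned char *buf, size_t len,
			     int key, int asc, int ascq)
{
	memset(buf, 0, len);
	buf[0] = 0x70;		/* fixed format, current errors */
	buf[2] = key & 0xf;	/* sense key */
	buf[7] = 0x0a;		/* additional sense length */
	buf[12] = asc;		/* additional sense code */
	buf[13] = ascq;		/* additional sense code qualifier */
}

int main(void)
{
	unsigned char sense[18];

	/* ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE */
	fill_fixed_sense(sense, sizeof(sense), 0x05, 0x20, 0x00);
	printf("key=%x asc=%02x ascq=%02x\n",
	       sense[2] & 0xf, sense[12], sense[13]);
	return 0;
}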

+ 0 - 565
drivers/scsi/pas16.c

@@ -1,565 +0,0 @@
-/*
- * This driver adapted from Drew Eckhardt's Trantor T128 driver
- *
- * Copyright 1993, Drew Eckhardt
- *	Visionary Computing
- *	(Unix and Linux consulting and custom programming)
- *	drew@colorado.edu
- *      +1 (303) 666-5836
- *
- *  ( Based on T128 - DISTRIBUTION RELEASE 3. ) 
- *
- * Modified to work with the Pro Audio Spectrum/Studio 16
- * by John Weidman.
- *
- *
- * For more information, please consult 
- *
- * Media Vision
- * (510) 770-8600
- * (800) 348-7116
- */
-
-/*
- * The card is detected and initialized in one of several ways : 
- * 1.  Autoprobe (default) - There are many different models of
- *     the Pro Audio Spectrum/Studio 16, and I only have one of
- *     them, so this may require a little tweaking.  An interrupt
- *     is triggered to autoprobe for the interrupt line.  Note:
- *     with the newer model boards, the interrupt is set via
- *     software after reset using the default_irq for the
- *     current board number.
- *
- * 2.  With command line overrides - pas16=port,irq may be 
- *     used on the LILO command line to override the defaults.
- *
- * 3.  With the PAS16_OVERRIDE compile time define.  This is 
- *     specified as an array of address, irq tuples.  Ie, for
- *     one board at the default 0x388 address, IRQ10, I could say 
- *     -DPAS16_OVERRIDE={{0x388, 10}}
- *     NOTE:  Untested.
- *	
- * 4.  When included as a module, with arguments passed on the command line:
- *         pas16_irq=xx		the interrupt
- *         pas16_addr=xx	the port
- *     e.g. "modprobe pas16 pas16_addr=0x388 pas16_irq=5"
- *
- *     Note that if the override methods are used, place holders must
- *     be specified for other boards in the system.
- *
- *
- * Configuration notes :
- *   The current driver does not support interrupt sharing with the
- *   sound portion of the card.  If you use the same irq for the
- *   scsi port and sound you will have problems.  Either use
- *   a different irq for the scsi port or don't use interrupts
- *   for the scsi port.
- *
- *   If you have problems with your card not being recognized, use
- *   the LILO command line override.  Try to get it recognized without
- *   interrupts.  Ie, for a board at the default 0x388 base port,
- *   boot: linux pas16=0x388,0
- *
- *   NO_IRQ (0) should be specified for no interrupt,
- *   IRQ_AUTO (254) to autoprobe for an IRQ line if overridden
- *   on the command line.
- */
- 
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-
-#include <scsi/scsi_host.h>
-#include "pas16.h"
-#include "NCR5380.h"
-
-
-static unsigned short pas16_addr;
-static int pas16_irq;
- 
-
-static const int scsi_irq_translate[] =
-	{ 0,  0,  1,  2,  3,  4,  5,  6, 0,  0,  7,  8,  9,  0, 10, 11 };
-
-/* The default_irqs array contains values used to set the irq into the
- * board via software (as must be done on newer model boards without
- * irq jumpers on the board).  The first value in the array will be
- * assigned to logical board 0, the next to board 1, etc.
- */
-static int default_irqs[] __initdata =
-	{  PAS16_DEFAULT_BOARD_1_IRQ,
-	   PAS16_DEFAULT_BOARD_2_IRQ,
-	   PAS16_DEFAULT_BOARD_3_IRQ,
-	   PAS16_DEFAULT_BOARD_4_IRQ
-	};
-
-static struct override {
-    unsigned short io_port;
-    int  irq;
-} overrides
-#ifdef PAS16_OVERRIDE
-    [] __initdata = PAS16_OVERRIDE;
-#else
-    [4] __initdata = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO},
-	{0,IRQ_AUTO}};
-#endif
-
-#define NO_OVERRIDES ARRAY_SIZE(overrides)
-
-static struct base {
-    unsigned short io_port;
-    int noauto;
-} bases[] __initdata =
-	{ {PAS16_DEFAULT_BASE_1, 0},
-	  {PAS16_DEFAULT_BASE_2, 0},
-	  {PAS16_DEFAULT_BASE_3, 0},
-	  {PAS16_DEFAULT_BASE_4, 0}
-	};
-
-#define NO_BASES ARRAY_SIZE(bases)
-
-static const unsigned short  pas16_offset[ 8 ] =
-    {
-	0x1c00,    /* OUTPUT_DATA_REG */
-	0x1c01,    /* INITIATOR_COMMAND_REG */
-	0x1c02,    /* MODE_REG */
-	0x1c03,    /* TARGET_COMMAND_REG */
-	0x3c00,    /* STATUS_REG ro, SELECT_ENABLE_REG wo */
-	0x3c01,    /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */
-	0x3c02,    /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?)
-		    * START_DMA_TARGET_RECEIVE_REG wo
-		    */
-	0x3c03,    /* RESET_PARITY_INTERRUPT_REG ro,
-		    * START_DMA_INITIATOR_RECEIVE_REG wo
-		    */
-    };
-
-
-/*
- * Function : enable_board( int  board_num, unsigned short port )
- *
- * Purpose :  set address in new model board
- *
- * Inputs : board_num - logical board number 0-3, port - base address
- *
- */
-
-static void __init
-	enable_board( int  board_num,  unsigned short port )
-{
-    outb( 0xbc + board_num, MASTER_ADDRESS_PTR );
-    outb( port >> 2, MASTER_ADDRESS_PTR );
-}
-
-
-
-/*
- * Function : init_board( unsigned short port, int irq )
- *
- * Purpose :  Set the board up to handle the SCSI interface
- *
- * Inputs : port - base address of the board,
- *	    irq - irq to assign to the SCSI port
- *	    force_irq - set it even if it conflicts with sound driver
- *
- */
-
-static void __init 
-	init_board( unsigned short io_port, int irq, int force_irq )
-{
-	unsigned int	tmp;
-	unsigned int	pas_irq_code;
-
-	/* Initialize the SCSI part of the board */
-
-	outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG );  /* Timeout counter */
-	outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET );   /* Reset TC */
-	outb( 0x01, io_port + WAIT_STATE );   /* 1 Wait state */
-
-	inb(io_port + pas16_offset[RESET_PARITY_INTERRUPT_REG]);
-
-	/* Set the SCSI interrupt pointer without mucking up the sound
-	 * interrupt pointer in the same byte.
-	 */
-	pas_irq_code = ( irq < 16 ) ? scsi_irq_translate[irq] : 0;
-	tmp = inb( io_port + IO_CONFIG_3 );
-
-	if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0 
-	    && !force_irq )
-	{
-	    printk( "pas16: WARNING: Can't use same irq as sound "
-		    "driver -- interrupts disabled\n" );
-	    /* Set up the drive parameters, disable 5380 interrupts */
-	    outb( 0x4d, io_port + SYS_CONFIG_4 );
-	}
-	else
-	{
-	    tmp = (  tmp & 0x0f ) | ( pas_irq_code << 4 );
-	    outb( tmp, io_port + IO_CONFIG_3 );
-
-	    /* Set up the drive parameters and enable 5380 interrupts */
-	    outb( 0x6d, io_port + SYS_CONFIG_4 );
-	}
-}
-
-
-/*
- * Function : pas16_hw_detect( unsigned short board_num )
- *
- * Purpose : determine if a pas16 board is present
- * 
- * Inputs : board_num - logical board number ( 0 - 3 )
- *
- * Returns : 0 if board not found, 1 if found.
- */
-
-static int __init 
-     pas16_hw_detect( unsigned short  board_num )
-{
-    unsigned char	board_rev, tmp;
-    unsigned short	io_port = bases[ board_num ].io_port;
-
-    /* See if we can find a PAS16 board at the address associated
-     * with this logical board number.
-     */
-
-    /* First, attempt to take a newer model board out of reset and
-     * give it a base address.  This shouldn't affect older boards.
-     */
-    enable_board( board_num, io_port );
-
-    /* Now see if it looks like a PAS16 board */
-    board_rev = inb( io_port + PCB_CONFIG );
-
-    if( board_rev == 0xff )
-	return 0;
-
-    tmp = board_rev ^ 0xe0;
-
-    outb( tmp, io_port + PCB_CONFIG );
-    tmp = inb( io_port + PCB_CONFIG );
-    outb( board_rev, io_port + PCB_CONFIG );
-
-    if( board_rev != tmp ) 	/* Not a PAS-16 */
-	return 0;
-
-    if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 ) 
-	return 0;  	/* return if no SCSI interface found */
-
-    /* Mediavision has some new model boards that return ID bits
-     * that indicate a SCSI interface, but they're not (LMS).  We'll
-     * put in an additional test to try to weed them out.
-     */
-
-	outb(0x01, io_port + WAIT_STATE);             /* 1 Wait state */
-	outb(0x20, io_port + pas16_offset[MODE_REG]); /* Is it really SCSI? */
-	if (inb(io_port + pas16_offset[MODE_REG]) != 0x20) /* Write to a reg. */
-		return 0;                                  /* and try to read */
-	outb(0x00, io_port + pas16_offset[MODE_REG]);      /* it back. */
-	if (inb(io_port + pas16_offset[MODE_REG]) != 0x00)
-		return 0;
-
-    return 1;
-}
-
-
-#ifndef MODULE
-/*
- * Function : pas16_setup(char *str, int *ints)
- *
- * Purpose : LILO command line initialization of the overrides array,
- * 
- * Inputs : str - unused, ints - array of integer parameters with ints[0]
- *	equal to the number of ints.
- *
- */
-
-static int __init pas16_setup(char *str)
-{
-	static int commandline_current;
-    int i;
-    int ints[10];
-
-    get_options(str, ARRAY_SIZE(ints), ints);
-    if (ints[0] != 2) 
-	printk("pas16_setup : usage pas16=io_port,irq\n");
-    else 
-	if (commandline_current < NO_OVERRIDES) {
-	    overrides[commandline_current].io_port = (unsigned short) ints[1];
-	    overrides[commandline_current].irq = ints[2];
-	    for (i = 0; i < NO_BASES; ++i)
-		if (bases[i].io_port == (unsigned short) ints[1]) {
- 		    bases[i].noauto = 1;
-		    break;
-		}
-	    ++commandline_current;
-	}
-    return 1;
-}
-
-__setup("pas16=", pas16_setup);
-#endif
-
-/* 
- * Function : int pas16_detect(struct scsi_host_template * tpnt)
- *
- * Purpose : detects and initializes PAS16 controllers
- *	that were autoprobed, overridden on the LILO command line, 
- *	or specified at compile time.
- *
- * Inputs : tpnt - template for this SCSI adapter.
- * 
- * Returns : 1 if a host adapter was found, 0 if not.
- *
- */
-
-static int __init pas16_detect(struct scsi_host_template *tpnt)
-{
-	static int current_override;
-	static unsigned short current_base;
-    struct Scsi_Host *instance;
-    unsigned short io_port;
-    int  count;
-
-    if (pas16_addr != 0) {
-	overrides[0].io_port = pas16_addr;
-	/*
-	*  This is how we avoid seeing more than
-	*  one host adapter at the same I/O port.
-	*  Cribbed shamelessly from pas16_setup().
-	*/
-	for (count = 0; count < NO_BASES; ++count)
-	    if (bases[count].io_port == pas16_addr) {
- 		    bases[count].noauto = 1;
-		    break;
-	}
-    }
-    if (pas16_irq != 0)
-	overrides[0].irq = pas16_irq;
-
-    for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
-	io_port = 0;
-
-	if (overrides[current_override].io_port)
-	{
-	    io_port = overrides[current_override].io_port;
-	    enable_board( current_override, io_port );
-	    init_board( io_port, overrides[current_override].irq, 1 );
-	}
-	else
-	    for (; !io_port && (current_base < NO_BASES); ++current_base) {
-		dprintk(NDEBUG_INIT, "pas16: probing io_port 0x%04x\n",
-		        (unsigned int)bases[current_base].io_port);
-		if ( !bases[current_base].noauto &&
-		     pas16_hw_detect( current_base ) ){
-			io_port = bases[current_base].io_port;
-			init_board( io_port, default_irqs[ current_base ], 0 ); 
-			dprintk(NDEBUG_INIT, "pas16: detected board\n");
-		}
-    }
-
-	dprintk(NDEBUG_INIT, "pas16: io_port = 0x%04x\n",
-	        (unsigned int)io_port);
-
-	if (!io_port)
-	    break;
-
-	instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
-	if(instance == NULL)
-		goto out;
-		
-	instance->io_port = io_port;
-
-	if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
-		goto out_unregister;
-
-	NCR5380_maybe_reset_bus(instance);
-
-	if (overrides[current_override].irq != IRQ_AUTO)
-	    instance->irq = overrides[current_override].irq;
-	else 
-	    instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
-
-	/* Compatibility with documented NCR5380 kernel parameters */
-	if (instance->irq == 255)
-		instance->irq = NO_IRQ;
-
-	if (instance->irq != NO_IRQ)
-	    if (request_irq(instance->irq, pas16_intr, 0,
-			    "pas16", instance)) {
-		printk("scsi%d : IRQ%d not free, interrupts disabled\n", 
-		    instance->host_no, instance->irq);
-		instance->irq = NO_IRQ;
-	    } 
-
-	if (instance->irq == NO_IRQ) {
-	    printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
-	    printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
-	    /* Disable 5380 interrupts, leave drive params the same */
-	    outb( 0x4d, io_port + SYS_CONFIG_4 );
-	    outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 );
-	}
-
-	dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
-	        instance->host_no, instance->irq);
-
-	++current_override;
-	++count;
-    }
-    return count;
-
-out_unregister:
-	scsi_unregister(instance);
-out:
-	return count;
-}
-
-/*
- * Function : int pas16_biosparam(Disk *disk, struct block_device *dev, int *ip)
- *
- * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for 
- *	the specified device / size.
- * 
- * Inputs : size = size of device in sectors (512 bytes), dev = block device
- *	major / minor, ip[] = {heads, sectors, cylinders}  
- *
- * Returns : always 0 (success), initializes ip
- *	
- */
-
-/* 
- * XXX Most SCSI boards use this mapping, I could be incorrect.  Some one
- * using hard disks on a trantor should verify that this mapping corresponds
- * to that used by the BIOS / ASPI driver by running the linux fdisk program
- * and matching the H_C_S coordinates to what DOS uses.
- */
-
-static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
-                           sector_t capacity, int *ip)
-{
-  int size = capacity;
-  ip[0] = 64;
-  ip[1] = 32;
-  ip[2] = size >> 11;		/* I think I have it as /(32*64) */
-  if( ip[2] > 1024 ) {		/* yes, >, not >= */
-	ip[0]=255;
-	ip[1]=63;
-	ip[2]=size/(63*255);
-	if( ip[2] > 1023 )	/* yes >1023... */
-		ip[2] = 1023;
-  }
-
-  return 0;
-}
-
-/*
- * Function : int pas16_pread (struct Scsi_Host *instance,
- *	unsigned char *dst, int len)
- *
- * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to 
- *	dst
- * 
- * Inputs : dst = destination, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog 
- * 	timeout.
- */
-
-static inline int pas16_pread(struct Scsi_Host *instance,
-                              unsigned char *dst, int len)
-{
-    register unsigned char  *d = dst;
-    register unsigned short reg = (unsigned short) (instance->io_port + 
-	P_DATA_REG_OFFSET);
-    register int i = len;
-    int ii = 0;
-
-    while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
-	 ++ii;
-
-    insb( reg, d, i );
-
-    if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
-	outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
-	printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
-	    instance->host_no);
-	return -1;
-    }
-    return 0;
-}
-
-/*
- * Function : int pas16_pwrite (struct Scsi_Host *instance,
- *	unsigned char *src, int len)
- *
- * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
- *	src
- * 
- * Inputs : src = source, len = length in bytes
- *
- * Returns : 0 on success, non zero on a failure such as a watchdog 
- * 	timeout.
- */
-
-static inline int pas16_pwrite(struct Scsi_Host *instance,
-                               unsigned char *src, int len)
-{
-    register unsigned char *s = src;
-    register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
-    register int i = len;
-    int ii = 0;
-
-    while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
-	 ++ii;
- 
-    outsb( reg, s, i );
-
-    if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
-	outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
-	printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
-	    instance->host_no);
-	return -1;
-    }
-    return 0;
-}
-
-#include "NCR5380.c"
-
-static int pas16_release(struct Scsi_Host *shost)
-{
-	if (shost->irq != NO_IRQ)
-		free_irq(shost->irq, shost);
-	NCR5380_exit(shost);
-	scsi_unregister(shost);
-	return 0;
-}
-
-static struct scsi_host_template driver_template = {
-	.name			= "Pro Audio Spectrum-16 SCSI",
-	.detect			= pas16_detect,
-	.release		= pas16_release,
-	.proc_name		= "pas16",
-	.info			= pas16_info,
-	.queuecommand		= pas16_queue_command,
-	.eh_abort_handler	= pas16_abort,
-	.eh_bus_reset_handler	= pas16_bus_reset,
-	.bios_param		= pas16_biosparam,
-	.can_queue		= 32,
-	.this_id		= 7,
-	.sg_tablesize		= SG_ALL,
-	.cmd_per_lun		= 2,
-	.use_clustering		= DISABLE_CLUSTERING,
-	.cmd_size		= NCR5380_CMD_SIZE,
-	.max_sectors		= 128,
-};
-#include "scsi_module.c"
-
-#ifdef MODULE
-module_param(pas16_addr, ushort, 0);
-module_param(pas16_irq, int, 0);
-#endif
-MODULE_LICENSE("GPL");

+ 0 - 121
drivers/scsi/pas16.h

@@ -1,121 +0,0 @@
-/*
- * This driver adapted from Drew Eckhardt's Trantor T128 driver
- *
- * Copyright 1993, Drew Eckhardt
- *	Visionary Computing
- *	(Unix and Linux consulting and custom programming)
- *	drew@colorado.edu
- *      +1 (303) 666-5836
- *
- *  ( Based on T128 - DISTRIBUTION RELEASE 3. ) 
- *
- * Modified to work with the Pro Audio Spectrum/Studio 16
- * by John Weidman.
- *
- *
- * For more information, please consult 
- *
- * Media Vision
- * (510) 770-8600
- * (800) 348-7116
- */
-
-
-#ifndef PAS16_H
-#define PAS16_H
-
-#define PAS16_DEFAULT_BASE_1  0x388
-#define PAS16_DEFAULT_BASE_2  0x384
-#define PAS16_DEFAULT_BASE_3  0x38c
-#define PAS16_DEFAULT_BASE_4  0x288
-
-#define PAS16_DEFAULT_BOARD_1_IRQ 10
-#define PAS16_DEFAULT_BOARD_2_IRQ 12
-#define PAS16_DEFAULT_BOARD_3_IRQ 14
-#define PAS16_DEFAULT_BOARD_4_IRQ 15
-
-
-/*
- * The Pro Audio Spectrum boards are I/O mapped. They use a Zilog 5380
- * SCSI controller, which is the equivalent of NCR's 5380.  "Pseudo-DMA"
- * architecture is used, where a PAL drives the DMA signals on the 5380
- * allowing fast, blind transfers with proper handshaking. 
- */
-
-
-/* The Time-out Counter register is used to safe-guard against a stuck
- * bus (in the case of RDY driven handshake) or a stuck byte (if 16-Bit
- * DMA conversion is used).  The counter uses a 28.224MHz clock
- * divided by 14 as its clock source.  In the case of a stuck byte in
- * the holding register, an interrupt is generated (and mixed with the
- * one with the drive) using the CD-ROM interrupt pointer.
- */
- 
-#define P_TIMEOUT_COUNTER_REG	0x4000
-#define P_TC_DISABLE	0x80	/* Set to 0 to enable timeout int. */
-				/* Bits D6-D0 contain timeout count */
-
-
-#define P_TIMEOUT_STATUS_REG_OFFSET	0x4001
-#define P_TS_TIM		0x80	/* check timeout status */
-					/* Bits D6-D4 N/U */
-#define P_TS_ARM_DRQ_INT	0x08	/* Arm DRQ Int.  When set high,
-					 * the next rising edge will
-					 * cause a CD-ROM interrupt.
-					 * When set low, the interrupt
-					 * will be cleared.  There is
-					 * no status available for
-					 * this interrupt.
-					 */
-#define P_TS_ENABLE_TO_ERR_INTERRUPT	/* Enable timeout error int. */
-#define P_TS_ENABLE_WAIT		/* Enable Wait */
-
-#define P_TS_CT			0x01	/* clear timeout. Note: writing
-					 * to this register clears the
-					 * timeout error int. or status
-					 */
-
-
-/*
- * The data register reads/writes to/from the 5380 in pseudo-DMA mode
- */ 
-
-#define P_DATA_REG_OFFSET	0x5c00	/* rw */
-
-#define P_STATUS_REG_OFFSET	0x5c01	/* ro */
-#define P_ST_RDY		0x80	/* 5380 DDRQ Status */
-
-#define P_IRQ_STATUS		0x5c03
-#define P_IS_IRQ		0x80	/* DIRQ status */
-
-#define PCB_CONFIG 0x803
-#define MASTER_ADDRESS_PTR 0x9a01  /* Fixed position - no relo */
-#define SYS_CONFIG_4 0x8003
-#define WAIT_STATE 0xbc00
-#define OPERATION_MODE_1 0xec03
-#define IO_CONFIG_3 0xf002
-
-#define NCR5380_implementation_fields /* none */
-
-#define PAS16_io_port(reg) (instance->io_port + pas16_offset[(reg)])
-
-#define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) )
-#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
-
-#define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
-#define NCR5380_dma_recv_setup		pas16_pread
-#define NCR5380_dma_send_setup		pas16_pwrite
-#define NCR5380_dma_residual(instance)	(0)
-
-#define NCR5380_intr pas16_intr
-#define NCR5380_queue_command pas16_queue_command
-#define NCR5380_abort pas16_abort
-#define NCR5380_bus_reset pas16_bus_reset
-#define NCR5380_info pas16_info
-
-/* 15 14 12 10 7 5 3 
-   1101 0100 1010 1000 */
-   
-#define PAS16_IRQS 0xd4a8 
-
-#endif /* PAS16_H */

+ 2 - 2
drivers/scsi/pm8001/pm8001_hwi.c

@@ -4492,8 +4492,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
  * @num: the inbound queue number
  * @phy_id: the phy id which we wanted to start up.
  */
-int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
-	u8 phy_id)
+static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
+				    u8 phy_id)
 {
 	struct phy_stop_req payload;
 	struct inbound_queue_table *circularQ;

+ 1 - 1
drivers/scsi/pm8001/pm8001_sas.c

@@ -527,7 +527,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
   * pm8001_alloc_dev - find a empty pm8001_device
   * @pm8001_ha: our hba card information
   */
-struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
+static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
 {
 	u32 dev;
 	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {

+ 4 - 4
drivers/scsi/pmcraid.c

@@ -306,7 +306,7 @@ static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
  * Return Value
  *	 None
  */
-void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
+static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
 {
 	struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
 	dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
@@ -401,7 +401,7 @@ static struct pmcraid_cmd *pmcraid_get_free_cmd(
  * Return Value:
  *	nothing
  */
-void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
+static void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
 {
 	struct pmcraid_instance *pinstance = cmd->drv_inst;
 	unsigned long lock_flags;
@@ -1710,7 +1710,7 @@ static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
  * @ioasc: ioasc code
  * @cmd: pointer to command that resulted in 'ioasc'
  */
-void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
+static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
 {
 	struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
 
@@ -3137,7 +3137,7 @@ static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
 *   returns pointer pmcraid_ioadl_desc, initialized to point to internal
 *   or external IOADLs
 */
-struct pmcraid_ioadl_desc *
+static struct pmcraid_ioadl_desc *
 pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
 {
 	struct pmcraid_ioadl_desc *ioadl;

+ 0 - 10
drivers/scsi/qla2xxx/qla_def.h

@@ -278,16 +278,6 @@
 struct req_que;
 struct qla_tgt_sess;
 
-/*
- * (sd.h is not exported, hence local inclusion)
- * Data Integrity Field tuple.
- */
-struct sd_dif_tuple {
-	__be16 guard_tag;	/* Checksum */
-	__be16 app_tag;		/* Opaque storage */
-	__be32 ref_tag;		/* Target LBA or indirect LBA */
-};
-
 /*
  * SCSI Request Block
 */

+ 1 - 1
drivers/scsi/qla2xxx/qla_isr.c

@@ -1828,7 +1828,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
 		if (scsi_prot_sg_count(cmd)) {
 			uint32_t i, j = 0, k = 0, num_ent;
 			struct scatterlist *sg;
-			struct sd_dif_tuple *spt;
+			struct t10_pi_tuple *spt;
 
 			/* Patch the corresponding protection tags */
 			scsi_for_each_prot_sg(cmd, sg,
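Note: the qla2xxx hunks here are part of a tree-wide move to the shared T10 Protection Information tuple in <linux/t10-pi.h>; the local copy removed from qla_def.h above shows the exact layout. A sketch of the shared definition, grounded in that removed struct (not a verbatim quote of the header):

#include <linux/types.h>

/* Shared T10 PI tuple: one 8-byte integrity descriptor per sector,
 * byte-for-byte the same as the driver-local sd_dif_tuple copies
 * being deleted in this series. */
struct t10_pi_tuple {
	__be16 guard_tag;	/* checksum of the data block */
	__be16 app_tag;		/* opaque application storage */
	__be32 ref_tag;		/* target LBA or indirect LBA */
};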

+ 9 - 9
drivers/scsi/qla2xxx/qla_os.c

@@ -899,12 +899,12 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-	while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
-	    ha->flags.mbox_busy) ||
-		test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
-		test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
-			if (test_bit(UNLOADING, &base_vha->dpc_flags))
-				break;
+	while ((qla2x00_reset_active(vha) || ha->dpc_active ||
+		ha->flags.mbox_busy) ||
+	       test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+	       test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
+		if (test_bit(UNLOADING, &base_vha->dpc_flags))
+			break;
 		msleep(1000);
 	}
 }
@@ -4694,7 +4694,7 @@ retry_unlock:
 			qla83xx_wait_logic();
 			retry++;
 			ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
-			    "Failed to release IDC lock, retyring=%d\n", retry);
+			    "Failed to release IDC lock, retrying=%d\n", retry);
 			goto retry_unlock;
 		}
 	} else if (retry < 10) {
@@ -4702,7 +4702,7 @@ retry_unlock:
 		qla83xx_wait_logic();
 		retry++;
 		ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
-		    "Failed to read drv-lockid, retyring=%d\n", retry);
+		    "Failed to read drv-lockid, retrying=%d\n", retry);
 		goto retry_unlock;
 	}
 
@@ -4718,7 +4718,7 @@ retry_unlock2:
 			qla83xx_wait_logic();
 			retry++;
 			ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
-			    "Failed to release IDC lock, retyring=%d\n", retry);
+			    "Failed to release IDC lock, retrying=%d\n", retry);
 			goto retry_unlock2;
 		}
 	}

+ 1 - 1
drivers/scsi/qla4xxx/ql4_nx.c

@@ -1843,7 +1843,7 @@ static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
 	return rval;
 }
 
-uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
 				uint32_t addr3, uint32_t mask, uint32_t addr,
 				uint32_t *data_ptr)
 {

+ 27 - 27
drivers/scsi/scsi_debug.c

@@ -42,6 +42,7 @@
 #include <linux/atomic.h>
 #include <linux/hrtimer.h>
 #include <linux/uuid.h>
+#include <linux/t10-pi.h>
 
 #include <net/checksum.h>
 
@@ -627,7 +628,7 @@ static LIST_HEAD(sdebug_host_list);
 static DEFINE_SPINLOCK(sdebug_host_list_lock);
 
 static unsigned char *fake_storep;	/* ramdisk storage */
-static struct sd_dif_tuple *dif_storep;	/* protection info */
+static struct t10_pi_tuple *dif_storep;	/* protection info */
 static void *map_storep;		/* provisioning map */
 
 static unsigned long map_size;
@@ -682,7 +683,7 @@ static void *fake_store(unsigned long long lba)
 	return fake_storep + lba * sdebug_sector_size;
 }
 
-static struct sd_dif_tuple *dif_store(sector_t sector)
+static struct t10_pi_tuple *dif_store(sector_t sector)
 {
 	sector = sector_div(sector, sdebug_store_sectors);
 
@@ -1349,7 +1350,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		} else if (0x86 == cmd[2]) { /* extended inquiry */
 			arr[1] = cmd[2];	/*sanity */
 			arr[3] = 0x3c;	/* number of following entries */
-			if (sdebug_dif == SD_DIF_TYPE3_PROTECTION)
+			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
 				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
 			else if (have_dif_prot)
 				arr[4] = 0x5;   /* SPT: GRD_CHK:1, REF_CHK:1 */
@@ -2430,7 +2431,7 @@ static __be16 dif_compute_csum(const void *buf, int len)
 	return csum;
 }
 
-static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
+static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
 		      sector_t sector, u32 ei_lba)
 {
 	__be16 csum = dif_compute_csum(data, sdebug_sector_size);
@@ -2442,13 +2443,13 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
 			be16_to_cpu(csum));
 		return 0x01;
 	}
-	if (sdebug_dif == SD_DIF_TYPE1_PROTECTION &&
+	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
 		pr_err("REF check failed on sector %lu\n",
 			(unsigned long)sector);
 		return 0x03;
 	}
-	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
 	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
 		pr_err("REF check failed on sector %lu\n",
 			(unsigned long)sector);
@@ -2504,7 +2505,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
 			    unsigned int sectors, u32 ei_lba)
 {
 	unsigned int i;
-	struct sd_dif_tuple *sdt;
+	struct t10_pi_tuple *sdt;
 	sector_t sector;
 
 	for (i = 0; i < sectors; i++, ei_lba++) {
@@ -2580,13 +2581,13 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		break;
 	}
 	if (unlikely(have_dif_prot && check_prot)) {
-		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
 		    (cmd[1] & 0xe0)) {
 			mk_sense_invalid_opcode(scp);
 			return check_condition_result;
 		}
-		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
-		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
+		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
 		    (cmd[1] & 0xe0) == 0)
 			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
 				    "to DIF device\n");
@@ -2696,7 +2697,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 			     unsigned int sectors, u32 ei_lba)
 {
 	int ret;
-	struct sd_dif_tuple *sdt;
+	struct t10_pi_tuple *sdt;
 	void *daddr;
 	sector_t sector = start_sec;
 	int ppage_offset;
@@ -2722,7 +2723,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 		}
 
 		for (ppage_offset = 0; ppage_offset < piter.length;
-		     ppage_offset += sizeof(struct sd_dif_tuple)) {
+		     ppage_offset += sizeof(struct t10_pi_tuple)) {
 			/* If we're at the end of the current
 			 * data page advance to the next one
 			 */
@@ -2893,13 +2894,13 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
 		break;
 	}
 	if (unlikely(have_dif_prot && check_prot)) {
-		if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
 		    (cmd[1] & 0xe0)) {
 			mk_sense_invalid_opcode(scp);
 			return check_condition_result;
 		}
-		if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
-		     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
+		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
 		    (cmd[1] & 0xe0) == 0)
 			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
 				    "to DIF device\n");
@@ -3135,13 +3136,13 @@ static int resp_comp_write(struct scsi_cmnd *scp,
 	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
 	if (0 == num)
 		return 0;	/* degenerate case, not an error */
-	if (sdebug_dif == SD_DIF_TYPE2_PROTECTION &&
+	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
 	    (cmd[1] & 0xe0)) {
 		mk_sense_invalid_opcode(scp);
 		return check_condition_result;
 	}
-	if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION ||
-	     sdebug_dif == SD_DIF_TYPE3_PROTECTION) &&
+	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
+	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
 	    (cmd[1] & 0xe0) == 0)
 		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
 			    "to DIF device\n");
@@ -4939,12 +4940,11 @@ static int __init scsi_debug_init(void)
 	}
 
 	switch (sdebug_dif) {
-
-	case SD_DIF_TYPE0_PROTECTION:
+	case T10_PI_TYPE0_PROTECTION:
 		break;
-	case SD_DIF_TYPE1_PROTECTION:
-	case SD_DIF_TYPE2_PROTECTION:
-	case SD_DIF_TYPE3_PROTECTION:
+	case T10_PI_TYPE1_PROTECTION:
+	case T10_PI_TYPE2_PROTECTION:
+	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
 		break;
 
@@ -5026,7 +5026,7 @@ static int __init scsi_debug_init(void)
 	if (sdebug_dix) {
 		int dif_size;
 
-		dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
+		dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
 		dif_storep = vmalloc(dif_size);
 
 		pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
@@ -5480,19 +5480,19 @@ static int sdebug_driver_probe(struct device * dev)
 
 	switch (sdebug_dif) {
 
-	case SD_DIF_TYPE1_PROTECTION:
+	case T10_PI_TYPE1_PROTECTION:
 		hprot = SHOST_DIF_TYPE1_PROTECTION;
 		if (sdebug_dix)
 			hprot |= SHOST_DIX_TYPE1_PROTECTION;
 		break;
 
-	case SD_DIF_TYPE2_PROTECTION:
+	case T10_PI_TYPE2_PROTECTION:
 		hprot = SHOST_DIF_TYPE2_PROTECTION;
 		if (sdebug_dix)
 			hprot |= SHOST_DIX_TYPE2_PROTECTION;
 		break;
 
-	case SD_DIF_TYPE3_PROTECTION:
+	case T10_PI_TYPE3_PROTECTION:
 		hprot = SHOST_DIF_TYPE3_PROTECTION;
 		if (sdebug_dix)
 			hprot |= SHOST_DIX_TYPE3_PROTECTION;
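Note: the SD_DIF_TYPE* to T10_PI_TYPE* renames in scsi_debug (and in the sd.c/sd.h hunks below) rely on <linux/t10-pi.h> now carrying the protection-type constants. Judging by the enum removed from sd.h further down, the shared definition is along these lines (a sketch; values are grounded in the removed sd.h enum, the enum tag is an assumption):

enum t10_dif_type {
	T10_PI_TYPE0_PROTECTION = 0x0,	/* regular, unprotected I/O */
	T10_PI_TYPE1_PROTECTION = 0x1,	/* guard + reference tags defined */
	T10_PI_TYPE2_PROTECTION = 0x2,	/* as Type 1, ref tag seeded by 32-byte CDBs */
	T10_PI_TYPE3_PROTECTION = 0x3,	/* guard tag only */
};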

+ 2 - 0
drivers/scsi/scsi_priv.h

@@ -86,12 +86,14 @@ extern void scsi_device_unbusy(struct scsi_device *sdev);
 extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
 extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
 extern void scsi_run_host_queues(struct Scsi_Host *shost);
+extern void scsi_requeue_run_queue(struct work_struct *work);
 extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
 extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
 extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
 extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
 extern int scsi_init_queue(void);
 extern void scsi_exit_queue(void);
+extern void scsi_evt_thread(struct work_struct *work);
 struct request_queue;
 struct request;
 extern struct kmem_cache *scsi_sdb_cache;

+ 0 - 2
drivers/scsi/scsi_scan.c

@@ -217,8 +217,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	struct scsi_device *sdev;
 	int display_failure_msg = 1, ret;
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-	extern void scsi_evt_thread(struct work_struct *work);
-	extern void scsi_requeue_run_queue(struct work_struct *work);
 
 	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
 		       GFP_ATOMIC);
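Note: this scsi_priv.h/scsi_scan.c pair moves two function-scope extern declarations into the subsystem-private header, so every user is type-checked against a single prototype. A toy single-file illustration of the idea (names hypothetical; in the real tree the prototypes live in scsi_priv.h):

#include <stdio.h>

/* In the real tree this prototype now lives in scsi_priv.h instead of
 * being re-declared inside scsi_alloc_sdev(). */
void evt_thread_work(void);

static void alloc_sdev(void)
{
	/* Previously: a local "extern void scsi_evt_thread(...);" here,
	 * invisible to other translation units and never checked
	 * against the actual definition. */
	evt_thread_work();
}

void evt_thread_work(void)
{
	puts("event work ran");
}

int main(void)
{
	alloc_sdev();
	return 0;
}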

+ 6 - 5
drivers/scsi/sd.c

@@ -52,6 +52,7 @@
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/pr.h>
+#include <linux/t10-pi.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
@@ -314,7 +315,7 @@ protection_type_store(struct device *dev, struct device_attribute *attr,
 	if (err)
 		return err;
 
-	if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION)
+	if (val >= 0 && val <= T10_PI_TYPE3_PROTECTION)
 		sdkp->protection_type = val;
 
 	return count;
@@ -332,7 +333,7 @@ protection_mode_show(struct device *dev, struct device_attribute *attr,
 	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
 	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
 
-	if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
+	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
 		dif = 0;
 		dix = 1;
 	}
@@ -608,7 +609,7 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
 			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
 	}
 
-	if (dif != SD_DIF_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
+	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
 		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
 
 		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
@@ -1031,7 +1032,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 	else
 		protect = 0;
 
-	if (protect && sdkp->protection_type == SD_DIF_TYPE2_PROTECTION) {
+	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
 		SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
 
 		if (unlikely(SCpnt->cmnd == NULL)) {
@@ -1997,7 +1998,7 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
 
 	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
 
-	if (type > SD_DIF_TYPE3_PROTECTION)
+	if (type > T10_PI_TYPE3_PROTECTION)
 		ret = -ENODEV;
 	else if (scsi_host_dif_capable(sdp->host, type))
 		ret = 1;

+ 0 - 30
drivers/scsi/sd.h

@@ -156,27 +156,6 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b
 	return blocks * sdev->sector_size;
 }
 
-/*
- * A DIF-capable target device can be formatted with different
- * protection schemes.  Currently 0 through 3 are defined:
- *
- * Type 0 is regular (unprotected) I/O
- *
- * Type 1 defines the contents of the guard and reference tags
- *
- * Type 2 defines the contents of the guard and reference tags and
- * uses 32-byte commands to seed the latter
- *
- * Type 3 defines the contents of the guard tag only
- */
-
-enum sd_dif_target_protection_types {
-	SD_DIF_TYPE0_PROTECTION = 0x0,
-	SD_DIF_TYPE1_PROTECTION = 0x1,
-	SD_DIF_TYPE2_PROTECTION = 0x2,
-	SD_DIF_TYPE3_PROTECTION = 0x3,
-};
-
 /*
  * Look up the DIX operation based on whether the command is read or
  * write and whether dix and dif are enabled.
@@ -239,15 +218,6 @@ static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
 	return flag_mask[prot_op];
 }
 
-/*
- * Data Integrity Field tuple.
- */
-struct sd_dif_tuple {
-       __be16 guard_tag;	/* Checksum */
-       __be16 app_tag;		/* Opaque storage */
-       __be32 ref_tag;		/* Target LBA or indirect LBA */
-};
-
 #ifdef CONFIG_BLK_DEV_INTEGRITY
 
 extern void sd_dif_config_host(struct scsi_disk *);
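
The protection-type constants removed here do not vanish: sd.c now includes <linux/t10-pi.h> (see the hunk above) and uses the block layer's shared names. For reference — to the best of my knowledge of this merge; check include/linux/t10-pi.h in the tree for the authoritative version — the shared definitions look like:

    /* include/linux/t10-pi.h (reference only, not part of this diff) */
    enum t10_dif_type {
    	T10_PI_TYPE0_PROTECTION = 0x0,	/* regular (unprotected) I/O */
    	T10_PI_TYPE1_PROTECTION = 0x1,	/* guard and reference tags */
    	T10_PI_TYPE2_PROTECTION = 0x2,	/* Type 1 tags, seeded via 32-byte CDBs */
    	T10_PI_TYPE3_PROTECTION = 0x3,	/* guard tag only */
    };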

+ 5 - 5
drivers/scsi/sd_dif.c

@@ -60,14 +60,14 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
 
 	/* Enable DMA of protection information */
 	if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
-		if (type == SD_DIF_TYPE3_PROTECTION)
+		if (type == T10_PI_TYPE3_PROTECTION)
 			bi.profile = &t10_pi_type3_ip;
 		else
 			bi.profile = &t10_pi_type1_ip;
 
 		bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
 	} else
-		if (type == SD_DIF_TYPE3_PROTECTION)
+		if (type == T10_PI_TYPE3_PROTECTION)
 			bi.profile = &t10_pi_type3_crc;
 		else
 			bi.profile = &t10_pi_type1_crc;
@@ -82,7 +82,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
 		if (!sdkp->ATO)
 			goto out;
 
-		if (type == SD_DIF_TYPE3_PROTECTION)
+		if (type == T10_PI_TYPE3_PROTECTION)
 			bi.tag_size = sizeof(u16) + sizeof(u32);
 		else
 			bi.tag_size = sizeof(u16);
@@ -121,7 +121,7 @@ void sd_dif_prepare(struct scsi_cmnd *scmd)
 
 	sdkp = scsi_disk(scmd->request->rq_disk);
 
-	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
+	if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION)
 		return;
 
 	phys = scsi_prot_ref_tag(scmd);
@@ -172,7 +172,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
 
 	sdkp = scsi_disk(scmd->request->rq_disk);
 
-	if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
+	if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0)
 		return;
 
 	intervals = good_bytes / scsi_prot_interval(scmd);
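
The renamed checks in sd_dif_config_host() above all feed one two-by-two choice: the DIF type crossed with the guard format the HBA advertises. A compilable user-space sketch of that selection (profile names rendered as strings and stand-in constants, so it runs outside the kernel):

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins so the sketch compiles outside the kernel. */
    enum { T10_PI_TYPE1_PROTECTION = 1, T10_PI_TYPE3_PROTECTION = 3 };

    /* Mirrors sd_dif_config_host() above: Type 3 pairs with the type3
     * profiles, everything else with type1, and an IP-checksum-capable
     * guard (SHOST_DIX_GUARD_IP) picks the _ip variants over _crc. */
    static const char *pick_profile(unsigned int type, bool guard_ip)
    {
    	if (guard_ip)
    		return type == T10_PI_TYPE3_PROTECTION ?
    			"t10_pi_type3_ip" : "t10_pi_type1_ip";
    	return type == T10_PI_TYPE3_PROTECTION ?
    		"t10_pi_type3_crc" : "t10_pi_type1_crc";
    }

    int main(void)
    {
    	printf("%s\n", pick_profile(T10_PI_TYPE1_PROTECTION, false)); /* t10_pi_type1_crc */
    	printf("%s\n", pick_profile(T10_PI_TYPE3_PROTECTION, true));  /* t10_pi_type3_ip */
    	return 0;
    }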

+ 5 - 15
drivers/scsi/sg.c

@@ -79,18 +79,7 @@ static void sg_proc_cleanup(void);
  */
 #define SG_MAX_CDB_SIZE 252
 
-/*
- * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
- * Then when using 32 bit integers x * m may overflow during the calculation.
- * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
- * calculates the same, but prevents the overflow when both m and d
- * are "small" numbers (like HZ and USER_HZ).
- * Of course an overflow is inavoidable if the result of muldiv doesn't fit
- * in 32 bits.
- */
-#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
-
-#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
+#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
 
 int sg_big_buff = SG_DEF_RESERVED_SIZE;
 /* N.B. This variable is readable and writeable via
@@ -884,10 +873,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
 			return result;
 		if (val < 0)
 			return -EIO;
-		if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
-		    val = MULDIV (INT_MAX, USER_HZ, HZ);
+		if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
+			val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
+				    INT_MAX);
 		sfp->timeout_user = val;
-		sfp->timeout = MULDIV (val, HZ, USER_HZ);
+		sfp->timeout = mult_frac(val, HZ, USER_HZ);
 
 		return 0;
 	case SG_GET_TIMEOUT:	/* N.B. User receives timeout as return value */
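
The sg.c conversion works because mult_frac() from <linux/kernel.h> performs exactly the quotient/remainder split the removed MULDIV comment described, so no intermediate holds the full x * m product. A user-space sketch of the identity (mult_frac restated here, since the kernel macro is not available outside the tree):

    #include <stdio.h>

    /* Same shape as the kernel's mult_frac(): split x into x/d and x%d
     * so the multiplication never sees the full x * n product. */
    #define mult_frac(x, n, d) \
    	((((x) / (d)) * (n)) + ((((x) % (d)) * (n)) / (d)))

    int main(void)
    {
    	unsigned int x = 4000000000u;	/* near UINT_MAX */

    	/* naive x * 100 overflows 32 bits; the split form does not */
    	printf("%u\n", mult_frac(x, 100u, 1000u));	/* prints 400000000 */
    	return 0;
    }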

+ 54 - 0
drivers/scsi/smartpqi/Kconfig

@@ -0,0 +1,54 @@
+#
+# Kernel configuration file for the SMARTPQI
+#
+# Copyright (c) 2016 Microsemi Corporation
+# Copyright (c) 2016 PMC-Sierra, Inc.
+#  (mailto:esc.storagedev@microsemi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2
+# of the License.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+config SCSI_SMARTPQI
+	tristate "Microsemi PQI Driver"
+	depends on PCI && SCSI && !S390
+	select SCSI_SAS_ATTRS
+	select RAID_ATTRS
+	---help---
+	This driver supports Microsemi PQI controllers.
+
+	<http://www.microsemi.com>
+
+	To compile this driver as a module, choose M here: the
+	module will be called smartpqi.
+
+        Note: the aacraid driver will not manage a smartpqi
+              controller. You need to enable smartpqi for smartpqi
+              controllers. For more information, please see
+              Documentation/scsi/smartpqi.txt

+ 3 - 0
drivers/scsi/smartpqi/Makefile

@@ -0,0 +1,3 @@
+ccflags-y += -I.
+obj-m		+= smartpqi.o
+smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
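
(Note the unconditional obj-m above: as submitted, the Makefile always builds the driver as a module; the conventional kbuild form would be obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi.o, so that the Kconfig tristate actually gates the build.)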

+ 1136 - 0
drivers/scsi/smartpqi/smartpqi.h

@@ -0,0 +1,1136 @@
+/*
+ *    driver for Microsemi PQI-based storage controllers
+ *    Copyright (c) 2016 Microsemi Corporation
+ *    Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#if !defined(_SMARTPQI_H)
+#define _SMARTPQI_H
+
+#pragma pack(1)
+
+#define PQI_DEVICE_SIGNATURE	"PQI DREG"
+
+/* This structure is defined by the PQI specification. */
+struct pqi_device_registers {
+	__le64	signature;
+	u8	function_and_status_code;
+	u8	reserved[7];
+	u8	max_admin_iq_elements;
+	u8	max_admin_oq_elements;
+	u8	admin_iq_element_length;	/* in 16-byte units */
+	u8	admin_oq_element_length;	/* in 16-byte units */
+	__le16	max_reset_timeout;		/* in 100-millisecond units */
+	u8	reserved1[2];
+	__le32	legacy_intx_status;
+	__le32	legacy_intx_mask_set;
+	__le32	legacy_intx_mask_clear;
+	u8	reserved2[28];
+	__le32	device_status;
+	u8	reserved3[4];
+	__le64	admin_iq_pi_offset;
+	__le64	admin_oq_ci_offset;
+	__le64	admin_iq_element_array_addr;
+	__le64	admin_oq_element_array_addr;
+	__le64	admin_iq_ci_addr;
+	__le64	admin_oq_pi_addr;
+	u8	admin_iq_num_elements;
+	u8	admin_oq_num_elements;
+	__le16	admin_queue_int_msg_num;
+	u8	reserved4[4];
+	__le32	device_error;
+	u8	reserved5[4];
+	__le64	error_details;
+	__le32	device_reset;
+	__le32	power_action;
+	u8	reserved6[104];
+};
+
+/*
+ * controller registers
+ *
+ * These are defined by the PMC implementation.
+ *
+ * Some registers (those named sis_*) are only used when in
+ * legacy SIS mode before we transition the controller into
+ * PQI mode.  There are a number of other SIS mode registers,
+ * but we don't use them, so only the SIS registers that we
+ * care about are defined here.  The offsets mentioned in the
+ * comments are the offsets from the PCIe BAR 0.
+ */
+struct pqi_ctrl_registers {
+	u8	reserved[0x20];
+	__le32	sis_host_to_ctrl_doorbell;		/* 20h */
+	u8	reserved1[0x34 - (0x20 + sizeof(__le32))];
+	__le32	sis_interrupt_mask;			/* 34h */
+	u8	reserved2[0x9c - (0x34 + sizeof(__le32))];
+	__le32	sis_ctrl_to_host_doorbell;		/* 9Ch */
+	u8	reserved3[0xa0 - (0x9c + sizeof(__le32))];
+	__le32	sis_ctrl_to_host_doorbell_clear;	/* A0h */
+	u8	reserved4[0xb0 - (0xa0 + sizeof(__le32))];
+	__le32	sis_driver_scratch;			/* B0h */
+	u8	reserved5[0xbc - (0xb0 + sizeof(__le32))];
+	__le32	sis_firmware_status;			/* BCh */
+	u8	reserved6[0x1000 - (0xbc + sizeof(__le32))];
+	__le32	sis_mailbox[8];				/* 1000h */
+	u8	reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
+	/*
+	 * The PQI spec states that the PQI registers should be at
+	 * offset 0 from the PCIe BAR 0.  However, we can't map
+	 * them at offset 0 because that would break compatibility
+	 * with the SIS registers.  So we map them at offset 4000h.
+	 */
+	struct pqi_device_registers pqi_registers;	/* 4000h */
+};
+
+#define PQI_DEVICE_REGISTERS_OFFSET	0x4000
+
+enum pqi_io_path {
+	RAID_PATH = 0,
+	AIO_PATH = 1
+};
+
+struct pqi_sg_descriptor {
+	__le64	address;
+	__le32	length;
+	__le32	flags;
+};
+
+/* manifest constants for the flags field of pqi_sg_descriptor */
+#define CISS_SG_LAST	0x40000000
+#define CISS_SG_CHAIN	0x80000000
+
+struct pqi_iu_header {
+	u8	iu_type;
+	u8	reserved;
+	__le16	iu_length;	/* in bytes - does not include the length */
+				/* of this header */
+	__le16	response_queue_id;	/* specifies the OQ where the */
+					/*   response IU is to be delivered */
+	u8	work_area[2];	/* reserved for driver use */
+};
+
+/*
+ * According to the PQI spec, the IU header is only the first 4 bytes of our
+ * pqi_iu_header structure.
+ */
+#define PQI_REQUEST_HEADER_LENGTH	4
+
+struct pqi_general_admin_request {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	u8	function_code;
+	union {
+		struct {
+			u8	reserved[33];
+			__le32	buffer_length;
+			struct pqi_sg_descriptor sg_descriptor;
+		} report_device_capability;
+
+		struct {
+			u8	reserved;
+			__le16	queue_id;
+			u8	reserved1[2];
+			__le64	element_array_addr;
+			__le64	ci_addr;
+			__le16	num_elements;
+			__le16	element_length;
+			u8	queue_protocol;
+			u8	reserved2[23];
+			__le32	vendor_specific;
+		} create_operational_iq;
+
+		struct {
+			u8	reserved;
+			__le16	queue_id;
+			u8	reserved1[2];
+			__le64	element_array_addr;
+			__le64	pi_addr;
+			__le16	num_elements;
+			__le16	element_length;
+			u8	queue_protocol;
+			u8	reserved2[3];
+			__le16	int_msg_num;
+			__le16	coalescing_count;
+			__le32	min_coalescing_time;
+			__le32	max_coalescing_time;
+			u8	reserved3[8];
+			__le32	vendor_specific;
+		} create_operational_oq;
+
+		struct {
+			u8	reserved;
+			__le16	queue_id;
+			u8	reserved1[50];
+		} delete_operational_queue;
+
+		struct {
+			u8	reserved;
+			__le16	queue_id;
+			u8	reserved1[46];
+			__le32	vendor_specific;
+		} change_operational_iq_properties;
+
+	} data;
+};
+
+struct pqi_general_admin_response {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	u8	function_code;
+	u8	status;
+	union {
+		struct {
+			u8	status_descriptor[4];
+			__le64	iq_pi_offset;
+			u8	reserved[40];
+		} create_operational_iq;
+
+		struct {
+			u8	status_descriptor[4];
+			__le64	oq_ci_offset;
+			u8	reserved[40];
+		} create_operational_oq;
+	} data;
+};
+
+struct pqi_iu_layer_descriptor {
+	u8	inbound_spanning_supported : 1;
+	u8	reserved : 7;
+	u8	reserved1[5];
+	__le16	max_inbound_iu_length;
+	u8	outbound_spanning_supported : 1;
+	u8	reserved2 : 7;
+	u8	reserved3[5];
+	__le16	max_outbound_iu_length;
+};
+
+struct pqi_device_capability {
+	__le16	data_length;
+	u8	reserved[6];
+	u8	iq_arbitration_priority_support_bitmask;
+	u8	maximum_aw_a;
+	u8	maximum_aw_b;
+	u8	maximum_aw_c;
+	u8	max_arbitration_burst : 3;
+	u8	reserved1 : 4;
+	u8	iqa : 1;
+	u8	reserved2[2];
+	u8	iq_freeze : 1;
+	u8	reserved3 : 7;
+	__le16	max_inbound_queues;
+	__le16	max_elements_per_iq;
+	u8	reserved4[4];
+	__le16	max_iq_element_length;
+	__le16	min_iq_element_length;
+	u8	reserved5[2];
+	__le16	max_outbound_queues;
+	__le16	max_elements_per_oq;
+	__le16	intr_coalescing_time_granularity;
+	__le16	max_oq_element_length;
+	__le16	min_oq_element_length;
+	u8	reserved6[24];
+	struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
+};
+
+#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS		4
+
+struct pqi_raid_path_request {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	nexus_id;
+	__le32	buffer_length;
+	u8	lun_number[8];
+	__le16	protocol_specific;
+	u8	data_direction : 2;
+	u8	partial : 1;
+	u8	reserved1 : 4;
+	u8	fence : 1;
+	__le16	error_index;
+	u8	reserved2;
+	u8	task_attribute : 3;
+	u8	command_priority : 4;
+	u8	reserved3 : 1;
+	u8	reserved4 : 2;
+	u8	additional_cdb_bytes_usage : 3;
+	u8	reserved5 : 3;
+	u8	cdb[32];
+	struct pqi_sg_descriptor
+		sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+struct pqi_aio_path_request {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	u8	reserved1[2];
+	__le32	nexus_id;
+	__le32	buffer_length;
+	u8	data_direction : 2;
+	u8	partial : 1;
+	u8	memory_type : 1;
+	u8	fence : 1;
+	u8	encryption_enable : 1;
+	u8	reserved2 : 2;
+	u8	task_attribute : 3;
+	u8	command_priority : 4;
+	u8	reserved3 : 1;
+	__le16	data_encryption_key_index;
+	__le32	encrypt_tweak_lower;
+	__le32	encrypt_tweak_upper;
+	u8	cdb[16];
+	__le16	error_index;
+	u8	num_sg_descriptors;
+	u8	cdb_length;
+	u8	lun_number[8];
+	u8	reserved4[4];
+	struct pqi_sg_descriptor
+		sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
+};
+
+struct pqi_io_response {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	error_index;
+	u8	reserved2[4];
+};
+
+struct pqi_general_management_request {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	union {
+		struct {
+			u8	reserved[2];
+			__le32	buffer_length;
+			struct pqi_sg_descriptor sg_descriptors[3];
+		} report_event_configuration;
+
+		struct {
+			__le16	global_event_oq_id;
+			__le32	buffer_length;
+			struct pqi_sg_descriptor sg_descriptors[3];
+		} set_event_configuration;
+	} data;
+};
+
+struct pqi_event_descriptor {
+	u8	event_type;
+	u8	reserved;
+	__le16	oq_id;
+};
+
+struct pqi_event_config {
+	u8	reserved[2];
+	u8	num_event_descriptors;
+	u8	reserved1;
+	struct pqi_event_descriptor descriptors[1];
+};
+
+#define PQI_MAX_EVENT_DESCRIPTORS	255
+
+struct pqi_event_response {
+	struct pqi_iu_header header;
+	u8	event_type;
+	u8	reserved2 : 7;
+	u8	request_acknowlege : 1;
+	__le16	event_id;
+	__le32	additional_event_id;
+	u8	data[16];
+};
+
+struct pqi_event_acknowledge_request {
+	struct pqi_iu_header header;
+	u8	event_type;
+	u8	reserved2;
+	__le16	event_id;
+	__le32	additional_event_id;
+};
+
+struct pqi_task_management_request {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	nexus_id;
+	u8	reserved[4];
+	u8	lun_number[8];
+	__le16	protocol_specific;
+	__le16	outbound_queue_id_to_manage;
+	__le16	request_id_to_manage;
+	u8	task_management_function;
+	u8	reserved2 : 7;
+	u8	fence : 1;
+};
+
+#define SOP_TASK_MANAGEMENT_LUN_RESET	0x8
+
+struct pqi_task_management_response {
+	struct pqi_iu_header header;
+	__le16	request_id;
+	__le16	nexus_id;
+	u8	additional_response_info[3];
+	u8	response_code;
+};
+
+struct pqi_aio_error_info {
+	u8	status;
+	u8	service_response;
+	u8	data_present;
+	u8	reserved;
+	__le32	residual_count;
+	__le16	data_length;
+	__le16	reserved1;
+	u8	data[256];
+};
+
+struct pqi_raid_error_info {
+	u8	data_in_result;
+	u8	data_out_result;
+	u8	reserved[3];
+	u8	status;
+	__le16	status_qualifier;
+	__le16	sense_data_length;
+	__le16	response_data_length;
+	__le32	data_in_transferred;
+	__le32	data_out_transferred;
+	u8	data[256];
+};
+
+#define PQI_REQUEST_IU_TASK_MANAGEMENT			0x13
+#define PQI_REQUEST_IU_RAID_PATH_IO			0x14
+#define PQI_REQUEST_IU_AIO_PATH_IO			0x15
+#define PQI_REQUEST_IU_GENERAL_ADMIN			0x60
+#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG	0x72
+#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG		0x73
+#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT		0xf6
+
+#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT		0x81
+#define PQI_RESPONSE_IU_TASK_MANAGEMENT			0x93
+#define PQI_RESPONSE_IU_GENERAL_ADMIN			0xe0
+#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS		0xf0
+#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS		0xf1
+#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR		0xf2
+#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR		0xf3
+#define PQI_RESPONSE_IU_AIO_PATH_DISABLED		0xf4
+#define PQI_RESPONSE_IU_VENDOR_EVENT			0xf5
+
+#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY	0x0
+#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ			0x10
+#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ			0x11
+#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ			0x12
+#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ			0x13
+#define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY		0x14
+
+#define PQI_GENERAL_ADMIN_STATUS_SUCCESS	0x0
+
+#define PQI_IQ_PROPERTY_IS_AIO_QUEUE	0x1
+
+#define PQI_GENERAL_ADMIN_IU_LENGTH		0x3c
+#define PQI_PROTOCOL_SOP			0x0
+
+#define PQI_DATA_IN_OUT_GOOD					0x0
+#define PQI_DATA_IN_OUT_UNDERFLOW				0x1
+#define PQI_DATA_IN_OUT_BUFFER_ERROR				0x40
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW				0x41
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA		0x42
+#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE			0x43
+#define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR			0x60
+#define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT			0x61
+#define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED		0x62
+#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED	0x63
+#define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED			0x64
+#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST		0x65
+#define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION			0x66
+#define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED			0x67
+#define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ		0x6F
+#define PQI_DATA_IN_OUT_ERROR					0xf0
+#define PQI_DATA_IN_OUT_PROTOCOL_ERROR				0xf1
+#define PQI_DATA_IN_OUT_HARDWARE_ERROR				0xf2
+#define PQI_DATA_IN_OUT_UNSOLICITED_ABORT			0xf3
+#define PQI_DATA_IN_OUT_ABORTED					0xf4
+#define PQI_DATA_IN_OUT_TIMEOUT					0xf5
+
+#define CISS_CMD_STATUS_SUCCESS			0x0
+#define CISS_CMD_STATUS_TARGET_STATUS		0x1
+#define CISS_CMD_STATUS_DATA_UNDERRUN		0x2
+#define CISS_CMD_STATUS_DATA_OVERRUN		0x3
+#define CISS_CMD_STATUS_INVALID			0x4
+#define CISS_CMD_STATUS_PROTOCOL_ERROR		0x5
+#define CISS_CMD_STATUS_HARDWARE_ERROR		0x6
+#define CISS_CMD_STATUS_CONNECTION_LOST		0x7
+#define CISS_CMD_STATUS_ABORTED			0x8
+#define CISS_CMD_STATUS_ABORT_FAILED		0x9
+#define CISS_CMD_STATUS_UNSOLICITED_ABORT	0xa
+#define CISS_CMD_STATUS_TIMEOUT			0xb
+#define CISS_CMD_STATUS_UNABORTABLE		0xc
+#define CISS_CMD_STATUS_TMF			0xd
+#define CISS_CMD_STATUS_AIO_DISABLED		0xe
+
+#define PQI_NUM_EVENT_QUEUE_ELEMENTS	32
+#define PQI_EVENT_OQ_ELEMENT_LENGTH	sizeof(struct pqi_event_response)
+
+#define PQI_EVENT_TYPE_HOTPLUG			0x1
+#define PQI_EVENT_TYPE_HARDWARE			0x2
+#define PQI_EVENT_TYPE_PHYSICAL_DEVICE		0x4
+#define PQI_EVENT_TYPE_LOGICAL_DEVICE		0x5
+#define PQI_EVENT_TYPE_AIO_STATE_CHANGE		0xfd
+#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE	0xfe
+#define PQI_EVENT_TYPE_HEARTBEAT		0xff
+
+#pragma pack()
+
+#define PQI_ERROR_BUFFER_ELEMENT_LENGTH		\
+	sizeof(struct pqi_raid_error_info)
+
+/* these values are based on our implementation */
+#define PQI_ADMIN_IQ_NUM_ELEMENTS		8
+#define PQI_ADMIN_OQ_NUM_ELEMENTS		20
+#define PQI_ADMIN_IQ_ELEMENT_LENGTH		64
+#define PQI_ADMIN_OQ_ELEMENT_LENGTH		64
+
+#define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH	128
+#define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH	16
+
+#define PQI_MIN_MSIX_VECTORS		1
+#define PQI_MAX_MSIX_VECTORS		64
+
+/* these values are defined by the PQI spec */
+#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE	255
+#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE	65535
+#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT	64
+#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT	16
+#define PQI_ADMIN_INDEX_ALIGNMENT		64
+#define PQI_OPERATIONAL_INDEX_ALIGNMENT		4
+
+#define PQI_MIN_OPERATIONAL_QUEUE_ID		1
+#define PQI_MAX_OPERATIONAL_QUEUE_ID		65535
+
+#define PQI_AIO_SERV_RESPONSE_COMPLETE		0
+#define PQI_AIO_SERV_RESPONSE_FAILURE		1
+#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE	2
+#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED	3
+#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED	4
+#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN	5
+
+#define PQI_AIO_STATUS_IO_ERROR			0x1
+#define PQI_AIO_STATUS_IO_ABORTED		0x2
+#define PQI_AIO_STATUS_NO_PATH_TO_DEVICE	0x3
+#define PQI_AIO_STATUS_INVALID_DEVICE		0x4
+#define PQI_AIO_STATUS_AIO_PATH_DISABLED	0xe
+#define PQI_AIO_STATUS_UNDERRUN			0x51
+#define PQI_AIO_STATUS_OVERRUN			0x75
+
+typedef u32 pqi_index_t;
+
+/* SOP data direction flags */
+#define SOP_NO_DIRECTION_FLAG	0
+#define SOP_WRITE_FLAG		1	/* host writes data to Data-Out */
+					/* buffer */
+#define SOP_READ_FLAG		2	/* host receives data from Data-In */
+					/* buffer */
+#define SOP_BIDIRECTIONAL	3	/* data is transferred from the */
+					/* Data-Out buffer and data is */
+					/* transferred to the Data-In buffer */
+
+#define SOP_TASK_ATTRIBUTE_SIMPLE		0
+#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE	1
+#define SOP_TASK_ATTRIBUTE_ORDERED		2
+#define SOP_TASK_ATTRIBUTE_ACA			4
+
+#define SOP_TMF_COMPLETE		0x0
+#define SOP_TMF_FUNCTION_SUCCEEDED	0x8
+
+/* additional CDB bytes usage field codes */
+#define SOP_ADDITIONAL_CDB_BYTES_0	0	/* 16-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_4	1	/* 20-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_8	2	/* 24-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_12	3	/* 28-byte CDB */
+#define SOP_ADDITIONAL_CDB_BYTES_16	4	/* 32-byte CDB */
+
+/*
+ * The purpose of this structure is to obtain proper alignment of objects in
+ * an admin queue pair.
+ */
+struct pqi_admin_queues_aligned {
+	__aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
+		u8	iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
+					[PQI_ADMIN_IQ_NUM_ELEMENTS];
+	__aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
+		u8	oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
+					[PQI_ADMIN_OQ_NUM_ELEMENTS];
+	__aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
+	__aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
+};
+
+struct pqi_admin_queues {
+	void		*iq_element_array;
+	void		*oq_element_array;
+	volatile pqi_index_t *iq_ci;
+	volatile pqi_index_t *oq_pi;
+	dma_addr_t	iq_element_array_bus_addr;
+	dma_addr_t	oq_element_array_bus_addr;
+	dma_addr_t	iq_ci_bus_addr;
+	dma_addr_t	oq_pi_bus_addr;
+	__le32 __iomem	*iq_pi;
+	pqi_index_t	iq_pi_copy;
+	__le32 __iomem	*oq_ci;
+	pqi_index_t	oq_ci_copy;
+	struct task_struct *task;
+	u16		int_msg_num;
+};
+
+struct pqi_queue_group {
+	struct pqi_ctrl_info *ctrl_info;	/* backpointer */
+	u16		iq_id[2];
+	u16		oq_id;
+	u16		int_msg_num;
+	void		*iq_element_array[2];
+	void		*oq_element_array;
+	dma_addr_t	iq_element_array_bus_addr[2];
+	dma_addr_t	oq_element_array_bus_addr;
+	__le32 __iomem	*iq_pi[2];
+	pqi_index_t	iq_pi_copy[2];
+	volatile pqi_index_t *iq_ci[2];
+	volatile pqi_index_t *oq_pi;
+	dma_addr_t	iq_ci_bus_addr[2];
+	dma_addr_t	oq_pi_bus_addr;
+	__le32 __iomem	*oq_ci;
+	pqi_index_t	oq_ci_copy;
+	spinlock_t	submit_lock[2];	/* protect submission queue */
+	struct list_head request_list[2];
+};
+
+struct pqi_event_queue {
+	u16		oq_id;
+	u16		int_msg_num;
+	void		*oq_element_array;
+	volatile pqi_index_t *oq_pi;
+	dma_addr_t	oq_element_array_bus_addr;
+	dma_addr_t	oq_pi_bus_addr;
+	__le32 __iomem	*oq_ci;
+	pqi_index_t	oq_ci_copy;
+};
+
+#define PQI_DEFAULT_QUEUE_GROUP		0
+#define PQI_MAX_QUEUE_GROUPS		PQI_MAX_MSIX_VECTORS
+
+struct pqi_encryption_info {
+	u16	data_encryption_key_index;
+	u32	encrypt_tweak_lower;
+	u32	encrypt_tweak_upper;
+};
+
+#define PQI_MAX_OUTSTANDING_REQUESTS	((u32)~0)
+#define PQI_MAX_TRANSFER_SIZE		(4 * 1024U * 1024U)
+
+#define RAID_MAP_MAX_ENTRIES		1024
+
+#define PQI_PHYSICAL_DEVICE_BUS		0
+#define PQI_RAID_VOLUME_BUS		1
+#define PQI_HBA_BUS			2
+#define PQI_MAX_BUS			PQI_HBA_BUS
+
+#pragma pack(1)
+
+struct report_lun_header {
+	__be32	list_length;
+	u8	extended_response;
+	u8	reserved[3];
+};
+
+struct report_log_lun_extended_entry {
+	u8	lunid[8];
+	u8	volume_id[16];
+};
+
+struct report_log_lun_extended {
+	struct report_lun_header header;
+	struct report_log_lun_extended_entry lun_entries[1];
+};
+
+struct report_phys_lun_extended_entry {
+	u8	lunid[8];
+	__be64	wwid;
+	u8	device_type;
+	u8	device_flags;
+	u8	lun_count;	/* number of LUNs in a multi-LUN device */
+	u8	redundant_paths;
+	u32	aio_handle;
+};
+
+/* for device_flags field of struct report_phys_lun_extended_entry */
+#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK	0x1
+#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED	0x8
+
+struct report_phys_lun_extended {
+	struct report_lun_header header;
+	struct report_phys_lun_extended_entry lun_entries[1];
+};
+
+struct raid_map_disk_data {
+	u32	aio_handle;
+	u8	xor_mult[2];
+	u8	reserved[2];
+};
+
+/* constants for flags field of RAID map */
+#define RAID_MAP_ENCRYPTION_ENABLED	0x1
+
+struct raid_map {
+	__le32	structure_size;		/* size of entire structure in bytes */
+	__le32	volume_blk_size;	/* bytes / block in the volume */
+	__le64	volume_blk_cnt;		/* logical blocks on the volume */
+	u8	phys_blk_shift;		/* shift factor to convert between */
+					/* units of logical blocks and */
+					/* physical disk blocks */
+	u8	parity_rotation_shift;	/* shift factor to convert between */
+					/* units of logical stripes and */
+					/* physical stripes */
+	__le16	strip_size;		/* blocks used on each disk / stripe */
+	__le64	disk_starting_blk;	/* first disk block used in volume */
+	__le64	disk_blk_cnt;		/* disk blocks used by volume / disk */
+	__le16	data_disks_per_row;	/* data disk entries / row in the map */
+	__le16	metadata_disks_per_row;	/* mirror/parity disk entries / row */
+					/* in the map */
+	__le16	row_cnt;		/* rows in each layout map */
+	__le16	layout_map_count;	/* layout maps (1 map per */
+					/* mirror parity group) */
+	__le16	flags;
+	__le16	data_encryption_key_index;
+	u8	reserved[16];
+	struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
+};
+
+#pragma pack()
+
+#define RAID_CTLR_LUNID		"\0\0\0\0\0\0\0\0"
+
+struct pqi_scsi_dev {
+	int	devtype;		/* as reported by INQUIRY commmand */
+	u8	device_type;		/* as reported by */
+					/* BMIC_IDENTIFY_PHYSICAL_DEVICE */
+					/* only valid for devtype = TYPE_DISK */
+	int	bus;
+	int	target;
+	int	lun;
+	u8	scsi3addr[8];
+	__be64	wwid;
+	u8	volume_id[16];
+	u8	is_physical_device : 1;
+	u8	target_lun_valid : 1;
+	u8	expose_device : 1;
+	u8	no_uld_attach : 1;
+	u8	aio_enabled : 1;	/* only valid for physical disks */
+	u8	device_gone : 1;
+	u8	new_device : 1;
+	u8	keep_device : 1;
+	u8	volume_offline : 1;
+	u8	vendor[8];		/* bytes 8-15 of inquiry data */
+	u8	model[16];		/* bytes 16-31 of inquiry data */
+	u64	sas_address;
+	u8	raid_level;
+	u16	queue_depth;		/* max. queue_depth for this device */
+	u16	advertised_queue_depth;
+	u32	aio_handle;
+	u8	volume_status;
+	u8	active_path_index;
+	u8	path_map;
+	u8	bay;
+	u8	box[8];
+	u16	phys_connector[8];
+	int	offload_configured;	/* I/O accel RAID offload configured */
+	int	offload_enabled;	/* I/O accel RAID offload enabled */
+	int	offload_enabled_pending;
+	int	offload_to_mirror;	/* Send next I/O accelerator RAID */
+					/* offload request to mirror drive. */
+	struct raid_map *raid_map;	/* I/O accelerator RAID map */
+
+	struct pqi_sas_port *sas_port;
+	struct scsi_device *sdev;
+
+	struct list_head scsi_device_list_entry;
+	struct list_head new_device_list_entry;
+	struct list_head add_list_entry;
+	struct list_head delete_list_entry;
+};
+
+/* VPD inquiry pages */
+#define SCSI_VPD_SUPPORTED_PAGES	0x0	/* standard page */
+#define SCSI_VPD_DEVICE_ID		0x83	/* standard page */
+#define CISS_VPD_LV_DEVICE_GEOMETRY	0xc1	/* vendor-specific page */
+#define CISS_VPD_LV_OFFLOAD_STATUS	0xc2	/* vendor-specific page */
+#define CISS_VPD_LV_STATUS		0xc3	/* vendor-specific page */
+
+#define VPD_PAGE	(1 << 8)
+
+#pragma pack(1)
+
+/* structure for CISS_VPD_LV_STATUS */
+struct ciss_vpd_logical_volume_status {
+	u8	peripheral_info;
+	u8	page_code;
+	u8	reserved;
+	u8	page_length;
+	u8	volume_status;
+	u8	reserved2[3];
+	__be32	flags;
+};
+
+#pragma pack()
+
+/* constants for volume_status field of ciss_vpd_logical_volume_status */
+#define CISS_LV_OK					0
+#define CISS_LV_FAILED					1
+#define CISS_LV_NOT_CONFIGURED				2
+#define CISS_LV_DEGRADED				3
+#define CISS_LV_READY_FOR_RECOVERY			4
+#define CISS_LV_UNDERGOING_RECOVERY			5
+#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED		6
+#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM	7
+#define CISS_LV_HARDWARE_OVERHEATING			8
+#define CISS_LV_HARDWARE_HAS_OVERHEATED			9
+#define CISS_LV_UNDERGOING_EXPANSION			10
+#define CISS_LV_NOT_AVAILABLE				11
+#define CISS_LV_QUEUED_FOR_EXPANSION			12
+#define CISS_LV_DISABLED_SCSI_ID_CONFLICT		13
+#define CISS_LV_EJECTED					14
+#define CISS_LV_UNDERGOING_ERASE			15
+/* state 16 not used */
+#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD	17
+#define CISS_LV_UNDERGOING_RPI				18
+#define CISS_LV_PENDING_RPI				19
+#define CISS_LV_ENCRYPTED_NO_KEY			20
+/* state 21 not used */
+#define CISS_LV_UNDERGOING_ENCRYPTION			22
+#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING		23
+#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER	24
+#define CISS_LV_PENDING_ENCRYPTION			25
+#define CISS_LV_PENDING_ENCRYPTION_REKEYING		26
+#define CISS_LV_NOT_SUPPORTED				27
+#define CISS_LV_STATUS_UNAVAILABLE			255
+
+/* constants for flags field of ciss_vpd_logical_volume_status */
+#define CISS_LV_FLAGS_NO_HOST_IO	0x1	/* volume not available for */
+						/* host I/O */
+
+/* for SAS hosts and SAS expanders */
+struct pqi_sas_node {
+	struct device *parent_dev;
+	struct list_head port_list_head;
+};
+
+struct pqi_sas_port {
+	struct list_head port_list_entry;
+	u64	sas_address;
+	struct sas_port *port;
+	int	next_phy_index;
+	struct list_head phy_list_head;
+	struct pqi_sas_node *parent_node;
+	struct sas_rphy *rphy;
+};
+
+struct pqi_sas_phy {
+	struct list_head phy_list_entry;
+	struct sas_phy *phy;
+	struct pqi_sas_port *parent_port;
+	bool	added_to_port;
+};
+
+struct pqi_io_request {
+	atomic_t	refcount;
+	u16		index;
+	void (*io_complete_callback)(struct pqi_io_request *io_request,
+		void *context);
+	void		*context;
+	int		status;
+	struct scsi_cmnd *scmd;
+	void		*error_info;
+	struct pqi_sg_descriptor *sg_chain_buffer;
+	dma_addr_t	sg_chain_buffer_dma_handle;
+	void		*iu;
+	struct list_head request_list_entry;
+};
+
+/* for indexing into the pending_events[] field of struct pqi_ctrl_info */
+#define PQI_EVENT_HEARTBEAT		0
+#define PQI_EVENT_HOTPLUG		1
+#define PQI_EVENT_HARDWARE		2
+#define PQI_EVENT_PHYSICAL_DEVICE	3
+#define PQI_EVENT_LOGICAL_DEVICE	4
+#define PQI_EVENT_AIO_STATE_CHANGE	5
+#define PQI_EVENT_AIO_CONFIG_CHANGE	6
+#define PQI_NUM_SUPPORTED_EVENTS	7
+
+struct pqi_event {
+	bool	pending;
+	u8	event_type;
+	__le16	event_id;
+	__le32	additional_event_id;
+};
+
+#define PQI_RESERVED_IO_SLOTS_LUN_RESET			1
+#define PQI_RESERVED_IO_SLOTS_EVENT_ACK			PQI_NUM_SUPPORTED_EVENTS
+#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS	3
+#define PQI_RESERVED_IO_SLOTS				\
+	(PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
+	PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
+
+struct pqi_ctrl_info {
+	unsigned int	ctrl_id;
+	struct pci_dev	*pci_dev;
+	char		firmware_version[11];
+	void __iomem	*iomem_base;
+	struct pqi_ctrl_registers __iomem *registers;
+	struct pqi_device_registers __iomem *pqi_registers;
+	u32		max_sg_entries;
+	u32		config_table_offset;
+	u32		config_table_length;
+	u16		max_inbound_queues;
+	u16		max_elements_per_iq;
+	u16		max_iq_element_length;
+	u16		max_outbound_queues;
+	u16		max_elements_per_oq;
+	u16		max_oq_element_length;
+	u32		max_transfer_size;
+	u32		max_outstanding_requests;
+	u32		max_io_slots;
+	unsigned int	scsi_ml_can_queue;
+	unsigned short	sg_tablesize;
+	unsigned int	max_sectors;
+	u32		error_buffer_length;
+	void		*error_buffer;
+	dma_addr_t	error_buffer_dma_handle;
+	size_t		sg_chain_buffer_length;
+	unsigned int	num_queue_groups;
+	unsigned int	num_active_queue_groups;
+	u16		num_elements_per_iq;
+	u16		num_elements_per_oq;
+	u16		max_inbound_iu_length_per_firmware;
+	u16		max_inbound_iu_length;
+	unsigned int	max_sg_per_iu;
+	void		*admin_queue_memory_base;
+	u32		admin_queue_memory_length;
+	dma_addr_t	admin_queue_memory_base_dma_handle;
+	void		*queue_memory_base;
+	u32		queue_memory_length;
+	dma_addr_t	queue_memory_base_dma_handle;
+	struct pqi_admin_queues admin_queues;
+	struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
+	struct pqi_event_queue event_queue;
+	int		max_msix_vectors;
+	int		num_msix_vectors_enabled;
+	int		num_msix_vectors_initialized;
+	u32		msix_vectors[PQI_MAX_MSIX_VECTORS];
+	void		*intr_data[PQI_MAX_MSIX_VECTORS];
+	int		event_irq;
+	struct Scsi_Host *scsi_host;
+
+	struct mutex	scan_mutex;
+	u8		inbound_spanning_supported : 1;
+	u8		outbound_spanning_supported : 1;
+	u8		pqi_mode_enabled : 1;
+	u8		controller_online : 1;
+	u8		heartbeat_timer_started : 1;
+
+	struct list_head scsi_device_list;
+	spinlock_t	scsi_device_list_lock;
+
+	struct delayed_work rescan_work;
+	struct delayed_work update_time_work;
+
+	struct pqi_sas_node *sas_host;
+	u64		sas_address;
+
+	struct pqi_io_request *io_request_pool;
+	u16		next_io_request_slot;
+
+	struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
+	struct work_struct event_work;
+
+	atomic_t	num_interrupts;
+	int		previous_num_interrupts;
+	unsigned int	num_heartbeats_requested;
+	struct timer_list heartbeat_timer;
+
+	struct semaphore sync_request_sem;
+	struct semaphore lun_reset_sem;
+};
+
+enum pqi_ctrl_mode {
+	UNKNOWN,
+	PQI_MODE
+};
+
+/*
+ * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
+ */
+#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH	27
+
+/* 0 = no limit */
+#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH	0
+
+/* CISS commands */
+#define CISS_READ		0xc0
+#define CISS_REPORT_LOG		0xc2	/* Report Logical LUNs */
+#define CISS_REPORT_PHYS	0xc3	/* Report Physical LUNs */
+#define CISS_GET_RAID_MAP	0xc8
+
+/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
+#define CISS_REPORT_LOG_EXTENDED		0x1
+#define CISS_REPORT_PHYS_EXTENDED		0x2
+
+/* BMIC commands */
+#define BMIC_IDENTIFY_CONTROLLER		0x11
+#define BMIC_IDENTIFY_PHYSICAL_DEVICE		0x15
+#define BMIC_READ				0x26
+#define BMIC_WRITE				0x27
+#define BMIC_SENSE_CONTROLLER_PARAMETERS	0x64
+#define BMIC_SENSE_SUBSYSTEM_INFORMATION	0x66
+#define BMIC_WRITE_HOST_WELLNESS		0xa5
+#define BMIC_CACHE_FLUSH			0xc2
+
+#define SA_CACHE_FLUSH				0x01
+
+#define MASKED_DEVICE(lunid)			((lunid)[3] & 0xc0)
+#define CISS_GET_BUS(lunid)			((lunid)[7] & 0x3f)
+#define CISS_GET_LEVEL_2_TARGET(lunid)		((lunid)[6])
+#define CISS_GET_DRIVE_NUMBER(lunid)		\
+	(((CISS_GET_BUS((lunid)) - 1) << 8) +	\
+	CISS_GET_LEVEL_2_TARGET((lunid)))
+
+#define NO_TIMEOUT		((unsigned long) -1)
+
+#pragma pack(1)
+
+struct bmic_identify_controller {
+	u8	configured_logical_drive_count;
+	__le32	configuration_signature;
+	u8	firmware_version[4];
+	u8	reserved[145];
+	__le16	extended_logical_unit_count;
+	u8	reserved1[34];
+	__le16	firmware_build_number;
+	u8	reserved2[100];
+	u8	controller_mode;
+	u8	reserved3[32];
+};
+
+struct bmic_identify_physical_device {
+	u8	scsi_bus;		/* SCSI Bus number on controller */
+	u8	scsi_id;		/* SCSI ID on this bus */
+	__le16	block_size;		/* sector size in bytes */
+	__le32	total_blocks;		/* number for sectors on drive */
+	__le32	reserved_blocks;	/* controller reserved (RIS) */
+	u8	model[40];		/* Physical Drive Model */
+	u8	serial_number[40];	/* Drive Serial Number */
+	u8	firmware_revision[8];	/* drive firmware revision */
+	u8	scsi_inquiry_bits;	/* inquiry byte 7 bits */
+	u8	compaq_drive_stamp;	/* 0 means drive not stamped */
+	u8	last_failure_reason;
+	u8	flags;
+	u8	more_flags;
+	u8	scsi_lun;		/* SCSI LUN for phys drive */
+	u8	yet_more_flags;
+	u8	even_more_flags;
+	__le32	spi_speed_rules;
+	u8	phys_connector[2];	/* connector number on controller */
+	u8	phys_box_on_bus;	/* phys enclosure this drive resides */
+	u8	phys_bay_in_box;	/* phys drv bay this drive resides */
+	__le32	rpm;			/* drive rotational speed in RPM */
+	u8	device_type;		/* type of drive */
+	u8	sata_version;		/* only valid when device_type = */
+					/* BMIC_DEVICE_TYPE_SATA */
+	__le64	big_total_block_count;
+	__le64	ris_starting_lba;
+	__le32	ris_size;
+	u8	wwid[20];
+	u8	controller_phy_map[32];
+	__le16	phy_count;
+	u8	phy_connected_dev_type[256];
+	u8	phy_to_drive_bay_num[256];
+	__le16	phy_to_attached_dev_index[256];
+	u8	box_index;
+	u8	reserved;
+	__le16	extra_physical_drive_flags;
+	u8	negotiated_link_rate[256];
+	u8	phy_to_phy_map[256];
+	u8	redundant_path_present_map;
+	u8	redundant_path_failure_map;
+	u8	active_path_number;
+	__le16	alternate_paths_phys_connector[8];
+	u8	alternate_paths_phys_box_on_port[8];
+	u8	multi_lun_device_lun_count;
+	u8	minimum_good_fw_revision[8];
+	u8	unique_inquiry_bytes[20];
+	u8	current_temperature_degreesC;
+	u8	temperature_threshold_degreesC;
+	u8	max_temperature_degreesC;
+	u8	logical_blocks_per_phys_block_exp;
+	__le16	current_queue_depth_limit;
+	u8	switch_name[10];
+	__le16	switch_port;
+	u8	alternate_paths_switch_name[40];
+	u8	alternate_paths_switch_port[8];
+	__le16	power_on_hours;
+	__le16	percent_endurance_used;
+	u8	drive_authentication;
+	u8	smart_carrier_authentication;
+	u8	smart_carrier_app_fw_version;
+	u8	smart_carrier_bootloader_fw_version;
+	u8	encryption_key_name[64];
+	__le32	misc_drive_flags;
+	__le16	dek_index;
+	u8	padding[112];
+};
+
+#pragma pack()
+
+int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
+void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
+int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
+	struct pqi_scsi_dev *device);
+void pqi_remove_sas_device(struct pqi_scsi_dev *device);
+struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
+	struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);
+
+extern struct sas_function_template pqi_sas_transport_functions;
+
+#if !defined(readq)
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	u32 lower32;
+	u32 upper32;
+
+	lower32 = readl(addr);
+	upper32 = readl(addr + 4);
+
+	return ((u64)upper32 << 32) | lower32;
+}
+#endif
+
+#if !defined(writeq)
+#define writeq writeq
+static inline void writeq(u64 value, volatile void __iomem *addr)
+{
+	u32 lower32;
+	u32 upper32;
+
+	lower32 = lower_32_bits(value);
+	upper32 = upper_32_bits(value);
+
+	writel(lower32, addr);
+	writel(upper32, addr + 4);
+}
+#endif
+
+#endif /* _SMARTPQI_H */
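
For reference, the CISS_GET_DRIVE_NUMBER() macro in this header packs the controller bus and level-2 target of an 8-byte CISS LUN address into the 16-bit BMIC device index that smartpqi_init.c later splits across cdb[2] and cdb[9]. A standalone sketch with a hypothetical address (the macro bodies are copied from the header above):

    #include <stdio.h>
    #include <stdint.h>

    /* copied from smartpqi.h above */
    #define CISS_GET_BUS(lunid)		((lunid)[7] & 0x3f)
    #define CISS_GET_LEVEL_2_TARGET(lunid)	((lunid)[6])
    #define CISS_GET_DRIVE_NUMBER(lunid)		\
    	(((CISS_GET_BUS((lunid)) - 1) << 8) +	\
    	CISS_GET_LEVEL_2_TARGET((lunid)))

    int main(void)
    {
    	/* hypothetical physical-device address: bus 2, target 5 */
    	uint8_t lunid[8] = { 0, 0, 0, 0, 0, 0, 0x05, 0x02 };

    	/* (2 - 1) << 8, plus 5, gives index 261 */
    	printf("drive number = %d\n", CISS_GET_DRIVE_NUMBER(lunid));
    	return 0;
    }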

+ 6303 - 0
drivers/scsi/smartpqi/smartpqi_init.c

@@ -0,0 +1,6303 @@
+/*
+ *    driver for Microsemi PQI-based storage controllers
+ *    Copyright (c) 2016 Microsemi Corporation
+ *    Copyright (c) 2016 PMC-Sierra, Inc.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; version 2 of the License.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/cciss_ioctl.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_transport_sas.h>
+#include <asm/unaligned.h>
+#include "smartpqi.h"
+#include "smartpqi_sis.h"
+
+#if !defined(BUILD_TIMESTAMP)
+#define BUILD_TIMESTAMP
+#endif
+
+#define DRIVER_VERSION		"0.9.13-370"
+#define DRIVER_MAJOR		0
+#define DRIVER_MINOR		9
+#define DRIVER_RELEASE		13
+#define DRIVER_REVISION		370
+
+#define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
+#define DRIVER_NAME_SHORT	"smartpqi"
+
+MODULE_AUTHOR("Microsemi");
+MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
+	DRIVER_VERSION);
+MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+#define PQI_ENABLE_MULTI_QUEUE_SUPPORT	0
+
+static char *hpe_branded_controller = "HPE Smart Array Controller";
+static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
+static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
+static void pqi_scan_start(struct Scsi_Host *shost);
+static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_queue_group *queue_group, enum pqi_io_path path,
+	struct pqi_io_request *io_request);
+static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_iu_header *request, unsigned int flags,
+	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
+static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
+	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
+	unsigned int cdb_length, struct pqi_queue_group *queue_group,
+	struct pqi_encryption_info *encryption_info);
+
+/* for flags argument to pqi_submit_raid_request_synchronous() */
+#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1
+
+static struct scsi_transport_template *pqi_sas_transport_template;
+
+static atomic_t pqi_controller_count = ATOMIC_INIT(0);
+
+static int pqi_disable_device_id_wildcards;
+module_param_named(disable_device_id_wildcards,
+	pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(disable_device_id_wildcards,
+	"Disable device ID wildcards.");
+
+static char *raid_levels[] = {
+	"RAID-0",
+	"RAID-4",
+	"RAID-1(1+0)",
+	"RAID-5",
+	"RAID-5+1",
+	"RAID-ADG",
+	"RAID-1(ADM)",
+};
+
+static char *pqi_raid_level_to_string(u8 raid_level)
+{
+	if (raid_level < ARRAY_SIZE(raid_levels))
+		return raid_levels[raid_level];
+
+	return "";
+}
+
+#define SA_RAID_0		0
+#define SA_RAID_4		1
+#define SA_RAID_1		2	/* also used for RAID 10 */
+#define SA_RAID_5		3	/* also used for RAID 50 */
+#define SA_RAID_51		4
+#define SA_RAID_6		5	/* also used for RAID 60 */
+#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
+#define SA_RAID_MAX		SA_RAID_ADM
+#define SA_RAID_UNKNOWN		0xff
+
+static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
+{
+	scmd->scsi_done(scmd);
+}
+
+static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
+{
+	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
+}
+
+static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
+{
+	void *hostdata = shost_priv(shost);
+
+	return *((struct pqi_ctrl_info **)hostdata);
+}
+
+static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
+{
+	return !device->is_physical_device;
+}
+
+static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+	return !ctrl_info->controller_online;
+}
+
+static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
+{
+	if (ctrl_info->controller_online)
+		if (!sis_is_firmware_running(ctrl_info))
+			pqi_take_ctrl_offline(ctrl_info);
+}
+
+static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
+{
+	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
+}
+
+static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	return sis_read_driver_scratch(ctrl_info);
+}
+
+static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
+	enum pqi_ctrl_mode mode)
+{
+	sis_write_driver_scratch(ctrl_info, mode);
+}
+
+#define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)
+
+static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
+{
+	schedule_delayed_work(&ctrl_info->rescan_work,
+		PQI_RESCAN_WORK_INTERVAL);
+}
+
+static int pqi_map_single(struct pci_dev *pci_dev,
+	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
+	size_t buffer_length, int data_direction)
+{
+	dma_addr_t bus_address;
+
+	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+		return 0;
+
+	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+		data_direction);
+	if (pci_dma_mapping_error(pci_dev, bus_address))
+		return -ENOMEM;
+
+	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
+	put_unaligned_le32(buffer_length, &sg_descriptor->length);
+	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+
+	return 0;
+}
+
+static void pqi_pci_unmap(struct pci_dev *pci_dev,
+	struct pqi_sg_descriptor *descriptors, int num_descriptors,
+	int data_direction)
+{
+	int i;
+
+	if (data_direction == PCI_DMA_NONE)
+		return;
+
+	for (i = 0; i < num_descriptors; i++)
+		pci_unmap_single(pci_dev,
+			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
+			get_unaligned_le32(&descriptors[i].length),
+			data_direction);
+}
+
+static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_raid_path_request *request, u8 cmd,
+	u8 *scsi3addr, void *buffer, size_t buffer_length,
+	u16 vpd_page, int *pci_direction)
+{
+	u8 *cdb;
+	int pci_dir;
+
+	memset(request, 0, sizeof(*request));
+
+	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
+		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
+		&request->header.iu_length);
+	put_unaligned_le32(buffer_length, &request->buffer_length);
+	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
+	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
+
+	cdb = request->cdb;
+
+	switch (cmd) {
+	case INQUIRY:
+		request->data_direction = SOP_READ_FLAG;
+		cdb[0] = INQUIRY;
+		if (vpd_page & VPD_PAGE) {
+			cdb[1] = 0x1;
+			cdb[2] = (u8)vpd_page;
+		}
+		cdb[4] = (u8)buffer_length;
+		break;
+	case CISS_REPORT_LOG:
+	case CISS_REPORT_PHYS:
+		request->data_direction = SOP_READ_FLAG;
+		cdb[0] = cmd;
+		if (cmd == CISS_REPORT_PHYS)
+			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
+		else
+			cdb[1] = CISS_REPORT_LOG_EXTENDED;
+		put_unaligned_be32(buffer_length, &cdb[6]);
+		break;
+	case CISS_GET_RAID_MAP:
+		request->data_direction = SOP_READ_FLAG;
+		cdb[0] = CISS_READ;
+		cdb[1] = CISS_GET_RAID_MAP;
+		put_unaligned_be32(buffer_length, &cdb[6]);
+		break;
+	case SA_CACHE_FLUSH:
+		request->data_direction = SOP_WRITE_FLAG;
+		cdb[0] = BMIC_WRITE;
+		cdb[6] = BMIC_CACHE_FLUSH;
+		put_unaligned_be16(buffer_length, &cdb[7]);
+		break;
+	case BMIC_IDENTIFY_CONTROLLER:
+	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
+		request->data_direction = SOP_READ_FLAG;
+		cdb[0] = BMIC_READ;
+		cdb[6] = cmd;
+		put_unaligned_be16(buffer_length, &cdb[7]);
+		break;
+	case BMIC_WRITE_HOST_WELLNESS:
+		request->data_direction = SOP_WRITE_FLAG;
+		cdb[0] = BMIC_WRITE;
+		cdb[6] = cmd;
+		put_unaligned_be16(buffer_length, &cdb[7]);
+		break;
+	default:
+		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
+			cmd);
+		WARN_ON(cmd);
+		break;
+	}
+
+	switch (request->data_direction) {
+	case SOP_READ_FLAG:
+		pci_dir = PCI_DMA_FROMDEVICE;
+		break;
+	case SOP_WRITE_FLAG:
+		pci_dir = PCI_DMA_TODEVICE;
+		break;
+	case SOP_NO_DIRECTION_FLAG:
+		pci_dir = PCI_DMA_NONE;
+		break;
+	default:
+		pci_dir = PCI_DMA_BIDIRECTIONAL;
+		break;
+	}
+
+	*pci_direction = pci_dir;
+
+	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
+		buffer, buffer_length, pci_dir);
+}
+
+static struct pqi_io_request *pqi_alloc_io_request(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	struct pqi_io_request *io_request;
+	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */
+
+	while (1) {
+		io_request = &ctrl_info->io_request_pool[i];
+		if (atomic_inc_return(&io_request->refcount) == 1)
+			break;
+		atomic_dec(&io_request->refcount);
+		i = (i + 1) % ctrl_info->max_io_slots;
+	}
+
+	/* benignly racy */
+	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;
+
+	io_request->scmd = NULL;
+	io_request->status = 0;
+	io_request->error_info = NULL;
+
+	return io_request;
+}
+
+static void pqi_free_io_request(struct pqi_io_request *io_request)
+{
+	atomic_dec(&io_request->refcount);
+}
+
+static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
+	struct bmic_identify_controller *buffer)
+{
+	int rc;
+	int pci_direction;
+	struct pqi_raid_path_request request;
+
+	rc = pqi_build_raid_path_request(ctrl_info, &request,
+		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
+		sizeof(*buffer), 0, &pci_direction);
+	if (rc)
+		return rc;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+		NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+		pci_direction);
+
+	return rc;
+}
+
+static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
+	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
+{
+	int rc;
+	int pci_direction;
+	struct pqi_raid_path_request request;
+
+	rc = pqi_build_raid_path_request(ctrl_info, &request,
+		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
+		&pci_direction);
+	if (rc)
+		return rc;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+		NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+		pci_direction);
+
+	return rc;
+}
+
+static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device,
+	struct bmic_identify_physical_device *buffer,
+	size_t buffer_length)
+{
+	int rc;
+	int pci_direction;
+	u16 bmic_device_index;
+	struct pqi_raid_path_request request;
+
+	rc = pqi_build_raid_path_request(ctrl_info, &request,
+		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
+		buffer_length, 0, &pci_direction);
+	if (rc)
+		return rc;
+
+	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
+	request.cdb[2] = (u8)bmic_device_index;
+	request.cdb[9] = (u8)(bmic_device_index >> 8);
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+		pci_direction);
+
+	return rc;
+}
+
+#define SA_CACHE_FLUSH_BUFFER_LENGTH	4
+
+static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct pqi_raid_path_request request;
+	int pci_direction;
+	u8 *buffer;
+
+	/*
+	 * Don't bother trying to flush the cache if the controller is
+	 * locked up.
+	 */
+	if (pqi_ctrl_offline(ctrl_info))
+		return -ENXIO;
+
+	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	rc = pqi_build_raid_path_request(ctrl_info, &request,
+		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
+		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
+	if (rc)
+		goto out;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+		pci_direction);
+
+out:
+	kfree(buffer);
+
+	return rc;
+}
+
+static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
+	void *buffer, size_t buffer_length)
+{
+	int rc;
+	struct pqi_raid_path_request request;
+	int pci_direction;
+
+	rc = pqi_build_raid_path_request(ctrl_info, &request,
+		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
+		buffer_length, 0, &pci_direction);
+	if (rc)
+		return rc;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+		pci_direction);
+
+	return rc;
+}
+
+#pragma pack(1)
+
+struct bmic_host_wellness_driver_version {
+	u8	start_tag[4];
+	u8	driver_version_tag[2];
+	__le16	driver_version_length;
+	char	driver_version[32];
+	u8	end_tag[2];
+};
+
+#pragma pack()
+
+static int pqi_write_driver_version_to_host_wellness(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct bmic_host_wellness_driver_version *buffer;
+	size_t buffer_length;
+
+	buffer_length = sizeof(*buffer);
+
+	buffer = kmalloc(buffer_length, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	buffer->start_tag[0] = '<';
+	buffer->start_tag[1] = 'H';
+	buffer->start_tag[2] = 'W';
+	buffer->start_tag[3] = '>';
+	buffer->driver_version_tag[0] = 'D';
+	buffer->driver_version_tag[1] = 'V';
+	put_unaligned_le16(sizeof(buffer->driver_version),
+		&buffer->driver_version_length);
+	strncpy(buffer->driver_version, DRIVER_VERSION,
+		sizeof(buffer->driver_version) - 1);
+	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+	buffer->end_tag[0] = 'Z';
+	buffer->end_tag[1] = 'Z';
+
+	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
+
+	kfree(buffer);
+
+	return rc;
+}
+
+#pragma pack(1)
+
+struct bmic_host_wellness_time {
+	u8	start_tag[4];
+	u8	time_tag[2];
+	__le16	time_length;
+	u8	time[8];
+	u8	dont_write_tag[2];
+	u8	end_tag[2];
+};
+
+#pragma pack()
+
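+/*
+ * The time payload is eight BCD-encoded bytes: hour, minute, second, a
+ * reserved zero byte, month, day, century, and year within the century,
+ * as filled in below.
+ */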
+static int pqi_write_current_time_to_host_wellness(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct bmic_host_wellness_time *buffer;
+	size_t buffer_length;
+	time64_t local_time;
+	unsigned int year;
+	struct timeval time;
+	struct rtc_time tm;
+
+	buffer_length = sizeof(*buffer);
+
+	buffer = kmalloc(buffer_length, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	buffer->start_tag[0] = '<';
+	buffer->start_tag[1] = 'H';
+	buffer->start_tag[2] = 'W';
+	buffer->start_tag[3] = '>';
+	buffer->time_tag[0] = 'T';
+	buffer->time_tag[1] = 'D';
+	put_unaligned_le16(sizeof(buffer->time),
+		&buffer->time_length);
+
+	do_gettimeofday(&time);
+	local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
+	rtc_time64_to_tm(local_time, &tm);
+	year = tm.tm_year + 1900;
+
+	buffer->time[0] = bin2bcd(tm.tm_hour);
+	buffer->time[1] = bin2bcd(tm.tm_min);
+	buffer->time[2] = bin2bcd(tm.tm_sec);
+	buffer->time[3] = 0;
+	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
+	buffer->time[5] = bin2bcd(tm.tm_mday);
+	buffer->time[6] = bin2bcd(year / 100);
+	buffer->time[7] = bin2bcd(year % 100);
+
+	buffer->dont_write_tag[0] = 'D';
+	buffer->dont_write_tag[1] = 'W';
+	buffer->end_tag[0] = 'Z';
+	buffer->end_tag[1] = 'Z';
+
+	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);
+
+	kfree(buffer);
+
+	return rc;
+}
+
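+/* Push the current time up to the controller once every 24 hours. */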
+#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)
+
+static void pqi_update_time_worker(struct work_struct *work)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
+		update_time_work);
+
+	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
+	if (rc)
+		dev_warn(&ctrl_info->pci_dev->dev,
+			"error updating time on controller\n");
+
+	schedule_delayed_work(&ctrl_info->update_time_work,
+		PQI_UPDATE_TIME_WORK_INTERVAL);
+}
+
+static inline void pqi_schedule_update_time_worker(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	schedule_delayed_work(&ctrl_info->update_time_work, 0);
+}
+
+static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+	void *buffer, size_t buffer_length)
+{
+	int rc;
+	int pci_direction;
+	struct pqi_raid_path_request request;
+
+	rc = pqi_build_raid_path_request(ctrl_info, &request,
+		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
+	if (rc)
+		return rc;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+		NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+		pci_direction);
+
+	return rc;
+}
+
+static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
+	void **buffer)
+{
+	int rc;
+	size_t lun_list_length;
+	size_t lun_data_length;
+	size_t new_lun_list_length;
+	void *lun_data = NULL;
+	struct report_lun_header *report_lun_header;
+
+	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
+	if (!report_lun_header) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
+		sizeof(*report_lun_header));
+	if (rc)
+		goto out;
+
+	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);
+
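+	/*
+	 * The LUN list can grow between the header-only query above and the
+	 * full fetch below; if the controller reports a longer list on the
+	 * second pass, retry with the larger size.
+	 */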
+again:
+	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;
+
+	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
+	if (!lun_data) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (lun_list_length == 0) {
+		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
+		goto out;
+	}
+
+	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
+	if (rc)
+		goto out;
+
+	new_lun_list_length = get_unaligned_be32(
+		&((struct report_lun_header *)lun_data)->list_length);
+
+	if (new_lun_list_length > lun_list_length) {
+		lun_list_length = new_lun_list_length;
+		kfree(lun_data);
+		goto again;
+	}
+
+out:
+	kfree(report_lun_header);
+
+	if (rc) {
+		kfree(lun_data);
+		lun_data = NULL;
+	}
+
+	*buffer = lun_data;
+
+	return rc;
+}
+
+static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
+	void **buffer)
+{
+	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
+		buffer);
+}
+
+static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
+	void **buffer)
+{
+	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
+}
+
+static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
+	struct report_phys_lun_extended **physdev_list,
+	struct report_log_lun_extended **logdev_list)
+{
+	int rc;
+	size_t logdev_list_length;
+	size_t logdev_data_length;
+	struct report_log_lun_extended *internal_logdev_list;
+	struct report_log_lun_extended *logdev_data;
+	struct report_lun_header report_lun_header;
+
+	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
+	if (rc)
+		dev_err(&ctrl_info->pci_dev->dev,
+			"report physical LUNs failed\n");
+
+	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
+	if (rc)
+		dev_err(&ctrl_info->pci_dev->dev,
+			"report logical LUNs failed\n");
+
+	/*
+	 * Tack the controller itself onto the end of the logical device list.
+	 */
+
+	logdev_data = *logdev_list;
+
+	if (logdev_data) {
+		logdev_list_length =
+			get_unaligned_be32(&logdev_data->header.list_length);
+	} else {
+		memset(&report_lun_header, 0, sizeof(report_lun_header));
+		logdev_data =
+			(struct report_log_lun_extended *)&report_lun_header;
+		logdev_list_length = 0;
+	}
+
+	logdev_data_length = sizeof(struct report_lun_header) +
+		logdev_list_length;
+
+	internal_logdev_list = kmalloc(logdev_data_length +
+		sizeof(struct report_log_lun_extended), GFP_KERNEL);
+	if (!internal_logdev_list) {
+		kfree(*logdev_list);
+		*logdev_list = NULL;
+		return -ENOMEM;
+	}
+
+	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
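+	/*
+	 * The extra entry appended here is left zeroed; an all-zero LUN ID
+	 * is how the controller LUN itself is addressed.
+	 */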
+	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
+		sizeof(struct report_log_lun_extended_entry));
+	put_unaligned_be32(logdev_list_length +
+		sizeof(struct report_log_lun_extended_entry),
+		&internal_logdev_list->header.list_length);
+
+	kfree(*logdev_list);
+	*logdev_list = internal_logdev_list;
+
+	return 0;
+}
+
+static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
+	int bus, int target, int lun)
+{
+	device->bus = bus;
+	device->target = target;
+	device->lun = lun;
+}
+
+static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
+{
+	u8 *scsi3addr;
+	u32 lunid;
+
+	scsi3addr = device->scsi3addr;
+	lunid = get_unaligned_le32(scsi3addr);
+
+	if (pqi_is_hba_lunid(scsi3addr)) {
+		/* The specified device is the controller. */
+		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
+		device->target_lun_valid = true;
+		return;
+	}
+
+	if (pqi_is_logical_device(device)) {
+		pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
+			lunid & 0x3fff);
+		device->target_lun_valid = true;
+		return;
+	}
+
+	/*
+	 * Defer target and LUN assignment for non-controller physical devices
+	 * because the SAS transport layer will make these assignments later.
+	 */
+	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
+}
+
+static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	u8 raid_level;
+	u8 *buffer;
+
+	raid_level = SA_RAID_UNKNOWN;
+
+	buffer = kmalloc(64, GFP_KERNEL);
+	if (buffer) {
+		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
+		if (rc == 0) {
+			raid_level = buffer[8];
+			if (raid_level > SA_RAID_MAX)
+				raid_level = SA_RAID_UNKNOWN;
+		}
+		kfree(buffer);
+	}
+
+	device->raid_level = raid_level;
+}
+
+static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, struct raid_map *raid_map)
+{
+	char *err_msg;
+	u32 raid_map_size;
+	u32 r5or6_blocks_per_row;
+	unsigned int num_phys_disks;
+	unsigned int num_raid_map_entries;
+
+	raid_map_size = get_unaligned_le32(&raid_map->structure_size);
+
+	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
+		err_msg = "RAID map too small";
+		goto bad_raid_map;
+	}
+
+	if (raid_map_size > sizeof(*raid_map)) {
+		err_msg = "RAID map too large";
+		goto bad_raid_map;
+	}
+
+	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
+		(get_unaligned_le16(&raid_map->data_disks_per_row) +
+		get_unaligned_le16(&raid_map->metadata_disks_per_row));
+	num_raid_map_entries = num_phys_disks *
+		get_unaligned_le16(&raid_map->row_cnt);
+
+	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
+		err_msg = "invalid number of map entries in RAID map";
+		goto bad_raid_map;
+	}
+
+	if (device->raid_level == SA_RAID_1) {
+		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
+			err_msg = "invalid RAID-1 map";
+			goto bad_raid_map;
+		}
+	} else if (device->raid_level == SA_RAID_ADM) {
+		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
+			err_msg = "invalid RAID-1(ADM) map";
+			goto bad_raid_map;
+		}
+	} else if ((device->raid_level == SA_RAID_5 ||
+		device->raid_level == SA_RAID_6) &&
+		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
+		/* RAID 50/60 */
+		r5or6_blocks_per_row =
+			get_unaligned_le16(&raid_map->strip_size) *
+			get_unaligned_le16(&raid_map->data_disks_per_row);
+		if (r5or6_blocks_per_row == 0) {
+			err_msg = "invalid RAID-5 or RAID-6 map";
+			goto bad_raid_map;
+		}
+	}
+
+	return 0;
+
+bad_raid_map:
+	dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);
+
+	return -EINVAL;
+}
+
+static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	int pci_direction;
+	struct pqi_raid_path_request request;
+	struct raid_map *raid_map;
+
+	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
+	if (!raid_map)
+		return -ENOMEM;
+
+	rc = pqi_build_raid_path_request(ctrl_info, &request,
+		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
+		sizeof(*raid_map), 0, &pci_direction);
+	if (rc)
+		goto error;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+		NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+		pci_direction);
+
+	if (rc)
+		goto error;
+
+	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
+	if (rc)
+		goto error;
+
+	device->raid_map = raid_map;
+
+	return 0;
+
+error:
+	kfree(raid_map);
+
+	return rc;
+}
+
+static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	u8 *buffer;
+	u8 offload_status;
+
+	buffer = kmalloc(64, GFP_KERNEL);
+	if (!buffer)
+		return;
+
+	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
+	if (rc)
+		goto out;
+
+#define OFFLOAD_STATUS_BYTE	4
+#define OFFLOAD_CONFIGURED_BIT	0x1
+#define OFFLOAD_ENABLED_BIT	0x2
+
+	offload_status = buffer[OFFLOAD_STATUS_BYTE];
+	device->offload_configured =
+		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
+	if (device->offload_configured) {
+		device->offload_enabled_pending =
+			!!(offload_status & OFFLOAD_ENABLED_BIT);
+		if (pqi_get_raid_map(ctrl_info, device))
+			device->offload_enabled_pending = false;
+	}
+
+out:
+	kfree(buffer);
+}
+
+/*
+ * Use vendor-specific VPD to determine online/offline status of a volume.
+ */
+
+static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	size_t page_length;
+	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
+	bool volume_offline = true;
+	u32 volume_flags;
+	struct ciss_vpd_logical_volume_status *vpd;
+
+	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
+	if (!vpd)
+		goto no_buffer;
+
+	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
+		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
+	if (rc)
+		goto out;
+
+	page_length = offsetof(struct ciss_vpd_logical_volume_status,
+		volume_status) + vpd->page_length;
+	if (page_length < sizeof(*vpd))
+		goto out;
+
+	volume_status = vpd->volume_status;
+	volume_flags = get_unaligned_be32(&vpd->flags);
+	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;
+
+out:
+	kfree(vpd);
+no_buffer:
+	device->volume_status = volume_status;
+	device->volume_offline = volume_offline;
+}
+
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	u8 *buffer;
+
+	buffer = kmalloc(64, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	/* Send an inquiry to the device to see what it is. */
+	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+	if (rc)
+		goto out;
+
+	scsi_sanitize_inquiry_string(&buffer[8], 8);
+	scsi_sanitize_inquiry_string(&buffer[16], 16);
+
+	device->devtype = buffer[0] & 0x1f;
+	memcpy(device->vendor, &buffer[8],
+		sizeof(device->vendor));
+	memcpy(device->model, &buffer[16],
+		sizeof(device->model));
+
+	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+		pqi_get_raid_level(ctrl_info, device);
+		pqi_get_offload_status(ctrl_info, device);
+		pqi_get_volume_status(ctrl_info, device);
+	}
+
+out:
+	kfree(buffer);
+
+	return rc;
+}
+
+static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device,
+	struct bmic_identify_physical_device *id_phys)
+{
+	int rc;
+
+	memset(id_phys, 0, sizeof(*id_phys));
+
+	rc = pqi_identify_physical_device(ctrl_info, device,
+		id_phys, sizeof(*id_phys));
+	if (rc) {
+		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+		return;
+	}
+
+	device->queue_depth =
+		get_unaligned_le16(&id_phys->current_queue_depth_limit);
+	device->device_type = id_phys->device_type;
+	device->active_path_index = id_phys->active_path_number;
+	device->path_map = id_phys->redundant_path_present_map;
+	memcpy(&device->box,
+		&id_phys->alternate_paths_phys_box_on_port,
+		sizeof(device->box));
+	memcpy(&device->phys_connector,
+		&id_phys->alternate_paths_phys_connector,
+		sizeof(device->phys_connector));
+	device->bay = id_phys->phys_bay_in_box;
+}
+
+static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	char *status;
+	static const char unknown_state_str[] =
+		"Volume is in an unknown state (%u)";
+	char unknown_state_buffer[sizeof(unknown_state_str) + 10];
+
+	switch (device->volume_status) {
+	case CISS_LV_OK:
+		status = "Volume online";
+		break;
+	case CISS_LV_FAILED:
+		status = "Volume failed";
+		break;
+	case CISS_LV_NOT_CONFIGURED:
+		status = "Volume not configured";
+		break;
+	case CISS_LV_DEGRADED:
+		status = "Volume degraded";
+		break;
+	case CISS_LV_READY_FOR_RECOVERY:
+		status = "Volume ready for recovery operation";
+		break;
+	case CISS_LV_UNDERGOING_RECOVERY:
+		status = "Volume undergoing recovery";
+		break;
+	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
+		status = "Wrong physical drive was replaced";
+		break;
+	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
+		status = "A physical drive not properly connected";
+		break;
+	case CISS_LV_HARDWARE_OVERHEATING:
+		status = "Hardware is overheating";
+		break;
+	case CISS_LV_HARDWARE_HAS_OVERHEATED:
+		status = "Hardware has overheated";
+		break;
+	case CISS_LV_UNDERGOING_EXPANSION:
+		status = "Volume undergoing expansion";
+		break;
+	case CISS_LV_NOT_AVAILABLE:
+		status = "Volume waiting for transforming volume";
+		break;
+	case CISS_LV_QUEUED_FOR_EXPANSION:
+		status = "Volume queued for expansion";
+		break;
+	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
+		status = "Volume disabled due to SCSI ID conflict";
+		break;
+	case CISS_LV_EJECTED:
+		status = "Volume has been ejected";
+		break;
+	case CISS_LV_UNDERGOING_ERASE:
+		status = "Volume undergoing background erase";
+		break;
+	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
+		status = "Volume ready for predictive spare rebuild";
+		break;
+	case CISS_LV_UNDERGOING_RPI:
+		status = "Volume undergoing rapid parity initialization";
+		break;
+	case CISS_LV_PENDING_RPI:
+		status = "Volume queued for rapid parity initialization";
+		break;
+	case CISS_LV_ENCRYPTED_NO_KEY:
+		status = "Encrypted volume inaccessible - key not present";
+		break;
+	case CISS_LV_UNDERGOING_ENCRYPTION:
+		status = "Volume undergoing encryption process";
+		break;
+	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
+		status = "Volume undergoing encryption re-keying process";
+		break;
+	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
+		status =
+			"Encrypted volume inaccessible - disabled on ctrl";
+		break;
+	case CISS_LV_PENDING_ENCRYPTION:
+		status = "Volume pending migration to encrypted state";
+		break;
+	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
+		status = "Volume pending encryption rekeying";
+		break;
+	case CISS_LV_NOT_SUPPORTED:
+		status = "Volume not supported on this controller";
+		break;
+	case CISS_LV_STATUS_UNAVAILABLE:
+		status = "Volume status not available";
+		break;
+	default:
+		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
+			unknown_state_str, device->volume_status);
+		status = unknown_state_buffer;
+		break;
+	}
+
+	dev_info(&ctrl_info->pci_dev->dev,
+		"scsi %d:%d:%d:%d %s\n",
+		ctrl_info->scsi_host->host_no,
+		device->bus, device->target, device->lun, status);
+}
+
+static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
+	struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
+{
+	struct pqi_scsi_dev *device;
+
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
+			continue;
+		if (pqi_is_logical_device(device))
+			continue;
+		if (device->aio_handle == aio_handle)
+			return device;
+	}
+
+	return NULL;
+}
+
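+/*
+ * A logical drive's queue depth is the sum of the queue depths of the
+ * physical disks referenced by its RAID map, since those disks service
+ * the bypass I/O on its behalf.
+ */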
+static void pqi_update_logical_drive_queue_depth(
+	struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
+{
+	unsigned int i;
+	struct raid_map *raid_map;
+	struct raid_map_disk_data *disk_data;
+	struct pqi_scsi_dev *phys_disk;
+	unsigned int num_phys_disks;
+	unsigned int num_raid_map_entries;
+	unsigned int queue_depth;
+
+	logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;
+
+	raid_map = logical_drive->raid_map;
+	if (!raid_map)
+		return;
+
+	disk_data = raid_map->disk_data;
+	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
+		(get_unaligned_le16(&raid_map->data_disks_per_row) +
+		get_unaligned_le16(&raid_map->metadata_disks_per_row));
+	num_raid_map_entries = num_phys_disks *
+		get_unaligned_le16(&raid_map->row_cnt);
+
+	queue_depth = 0;
+	for (i = 0; i < num_raid_map_entries; i++) {
+		phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
+			disk_data[i].aio_handle);
+
+		if (!phys_disk) {
+			dev_warn(&ctrl_info->pci_dev->dev,
+				"failed to find physical disk for logical drive %016llx\n",
+				get_unaligned_be64(logical_drive->scsi3addr));
+			logical_drive->offload_enabled = false;
+			logical_drive->offload_enabled_pending = false;
+			kfree(raid_map);
+			logical_drive->raid_map = NULL;
+			return;
+		}
+
+		queue_depth += phys_disk->queue_depth;
+	}
+
+	logical_drive->queue_depth = queue_depth;
+}
+
+static void pqi_update_all_logical_drive_queue_depths(
+	struct pqi_ctrl_info *ctrl_info)
+{
+	struct pqi_scsi_dev *device;
+
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
+			continue;
+		if (!pqi_is_logical_device(device))
+			continue;
+		pqi_update_logical_drive_queue_depth(ctrl_info, device);
+	}
+}
+
+static void pqi_rescan_worker(struct work_struct *work)
+{
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
+		rescan_work);
+
+	pqi_scan_scsi_devices(ctrl_info);
+}
+
+static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+
+	if (pqi_is_logical_device(device))
+		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
+			device->target, device->lun);
+	else
+		rc = pqi_add_sas_device(ctrl_info->sas_host, device);
+
+	return rc;
+}
+
+static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	if (pqi_is_logical_device(device))
+		scsi_remove_device(device->sdev);
+	else
+		pqi_remove_sas_device(device);
+}
+
+/* Assumes the SCSI device list lock is held. */
+
+static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
+	int bus, int target, int lun)
+{
+	struct pqi_scsi_dev *device;
+
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry)
+		if (device->bus == bus && device->target == target &&
+			device->lun == lun)
+			return device;
+
+	return NULL;
+}
+
+static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
+	struct pqi_scsi_dev *dev2)
+{
+	if (dev1->is_physical_device != dev2->is_physical_device)
+		return false;
+
+	if (dev1->is_physical_device)
+		return dev1->wwid == dev2->wwid;
+
+	return memcmp(dev1->volume_id, dev2->volume_id,
+		sizeof(dev1->volume_id)) == 0;
+}
+
+enum pqi_find_result {
+	DEVICE_NOT_FOUND,
+	DEVICE_CHANGED,
+	DEVICE_SAME,
+};
+
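+/*
+ * Look up device_to_find in the existing device list by SCSI3 address.
+ * DEVICE_SAME: an identical device is already present.  DEVICE_CHANGED:
+ * the address is in use but the device at it differs (or the new volume
+ * is offline).  DEVICE_NOT_FOUND: the address is not in the list.
+ */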
+static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device_to_find,
+	struct pqi_scsi_dev **matching_device)
+{
+	struct pqi_scsi_dev *device;
+
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
+			device->scsi3addr)) {
+			*matching_device = device;
+			if (pqi_device_equal(device_to_find, device)) {
+				if (device_to_find->volume_offline)
+					return DEVICE_CHANGED;
+				return DEVICE_SAME;
+			}
+			return DEVICE_CHANGED;
+		}
+	}
+
+	return DEVICE_NOT_FOUND;
+}
+
+static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
+	char *action, struct pqi_scsi_dev *device)
+{
+	dev_info(&ctrl_info->pci_dev->dev,
+		"%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
+		action,
+		ctrl_info->scsi_host->host_no,
+		device->bus,
+		device->target,
+		device->lun,
+		scsi_device_type(device->devtype),
+		device->vendor,
+		device->model,
+		pqi_raid_level_to_string(device->raid_level),
+		device->offload_configured ? '+' : '-',
+		device->offload_enabled_pending ? '+' : '-',
+		device->expose_device ? '+' : '-',
+		device->queue_depth);
+}
+
+/* Assumes the SCSI device list lock is held. */
+
+static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
+	struct pqi_scsi_dev *new_device)
+{
+	existing_device->devtype = new_device->devtype;
+	existing_device->device_type = new_device->device_type;
+	existing_device->bus = new_device->bus;
+	if (new_device->target_lun_valid) {
+		existing_device->target = new_device->target;
+		existing_device->lun = new_device->lun;
+		existing_device->target_lun_valid = true;
+	}
+
+	/* By definition, the scsi3addr and wwid fields are already the same. */
+
+	existing_device->is_physical_device = new_device->is_physical_device;
+	existing_device->expose_device = new_device->expose_device;
+	existing_device->no_uld_attach = new_device->no_uld_attach;
+	existing_device->aio_enabled = new_device->aio_enabled;
+	memcpy(existing_device->vendor, new_device->vendor,
+		sizeof(existing_device->vendor));
+	memcpy(existing_device->model, new_device->model,
+		sizeof(existing_device->model));
+	existing_device->sas_address = new_device->sas_address;
+	existing_device->raid_level = new_device->raid_level;
+	existing_device->queue_depth = new_device->queue_depth;
+	existing_device->aio_handle = new_device->aio_handle;
+	existing_device->volume_status = new_device->volume_status;
+	existing_device->active_path_index = new_device->active_path_index;
+	existing_device->path_map = new_device->path_map;
+	existing_device->bay = new_device->bay;
+	memcpy(existing_device->box, new_device->box,
+		sizeof(existing_device->box));
+	memcpy(existing_device->phys_connector, new_device->phys_connector,
+		sizeof(existing_device->phys_connector));
+	existing_device->offload_configured = new_device->offload_configured;
+	existing_device->offload_enabled = false;
+	existing_device->offload_enabled_pending =
+		new_device->offload_enabled_pending;
+	existing_device->offload_to_mirror = 0;
+	kfree(existing_device->raid_map);
+	existing_device->raid_map = new_device->raid_map;
+
+	/* Prevent the handed-off raid_map from being freed with new_device. */
+	new_device->raid_map = NULL;
+}
+
+static inline void pqi_free_device(struct pqi_scsi_dev *device)
+{
+	if (device) {
+		kfree(device->raid_map);
+		kfree(device);
+	}
+}
+
+/*
+ * Called when exposing a new device to the OS fails in order to re-adjust
+ * our internal SCSI device list to match the SCSI ML's view.
+ */
+
+static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+	list_del(&device->scsi_device_list_entry);
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	/* Allow the device structure to be freed later. */
+	device->keep_device = false;
+}
+
+static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
+{
+	int rc;
+	unsigned int i;
+	unsigned long flags;
+	enum pqi_find_result find_result;
+	struct pqi_scsi_dev *device;
+	struct pqi_scsi_dev *next;
+	struct pqi_scsi_dev *matching_device;
+	struct list_head add_list;
+	struct list_head delete_list;
+
+	INIT_LIST_HEAD(&add_list);
+	INIT_LIST_HEAD(&delete_list);
+
+	/*
+	 * The idea here is to do as little work as possible while holding the
+	 * spinlock.  That's why we go to great pains to defer anything other
+	 * than updating the internal device list until after we release the
+	 * spinlock.
+	 */
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	/* Assume that all devices in the existing list have gone away. */
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry)
+		device->device_gone = true;
+
+	for (i = 0; i < num_new_devices; i++) {
+		device = new_device_list[i];
+
+		find_result = pqi_scsi_find_entry(ctrl_info, device,
+						&matching_device);
+
+		switch (find_result) {
+		case DEVICE_SAME:
+			/*
+			 * The newly found device is already in the existing
+			 * device list.
+			 */
+			device->new_device = false;
+			matching_device->device_gone = false;
+			pqi_scsi_update_device(matching_device, device);
+			break;
+		case DEVICE_NOT_FOUND:
+			/*
+			 * The newly found device is NOT in the existing device
+			 * list.
+			 */
+			device->new_device = true;
+			break;
+		case DEVICE_CHANGED:
+			/*
+			 * The original device has gone away and we need to add
+			 * the new device.
+			 */
+			device->new_device = true;
+			break;
+		default:
+			WARN_ON(find_result);
+			break;
+		}
+	}
+
+	/* Process all devices that have gone away. */
+	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (device->device_gone) {
+			list_del(&device->scsi_device_list_entry);
+			list_add_tail(&device->delete_list_entry, &delete_list);
+		}
+	}
+
+	/* Process all new devices. */
+	for (i = 0; i < num_new_devices; i++) {
+		device = new_device_list[i];
+		if (!device->new_device)
+			continue;
+		if (device->volume_offline)
+			continue;
+		list_add_tail(&device->scsi_device_list_entry,
+			&ctrl_info->scsi_device_list);
+		list_add_tail(&device->add_list_entry, &add_list);
+		/* Keep this device structure from being freed later. */
+		device->keep_device = true;
+	}
+
+	pqi_update_all_logical_drive_queue_depths(ctrl_info);
+
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry)
+		device->offload_enabled =
+			device->offload_enabled_pending;
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	/* Remove all devices that have gone away. */
+	list_for_each_entry_safe(device, next, &delete_list,
+		delete_list_entry) {
+		if (device->sdev)
+			pqi_remove_device(ctrl_info, device);
+		if (device->volume_offline) {
+			pqi_dev_info(ctrl_info, "offline", device);
+			pqi_show_volume_status(ctrl_info, device);
+		} else {
+			pqi_dev_info(ctrl_info, "removed", device);
+		}
+		list_del(&device->delete_list_entry);
+		pqi_free_device(device);
+	}
+
+	/*
+	 * Notify the SCSI ML if the queue depth of any existing device has
+	 * changed.
+	 */
+	list_for_each_entry(device, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (device->sdev && device->queue_depth !=
+			device->advertised_queue_depth) {
+			device->advertised_queue_depth = device->queue_depth;
+			scsi_change_queue_depth(device->sdev,
+				device->advertised_queue_depth);
+		}
+	}
+
+	/* Expose any new devices. */
+	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
+		if (device->expose_device && !device->sdev) {
+			rc = pqi_add_device(ctrl_info, device);
+			if (rc) {
+				dev_warn(&ctrl_info->pci_dev->dev,
+					"scsi %d:%d:%d:%d addition failed, device not added\n",
+					ctrl_info->scsi_host->host_no,
+					device->bus, device->target,
+					device->lun);
+				pqi_fixup_botched_add(ctrl_info, device);
+				continue;
+			}
+		}
+		pqi_dev_info(ctrl_info, "added", device);
+	}
+}
+
+static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+{
+	bool is_supported = false;
+
+	switch (device->devtype) {
+	case TYPE_DISK:
+	case TYPE_ZBC:
+	case TYPE_TAPE:
+	case TYPE_MEDIUM_CHANGER:
+	case TYPE_ENCLOSURE:
+		is_supported = true;
+		break;
+	case TYPE_RAID:
+		/*
+		 * Only support the HBA controller itself as a RAID
+		 * controller.  If it's a RAID controller other than
+		 * the HBA itself (an external RAID controller, MSA500
+		 * or similar), we don't support it.
+		 */
+		if (pqi_is_hba_lunid(device->scsi3addr))
+			is_supported = true;
+		break;
+	}
+
+	return is_supported;
+}
+
+static inline bool pqi_skip_device(u8 *scsi3addr,
+	struct report_phys_lun_extended_entry *phys_lun_ext_entry)
+{
+	u8 device_flags;
+
+	if (!MASKED_DEVICE(scsi3addr))
+		return false;
+
+	/* The device is masked. */
+
+	device_flags = phys_lun_ext_entry->device_flags;
+
+	if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
+		/*
+		 * It's a non-disk device.  We ignore all devices of this type
+		 * when they're masked.
+		 */
+		return true;
+	}
+
+	return false;
+}
+
+static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
+{
+	/* Expose all devices except for physical devices that are masked. */
+	if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
+		return false;
+
+	return true;
+}
+
+static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+	int i;
+	int rc;
+	struct list_head new_device_list_head;
+	struct report_phys_lun_extended *physdev_list = NULL;
+	struct report_log_lun_extended *logdev_list = NULL;
+	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
+	struct report_log_lun_extended_entry *log_lun_ext_entry;
+	struct bmic_identify_physical_device *id_phys = NULL;
+	u32 num_physicals;
+	u32 num_logicals;
+	struct pqi_scsi_dev **new_device_list = NULL;
+	struct pqi_scsi_dev *device;
+	struct pqi_scsi_dev *next;
+	unsigned int num_new_devices;
+	unsigned int num_valid_devices;
+	bool is_physical_device;
+	u8 *scsi3addr;
+	static char *out_of_memory_msg =
+		"out of memory, device discovery stopped";
+
+	INIT_LIST_HEAD(&new_device_list_head);
+
+	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
+	if (rc)
+		goto out;
+
+	if (physdev_list)
+		num_physicals =
+			get_unaligned_be32(&physdev_list->header.list_length)
+				/ sizeof(physdev_list->lun_entries[0]);
+	else
+		num_physicals = 0;
+
+	if (logdev_list)
+		num_logicals =
+			get_unaligned_be32(&logdev_list->header.list_length)
+				/ sizeof(logdev_list->lun_entries[0]);
+	else
+		num_logicals = 0;
+
+	if (num_physicals) {
+		/*
+		 * We need this buffer for calls to pqi_get_physical_disk_info()
+		 * below.  We allocate it here instead of inside
+		 * pqi_get_physical_disk_info() because it's a fairly large
+		 * buffer.
+		 */
+		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
+		if (!id_phys) {
+			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+				out_of_memory_msg);
+			rc = -ENOMEM;
+			goto out;
+		}
+	}
+
+	num_new_devices = num_physicals + num_logicals;
+
+	new_device_list = kmalloc(sizeof(*new_device_list) *
+		num_new_devices, GFP_KERNEL);
+	if (!new_device_list) {
+		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < num_new_devices; i++) {
+		device = kzalloc(sizeof(*device), GFP_KERNEL);
+		if (!device) {
+			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+				out_of_memory_msg);
+			rc = -ENOMEM;
+			goto out;
+		}
+		list_add_tail(&device->new_device_list_entry,
+			&new_device_list_head);
+	}
+
+	device = NULL;
+	num_valid_devices = 0;
+
+	for (i = 0; i < num_new_devices; i++) {
+
+		if (i < num_physicals) {
+			is_physical_device = true;
+			phys_lun_ext_entry = &physdev_list->lun_entries[i];
+			log_lun_ext_entry = NULL;
+			scsi3addr = phys_lun_ext_entry->lunid;
+		} else {
+			is_physical_device = false;
+			phys_lun_ext_entry = NULL;
+			log_lun_ext_entry =
+				&logdev_list->lun_entries[i - num_physicals];
+			scsi3addr = log_lun_ext_entry->lunid;
+		}
+
+		if (is_physical_device &&
+			pqi_skip_device(scsi3addr, phys_lun_ext_entry))
+			continue;
+
+		if (device)
+			device = list_next_entry(device, new_device_list_entry);
+		else
+			device = list_first_entry(&new_device_list_head,
+				struct pqi_scsi_dev, new_device_list_entry);
+
+		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
+		device->is_physical_device = is_physical_device;
+		device->raid_level = SA_RAID_UNKNOWN;
+
+		/* Gather information about the device. */
+		rc = pqi_get_device_info(ctrl_info, device);
+		if (rc == -ENOMEM) {
+			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
+				out_of_memory_msg);
+			goto out;
+		}
+		if (rc) {
+			dev_warn(&ctrl_info->pci_dev->dev,
+				"obtaining device info failed, skipping device %016llx\n",
+				get_unaligned_be64(device->scsi3addr));
+			rc = 0;
+			continue;
+		}
+
+		if (!pqi_is_supported_device(device))
+			continue;
+
+		pqi_assign_bus_target_lun(device);
+
+		device->expose_device = pqi_expose_device(device);
+
+		if (device->is_physical_device) {
+			device->wwid = phys_lun_ext_entry->wwid;
+			if ((phys_lun_ext_entry->device_flags &
+				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
+				phys_lun_ext_entry->aio_handle)
+				device->aio_enabled = true;
+		} else {
+			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
+				sizeof(device->volume_id));
+		}
+
+		switch (device->devtype) {
+		case TYPE_DISK:
+		case TYPE_ZBC:
+		case TYPE_ENCLOSURE:
+			if (device->is_physical_device) {
+				device->sas_address =
+					get_unaligned_be64(&device->wwid);
+				if (device->devtype == TYPE_DISK ||
+					device->devtype == TYPE_ZBC) {
+					device->aio_handle =
+						phys_lun_ext_entry->aio_handle;
+					pqi_get_physical_disk_info(ctrl_info,
+						device, id_phys);
+				}
+			}
+			break;
+		}
+
+		new_device_list[num_valid_devices++] = device;
+	}
+
+	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);
+
+out:
+	list_for_each_entry_safe(device, next, &new_device_list_head,
+		new_device_list_entry) {
+		if (device->keep_device)
+			continue;
+		list_del(&device->new_device_list_entry);
+		pqi_free_device(device);
+	}
+
+	kfree(new_device_list);
+	kfree(physdev_list);
+	kfree(logdev_list);
+	kfree(id_phys);
+
+	return rc;
+}
+
+static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned long flags;
+	struct pqi_scsi_dev *device;
+	struct pqi_scsi_dev *next;
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
+		scsi_device_list_entry) {
+		if (device->sdev)
+			pqi_remove_device(ctrl_info, device);
+		list_del(&device->scsi_device_list_entry);
+		pqi_free_device(device);
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+}
+
+static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+
+	if (pqi_ctrl_offline(ctrl_info))
+		return -ENXIO;
+
+	mutex_lock(&ctrl_info->scan_mutex);
+
+	rc = pqi_update_scsi_devices(ctrl_info);
+	if (rc)
+		pqi_schedule_rescan_worker(ctrl_info);
+
+	mutex_unlock(&ctrl_info->scan_mutex);
+
+	return rc;
+}
+
+static void pqi_scan_start(struct Scsi_Host *shost)
+{
+	pqi_scan_scsi_devices(shost_to_hba(shost));
+}
+
+/* Returns TRUE if scan is finished. */
+
+static int pqi_scan_finished(struct Scsi_Host *shost,
+	unsigned long elapsed_time)
+{
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = shost_priv(shost);
+
+	return !mutex_is_locked(&ctrl_info->scan_mutex);
+}
+
+static inline void pqi_set_encryption_info(
+	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
+	u64 first_block)
+{
+	u32 volume_blk_size;
+
+	/*
+	 * Set the encryption tweak values based on logical block address.
+	 * If the block size is 512, the tweak value is equal to the LBA.
+	 * For other block sizes, the tweak value is (LBA * block size) / 512.
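+	 * For example, with a (hypothetical) 4096-byte block size, LBA 10
+	 * yields a tweak of (10 * 4096) / 512 = 80.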
+	 */
+	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
+	if (volume_blk_size != 512)
+		first_block = (first_block * volume_blk_size) / 512;
+
+	encryption_info->data_encryption_key_index =
+		get_unaligned_le16(&raid_map->data_encryption_key_index);
+	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
+	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
+}
+
+/*
+ * Attempt to perform offload RAID mapping for a logical volume I/O.
+ */
+
+#define PQI_RAID_BYPASS_INELIGIBLE	1
+
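+/*
+ * A request is eligible for bypass only if it decodes to a simple LBA
+ * range that falls within a single row and strip column of the RAID map
+ * (and, for RAID 5/6, a single parity group); the code below then rebuilds
+ * a READ/WRITE CDB aimed at one physical disk's AIO handle.
+ */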
+static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+	struct pqi_queue_group *queue_group)
+{
+	struct raid_map *raid_map;
+	bool is_write = false;
+	u32 map_index;
+	u64 first_block;
+	u64 last_block;
+	u32 block_cnt;
+	u32 blocks_per_row;
+	u64 first_row;
+	u64 last_row;
+	u32 first_row_offset;
+	u32 last_row_offset;
+	u32 first_column;
+	u32 last_column;
+	u64 r0_first_row;
+	u64 r0_last_row;
+	u32 r5or6_blocks_per_row;
+	u64 r5or6_first_row;
+	u64 r5or6_last_row;
+	u32 r5or6_first_row_offset;
+	u32 r5or6_last_row_offset;
+	u32 r5or6_first_column;
+	u32 r5or6_last_column;
+	u16 data_disks_per_row;
+	u32 total_disks_per_row;
+	u16 layout_map_count;
+	u32 stripesize;
+	u16 strip_size;
+	u32 first_group;
+	u32 last_group;
+	u32 current_group;
+	u32 map_row;
+	u32 aio_handle;
+	u64 disk_block;
+	u32 disk_block_cnt;
+	u8 cdb[16];
+	u8 cdb_length;
+	int offload_to_mirror;
+	struct pqi_encryption_info *encryption_info_ptr;
+	struct pqi_encryption_info encryption_info;
+#if BITS_PER_LONG == 32
+	u64 tmpdiv;
+#endif
+
+	/* Check for valid opcode, get LBA and block count. */
+	switch (scmd->cmnd[0]) {
+	case WRITE_6:
+		is_write = true;
+		/* fall through */
+	case READ_6:
+		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
+			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
+		block_cnt = (u32)scmd->cmnd[4];
+		if (block_cnt == 0)
+			block_cnt = 256;
+		break;
+	case WRITE_10:
+		is_write = true;
+		/* fall through */
+	case READ_10:
+		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
+		break;
+	case WRITE_12:
+		is_write = true;
+		/* fall through */
+	case READ_12:
+		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
+		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+		break;
+	case WRITE_16:
+		is_write = true;
+		/* fall through */
+	case READ_16:
+		first_block = get_unaligned_be64(&scmd->cmnd[2]);
+		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
+		break;
+	default:
+		/* Process via normal I/O path. */
+		return PQI_RAID_BYPASS_INELIGIBLE;
+	}
+
+	/* Check for write to non-RAID-0. */
+	if (is_write && device->raid_level != SA_RAID_0)
+		return PQI_RAID_BYPASS_INELIGIBLE;
+
+	if (unlikely(block_cnt == 0))
+		return PQI_RAID_BYPASS_INELIGIBLE;
+
+	last_block = first_block + block_cnt - 1;
+	raid_map = device->raid_map;
+
+	/* Check for invalid block or wraparound. */
+	if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
+		last_block < first_block)
+		return PQI_RAID_BYPASS_INELIGIBLE;
+
+	data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
+	strip_size = get_unaligned_le16(&raid_map->strip_size);
+	layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
+
+	/* Calculate stripe information for the request. */
+	blocks_per_row = data_disks_per_row * strip_size;
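+	/*
+	 * Example with a hypothetical geometry: 3 data disks per row and a
+	 * strip size of 128 give 384 blocks per row, so LBA 500 lands in
+	 * row 1 at offset 116, which is column 0.
+	 */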
+#if BITS_PER_LONG == 32
+	tmpdiv = first_block;
+	do_div(tmpdiv, blocks_per_row);
+	first_row = tmpdiv;
+	tmpdiv = last_block;
+	do_div(tmpdiv, blocks_per_row);
+	last_row = tmpdiv;
+	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
+	tmpdiv = first_row_offset;
+	do_div(tmpdiv, strip_size);
+	first_column = tmpdiv;
+	tmpdiv = last_row_offset;
+	do_div(tmpdiv, strip_size);
+	last_column = tmpdiv;
+#else
+	first_row = first_block / blocks_per_row;
+	last_row = last_block / blocks_per_row;
+	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
+	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
+	first_column = first_row_offset / strip_size;
+	last_column = last_row_offset / strip_size;
+#endif
+
+	/* If this isn't a single row/column, give it to the controller. */
+	if (first_row != last_row || first_column != last_column)
+		return PQI_RAID_BYPASS_INELIGIBLE;
+
+	/* Proceeding with driver mapping. */
+	total_disks_per_row = data_disks_per_row +
+		get_unaligned_le16(&raid_map->metadata_disks_per_row);
+	map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
+		get_unaligned_le16(&raid_map->row_cnt);
+	map_index = (map_row * total_disks_per_row) + first_column;
+
+	/* RAID 1 */
+	if (device->raid_level == SA_RAID_1) {
+		if (device->offload_to_mirror)
+			map_index += data_disks_per_row;
+		device->offload_to_mirror = !device->offload_to_mirror;
+	} else if (device->raid_level == SA_RAID_ADM) {
+		/* RAID ADM */
+		/*
+		 * Handles N-way mirrors (R1-ADM) and R10 with a number of
+		 * drives divisible by 3.
+		 */
+		offload_to_mirror = device->offload_to_mirror;
+		if (offload_to_mirror == 0)  {
+			/* use physical disk in the first mirrored group. */
+			map_index %= data_disks_per_row;
+		} else {
+			do {
+				/*
+				 * Determine mirror group that map_index
+				 * indicates.
+				 */
+				current_group = map_index / data_disks_per_row;
+
+				if (offload_to_mirror != current_group) {
+					if (current_group <
+						layout_map_count - 1) {
+						/*
+						 * Select raid index from
+						 * next group.
+						 */
+						map_index += data_disks_per_row;
+						current_group++;
+					} else {
+						/*
+						 * Select raid index from first
+						 * group.
+						 */
+						map_index %= data_disks_per_row;
+						current_group = 0;
+					}
+				}
+			} while (offload_to_mirror != current_group);
+		}
+
+		/* Set mirror group to use next time. */
+		offload_to_mirror =
+			(offload_to_mirror >= layout_map_count - 1) ?
+				0 : offload_to_mirror + 1;
+		WARN_ON(offload_to_mirror >= layout_map_count);
+		device->offload_to_mirror = offload_to_mirror;
+		/*
+		 * Avoid direct use of device->offload_to_mirror within this
+		 * function since multiple threads might simultaneously
+		 * increment it beyond the range of layout_map_count - 1.
+		 */
+	} else if ((device->raid_level == SA_RAID_5 ||
+		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
+		/* RAID 50/60 */
+		/* Verify first and last blocks are in the same RAID group. */
+		r5or6_blocks_per_row = strip_size * data_disks_per_row;
+		stripesize = r5or6_blocks_per_row * layout_map_count;
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_group = do_div(tmpdiv, stripesize);
+		tmpdiv = first_group;
+		do_div(tmpdiv, r5or6_blocks_per_row);
+		first_group = tmpdiv;
+		tmpdiv = last_block;
+		last_group = do_div(tmpdiv, stripesize);
+		tmpdiv = last_group;
+		do_div(tmpdiv, r5or6_blocks_per_row);
+		last_group = tmpdiv;
+#else
+		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
+		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
+#endif
+		if (first_group != last_group)
+			return PQI_RAID_BYPASS_INELIGIBLE;
+
+		/* Verify request is in a single row of RAID 5/6 */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		do_div(tmpdiv, stripesize);
+		first_row = r5or6_first_row = r0_first_row = tmpdiv;
+		tmpdiv = last_block;
+		do_div(tmpdiv, stripesize);
+		r5or6_last_row = r0_last_row = tmpdiv;
+#else
+		first_row = r5or6_first_row = r0_first_row =
+			first_block / stripesize;
+		r5or6_last_row = r0_last_row = last_block / stripesize;
+#endif
+		if (r5or6_first_row != r5or6_last_row)
+			return PQI_RAID_BYPASS_INELIGIBLE;
+
+		/* Verify request is in a single column */
+#if BITS_PER_LONG == 32
+		tmpdiv = first_block;
+		first_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = first_row_offset;
+		first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
+		r5or6_first_row_offset = first_row_offset;
+		tmpdiv = last_block;
+		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
+		tmpdiv = r5or6_last_row_offset;
+		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
+		tmpdiv = r5or6_first_row_offset;
+		do_div(tmpdiv, strip_size);
+		first_column = r5or6_first_column = tmpdiv;
+		tmpdiv = r5or6_last_row_offset;
+		do_div(tmpdiv, strip_size);
+		r5or6_last_column = tmpdiv;
+#else
+		first_row_offset = r5or6_first_row_offset =
+			(u32)((first_block % stripesize) %
+			r5or6_blocks_per_row);
+
+		r5or6_last_row_offset =
+			(u32)((last_block % stripesize) %
+			r5or6_blocks_per_row);
+
+		first_column = r5or6_first_row_offset / strip_size;
+		r5or6_first_column = first_column;
+		r5or6_last_column = r5or6_last_row_offset / strip_size;
+#endif
+		if (r5or6_first_column != r5or6_last_column)
+			return PQI_RAID_BYPASS_INELIGIBLE;
+
+		/* Request is eligible */
+		map_row =
+			((u32)(first_row >> raid_map->parity_rotation_shift)) %
+			get_unaligned_le16(&raid_map->row_cnt);
+
+		map_index = (first_group *
+			(get_unaligned_le16(&raid_map->row_cnt) *
+			total_disks_per_row)) +
+			(map_row * total_disks_per_row) + first_column;
+	}
+
+	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
+		return PQI_RAID_BYPASS_INELIGIBLE;
+
+	aio_handle = raid_map->disk_data[map_index].aio_handle;
+	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
+		first_row * strip_size +
+		(first_row_offset - first_column * strip_size);
+	disk_block_cnt = block_cnt;
+
+	/* Handle differing logical/physical block sizes. */
+	if (raid_map->phys_blk_shift) {
+		disk_block <<= raid_map->phys_blk_shift;
+		disk_block_cnt <<= raid_map->phys_blk_shift;
+	}
+
+	if (unlikely(disk_block_cnt > 0xffff))
+		return PQI_RAID_BYPASS_INELIGIBLE;
+
+	/* Build the new CDB for the physical disk I/O. */
+	if (disk_block > 0xffffffff) {
+		cdb[0] = is_write ? WRITE_16 : READ_16;
+		cdb[1] = 0;
+		put_unaligned_be64(disk_block, &cdb[2]);
+		put_unaligned_be32(disk_block_cnt, &cdb[10]);
+		cdb[14] = 0;
+		cdb[15] = 0;
+		cdb_length = 16;
+	} else {
+		cdb[0] = is_write ? WRITE_10 : READ_10;
+		cdb[1] = 0;
+		put_unaligned_be32((u32)disk_block, &cdb[2]);
+		cdb[6] = 0;
+		put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
+		cdb[9] = 0;
+		cdb_length = 10;
+	}
+
+	if (get_unaligned_le16(&raid_map->flags) &
+		RAID_MAP_ENCRYPTION_ENABLED) {
+		pqi_set_encryption_info(&encryption_info, raid_map,
+			first_block);
+		encryption_info_ptr = &encryption_info;
+	} else {
+		encryption_info_ptr = NULL;
+	}
+
+	return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
+		cdb, cdb_length, queue_group, encryption_info_ptr);
+}
+
+#define PQI_STATUS_IDLE		0x0
+
+#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
+#define PQI_DELETE_ADMIN_QUEUE_PAIR	2
+
+#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
+#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
+#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
+#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY		0x3
+#define PQI_DEVICE_STATE_ERROR				0x4
+
+#define PQI_MODE_READY_TIMEOUT_SECS		30
+#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
+
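+/*
+ * Wait for the device to leave reset: first for the PQI signature to
+ * appear, then for the function-and-status code to report IDLE, and
+ * finally for the device state to reach "all registers ready".
+ */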
+static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
+{
+	struct pqi_device_registers __iomem *pqi_registers;
+	unsigned long timeout;
+	u64 signature;
+	u8 status;
+
+	pqi_registers = ctrl_info->pqi_registers;
+	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
+
+	while (1) {
+		signature = readq(&pqi_registers->signature);
+		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
+			sizeof(signature)) == 0)
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for PQI signature\n");
+			return -ETIMEDOUT;
+		}
+		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+	}
+
+	while (1) {
+		status = readb(&pqi_registers->function_and_status_code);
+		if (status == PQI_STATUS_IDLE)
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for PQI IDLE\n");
+			return -ETIMEDOUT;
+		}
+		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+	}
+
+	while (1) {
+		if (readl(&pqi_registers->device_status) ==
+			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for PQI all registers ready\n");
+			return -ETIMEDOUT;
+		}
+		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
+	}
+
+	return 0;
+}
+
+static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
+{
+	struct pqi_scsi_dev *device;
+
+	device = io_request->scmd->device->hostdata;
+	device->offload_enabled = false;
+}
+
+static inline void pqi_take_device_offline(struct scsi_device *sdev)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_scsi_dev *device;
+
+	if (scsi_device_online(sdev)) {
+		scsi_device_set_state(sdev, SDEV_OFFLINE);
+		ctrl_info = shost_to_hba(sdev->host);
+		schedule_delayed_work(&ctrl_info->rescan_work, 0);
+		device = sdev->hostdata;
+		dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
+			ctrl_info->scsi_host->host_no, device->bus,
+			device->target, device->lun);
+	}
+}
+
+static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
+{
+	u8 scsi_status;
+	u8 host_byte;
+	struct scsi_cmnd *scmd;
+	struct pqi_raid_error_info *error_info;
+	size_t sense_data_length;
+	int residual_count;
+	int xfer_count;
+	struct scsi_sense_hdr sshdr;
+
+	scmd = io_request->scmd;
+	if (!scmd)
+		return;
+
+	error_info = io_request->error_info;
+	scsi_status = error_info->status;
+	host_byte = DID_OK;
+
+	if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
+		xfer_count =
+			get_unaligned_le32(&error_info->data_out_transferred);
+		residual_count = scsi_bufflen(scmd) - xfer_count;
+		scsi_set_resid(scmd, residual_count);
+		if (xfer_count < scmd->underflow)
+			host_byte = DID_SOFT_ERROR;
+	}
+
+	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
+	if (sense_data_length == 0)
+		sense_data_length =
+			get_unaligned_le16(&error_info->response_data_length);
+	if (sense_data_length) {
+		if (sense_data_length > sizeof(error_info->data))
+			sense_data_length = sizeof(error_info->data);
+
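+		/*
+		 * A sense key of HARDWARE ERROR with ASC/ASCQ 0x3e/0x01
+		 * (logical unit failure) means the device is gone, so take
+		 * it offline.
+		 */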
+		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+			scsi_normalize_sense(error_info->data,
+				sense_data_length, &sshdr) &&
+				sshdr.sense_key == HARDWARE_ERROR &&
+				sshdr.asc == 0x3e &&
+				sshdr.ascq == 0x1) {
+			pqi_take_device_offline(scmd->device);
+			host_byte = DID_NO_CONNECT;
+		}
+
+		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
+			sense_data_length = SCSI_SENSE_BUFFERSIZE;
+		memcpy(scmd->sense_buffer, error_info->data,
+			sense_data_length);
+	}
+
+	scmd->result = scsi_status;
+	set_host_byte(scmd, host_byte);
+}
+
+static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
+{
+	u8 scsi_status;
+	u8 host_byte;
+	struct scsi_cmnd *scmd;
+	struct pqi_aio_error_info *error_info;
+	size_t sense_data_length;
+	int residual_count;
+	int xfer_count;
+	bool device_offline;
+
+	scmd = io_request->scmd;
+	error_info = io_request->error_info;
+	host_byte = DID_OK;
+	sense_data_length = 0;
+	device_offline = false;
+
+	switch (error_info->service_response) {
+	case PQI_AIO_SERV_RESPONSE_COMPLETE:
+		scsi_status = error_info->status;
+		break;
+	case PQI_AIO_SERV_RESPONSE_FAILURE:
+		switch (error_info->status) {
+		case PQI_AIO_STATUS_IO_ABORTED:
+			scsi_status = SAM_STAT_TASK_ABORTED;
+			break;
+		case PQI_AIO_STATUS_UNDERRUN:
+			scsi_status = SAM_STAT_GOOD;
+			residual_count = get_unaligned_le32(
+						&error_info->residual_count);
+			scsi_set_resid(scmd, residual_count);
+			xfer_count = scsi_bufflen(scmd) - residual_count;
+			if (xfer_count < scmd->underflow)
+				host_byte = DID_SOFT_ERROR;
+			break;
+		case PQI_AIO_STATUS_OVERRUN:
+			scsi_status = SAM_STAT_GOOD;
+			break;
+		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
+			pqi_aio_path_disabled(io_request);
+			scsi_status = SAM_STAT_GOOD;
+			io_request->status = -EAGAIN;
+			break;
+		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
+		case PQI_AIO_STATUS_INVALID_DEVICE:
+			device_offline = true;
+			pqi_take_device_offline(scmd->device);
+			host_byte = DID_NO_CONNECT;
+			scsi_status = SAM_STAT_CHECK_CONDITION;
+			break;
+		case PQI_AIO_STATUS_IO_ERROR:
+		default:
+			scsi_status = SAM_STAT_CHECK_CONDITION;
+			break;
+		}
+		break;
+	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
+	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
+		scsi_status = SAM_STAT_GOOD;
+		break;
+	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
+	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
+	default:
+		scsi_status = SAM_STAT_CHECK_CONDITION;
+		break;
+	}
+
+	if (error_info->data_present) {
+		sense_data_length =
+			get_unaligned_le16(&error_info->data_length);
+		if (sense_data_length) {
+			if (sense_data_length > sizeof(error_info->data))
+				sense_data_length = sizeof(error_info->data);
+			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
+				sense_data_length = SCSI_SENSE_BUFFERSIZE;
+			memcpy(scmd->sense_buffer, error_info->data,
+				sense_data_length);
+		}
+	}
+
+	if (device_offline && sense_data_length == 0)
+		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
+			0x3e, 0x1);
+
+	scmd->result = scsi_status;
+	set_host_byte(scmd, host_byte);
+}
+
+static void pqi_process_io_error(unsigned int iu_type,
+	struct pqi_io_request *io_request)
+{
+	switch (iu_type) {
+	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
+		pqi_process_raid_io_error(io_request);
+		break;
+	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
+		pqi_process_aio_io_error(io_request);
+		break;
+	}
+}
+
+static int pqi_interpret_task_management_response(
+	struct pqi_task_management_response *response)
+{
+	int rc;
+
+	switch (response->response_code) {
+	case SOP_TMF_COMPLETE:
+	case SOP_TMF_FUNCTION_SUCCEEDED:
+		rc = 0;
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+
+	return rc;
+}
+
+static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_queue_group *queue_group)
+{
+	unsigned int num_responses;
+	pqi_index_t oq_pi;
+	pqi_index_t oq_ci;
+	struct pqi_io_request *io_request;
+	struct pqi_io_response *response;
+	u16 request_id;
+
+	num_responses = 0;
+	oq_ci = queue_group->oq_ci_copy;
+
+	while (1) {
+		oq_pi = *queue_group->oq_pi;
+		if (oq_pi == oq_ci)
+			break;
+
+		num_responses++;
+		response = queue_group->oq_element_array +
+			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+
+		request_id = get_unaligned_le16(&response->request_id);
+		WARN_ON(request_id >= ctrl_info->max_io_slots);
+
+		io_request = &ctrl_info->io_request_pool[request_id];
+		WARN_ON(atomic_read(&io_request->refcount) == 0);
+
+		switch (response->header.iu_type) {
+		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
+		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
+		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
+			break;
+		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
+			io_request->status =
+				pqi_interpret_task_management_response(
+					(void *)response);
+			break;
+		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
+			pqi_aio_path_disabled(io_request);
+			io_request->status = -EAGAIN;
+			break;
+		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
+		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
+			io_request->error_info = ctrl_info->error_buffer +
+				(get_unaligned_le16(&response->error_index) *
+				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
+			pqi_process_io_error(response->header.iu_type,
+				io_request);
+			break;
+		default:
+			dev_err(&ctrl_info->pci_dev->dev,
+				"unexpected IU type: 0x%x\n",
+				response->header.iu_type);
+			WARN_ON(response->header.iu_type);
+			break;
+		}
+
+		io_request->io_complete_callback(io_request,
+			io_request->context);
+
+		/*
+		 * Note that the I/O request structure CANNOT BE TOUCHED after
+		 * returning from the I/O completion callback!
+		 */
+
+		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
+	}
+
+	if (num_responses) {
+		queue_group->oq_ci_copy = oq_ci;
+		writel(oq_ci, queue_group->oq_ci);
+	}
+
+	return num_responses;
+}
+
+static inline unsigned int pqi_num_elements_free(unsigned int pi,
+	unsigned int ci, unsigned int elements_in_queue)
+{
+	unsigned int num_elements_used;
+
+	if (pi >= ci)
+		num_elements_used = pi - ci;
+	else
+		num_elements_used = elements_in_queue - ci + pi;
+
+	return elements_in_queue - num_elements_used - 1;
+}
+
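+/*
+ * Worked example of the free-element math above (illustrative numbers): in
+ * a 16-element ring with pi == 3 and ci == 7, the producer has wrapped, so
+ * 16 - 7 + 3 = 12 elements are in use and 16 - 12 - 1 = 3 are free.  One
+ * slot is always left empty so that pi == ci unambiguously means "empty"
+ * rather than "full".
+ */
+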
+#define PQI_EVENT_ACK_TIMEOUT	30	/* seconds */
+
+static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_event_acknowledge_request *iu, size_t iu_length)
+{
+	pqi_index_t iq_pi;
+	pqi_index_t iq_ci;
+	unsigned long flags;
+	void *next_element;
+	unsigned long timeout;
+	struct pqi_queue_group *queue_group;
+
+	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
+	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
+
+	timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;
+
+	while (1) {
+		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
+
+		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
+		iq_ci = *queue_group->iq_ci[RAID_PATH];
+
+		if (pqi_num_elements_free(iq_pi, iq_ci,
+			ctrl_info->num_elements_per_iq))
+			break;
+
+		spin_unlock_irqrestore(
+			&queue_group->submit_lock[RAID_PATH], flags);
+
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"sending event acknowledge timed out\n");
+			return;
+		}
+	}
+
+	next_element = queue_group->iq_element_array[RAID_PATH] +
+		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+	memcpy(next_element, iu, iu_length);
+
+	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
+
+	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
+
+	/*
+	 * This write notifies the controller that an IU is available to be
+	 * processed.
+	 */
+	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
+
+	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
+}
+
+static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_event *event)
+{
+	struct pqi_event_acknowledge_request request;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
+	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
+		&request.header.iu_length);
+	request.event_type = event->event_type;
+	request.event_id = event->event_id;
+	request.additional_event_id = event->additional_event_id;
+
+	pqi_start_event_ack(ctrl_info, &request, sizeof(request));
+}
+
+static void pqi_event_worker(struct work_struct *work)
+{
+	unsigned int i;
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_event *pending_event;
+	bool got_non_heartbeat_event = false;
+
+	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
+
+	pending_event = ctrl_info->pending_events;
+	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+		if (pending_event->pending) {
+			pending_event->pending = false;
+			pqi_acknowledge_event(ctrl_info, pending_event);
+			if (i != PQI_EVENT_HEARTBEAT)
+				got_non_heartbeat_event = true;
+		}
+		pending_event++;
+	}
+
+	if (got_non_heartbeat_event)
+		pqi_schedule_rescan_worker(ctrl_info);
+}
+
+static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	unsigned int path;
+	struct pqi_queue_group *queue_group;
+	unsigned long flags;
+	struct pqi_io_request *io_request;
+	struct pqi_io_request *next;
+	struct scsi_cmnd *scmd;
+
+	ctrl_info->controller_online = false;
+	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		queue_group = &ctrl_info->queue_groups[i];
+
+		for (path = 0; path < 2; path++) {
+			spin_lock_irqsave(
+				&queue_group->submit_lock[path], flags);
+
+			list_for_each_entry_safe(io_request, next,
+				&queue_group->request_list[path],
+				request_list_entry) {
+
+				scmd = io_request->scmd;
+				if (scmd) {
+					set_host_byte(scmd, DID_NO_CONNECT);
+					pqi_scsi_done(scmd);
+				}
+
+				list_del(&io_request->request_list_entry);
+			}
+
+			spin_unlock_irqrestore(
+				&queue_group->submit_lock[path], flags);
+		}
+	}
+}
+
+#define PQI_HEARTBEAT_TIMER_INTERVAL	(5 * HZ)
+#define PQI_MAX_HEARTBEAT_REQUESTS	5
+
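+/*
+ * The heartbeat timer below fires every PQI_HEARTBEAT_TIMER_INTERVAL and
+ * compares the interrupt count against the value sampled on the previous
+ * tick.  If no interrupts arrived, a heartbeat event is queued for
+ * acknowledgement so that a healthy controller generates an interrupt in
+ * response; after more than PQI_MAX_HEARTBEAT_REQUESTS consecutive silent
+ * intervals the controller is taken offline.
+ */
+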
+static void pqi_heartbeat_timer_handler(unsigned long data)
+{
+	int num_interrupts;
+	struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
+
+	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
+
+	if (num_interrupts == ctrl_info->previous_num_interrupts) {
+		ctrl_info->num_heartbeats_requested++;
+		if (ctrl_info->num_heartbeats_requested >
+			PQI_MAX_HEARTBEAT_REQUESTS) {
+			pqi_take_ctrl_offline(ctrl_info);
+			return;
+		}
+		ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
+		schedule_work(&ctrl_info->event_work);
+	} else {
+		ctrl_info->num_heartbeats_requested = 0;
+	}
+
+	ctrl_info->previous_num_interrupts = num_interrupts;
+	mod_timer(&ctrl_info->heartbeat_timer,
+		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
+}
+
+static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->previous_num_interrupts =
+		atomic_read(&ctrl_info->num_interrupts);
+
+	init_timer(&ctrl_info->heartbeat_timer);
+	ctrl_info->heartbeat_timer.expires =
+		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
+	ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
+	ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
+	add_timer(&ctrl_info->heartbeat_timer);
+	ctrl_info->heartbeat_timer_started = true;
+}
+
+static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
+{
+	if (ctrl_info->heartbeat_timer_started)
+		del_timer_sync(&ctrl_info->heartbeat_timer);
+}
+
+static int pqi_event_type_to_event_index(unsigned int event_type)
+{
+	int index;
+
+	switch (event_type) {
+	case PQI_EVENT_TYPE_HEARTBEAT:
+		index = PQI_EVENT_HEARTBEAT;
+		break;
+	case PQI_EVENT_TYPE_HOTPLUG:
+		index = PQI_EVENT_HOTPLUG;
+		break;
+	case PQI_EVENT_TYPE_HARDWARE:
+		index = PQI_EVENT_HARDWARE;
+		break;
+	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
+		index = PQI_EVENT_PHYSICAL_DEVICE;
+		break;
+	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
+		index = PQI_EVENT_LOGICAL_DEVICE;
+		break;
+	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
+		index = PQI_EVENT_AIO_STATE_CHANGE;
+		break;
+	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
+		index = PQI_EVENT_AIO_CONFIG_CHANGE;
+		break;
+	default:
+		index = -1;
+		break;
+	}
+
+	return index;
+}
+
+static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int num_events;
+	pqi_index_t oq_pi;
+	pqi_index_t oq_ci;
+	struct pqi_event_queue *event_queue;
+	struct pqi_event_response *response;
+	struct pqi_event *pending_event;
+	bool need_delayed_work;
+	int event_index;
+
+	event_queue = &ctrl_info->event_queue;
+	num_events = 0;
+	need_delayed_work = false;
+	oq_ci = event_queue->oq_ci_copy;
+
+	while (1) {
+		oq_pi = *event_queue->oq_pi;
+		if (oq_pi == oq_ci)
+			break;
+
+		num_events++;
+		response = event_queue->oq_element_array +
+			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+
+		event_index =
+			pqi_event_type_to_event_index(response->event_type);
+
+		if (event_index >= 0) {
+			if (response->request_acknowlege) {
+				pending_event =
+					&ctrl_info->pending_events[event_index];
+				pending_event->event_type =
+					response->event_type;
+				pending_event->event_id = response->event_id;
+				pending_event->additional_event_id =
+					response->additional_event_id;
+				if (event_index != PQI_EVENT_HEARTBEAT) {
+					pending_event->pending = true;
+					need_delayed_work = true;
+				}
+			}
+		}
+
+		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
+	}
+
+	if (num_events) {
+		event_queue->oq_ci_copy = oq_ci;
+		writel(oq_ci, event_queue->oq_ci);
+
+		if (need_delayed_work)
+			schedule_work(&ctrl_info->event_work);
+	}
+
+	return num_events;
+}
+
+static irqreturn_t pqi_irq_handler(int irq, void *data)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_queue_group *queue_group;
+	unsigned int num_responses_handled;
+
+	queue_group = data;
+	ctrl_info = queue_group->ctrl_info;
+
+	if (!ctrl_info || !queue_group->oq_ci)
+		return IRQ_NONE;
+
+	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+
+	if (irq == ctrl_info->event_irq)
+		num_responses_handled += pqi_process_event_intr(ctrl_info);
+
+	if (num_responses_handled)
+		atomic_inc(&ctrl_info->num_interrupts);
+
+	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
+	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+
+	return IRQ_HANDLED;
+}
+
+static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+	int i;
+	int rc;
+
+	ctrl_info->event_irq = ctrl_info->msix_vectors[0];
+
+	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
+		rc = request_irq(ctrl_info->msix_vectors[i],
+			pqi_irq_handler, 0,
+			DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
+		if (rc) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"irq %u init failed with error %d\n",
+				ctrl_info->msix_vectors[i], rc);
+			return rc;
+		}
+		ctrl_info->num_msix_vectors_initialized++;
+	}
+
+	return 0;
+}
+
+static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
+{
+	int i;
+
+	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+		free_irq(ctrl_info->msix_vectors[i],
+			ctrl_info->intr_data[i]);
+}
+
+static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	int max_vectors;
+	int num_vectors_enabled;
+	struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
+
+	max_vectors = ctrl_info->num_queue_groups;
+
+	for (i = 0; i < max_vectors; i++)
+		msix_entries[i].entry = i;
+
+	num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
+		msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
+
+	if (num_vectors_enabled < 0) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"MSI-X init failed with error %d\n",
+			num_vectors_enabled);
+		return num_vectors_enabled;
+	}
+
+	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
+	for (i = 0; i < num_vectors_enabled; i++) {
+		ctrl_info->msix_vectors[i] = msix_entries[i].vector;
+		ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
+	}
+
+	return 0;
+}
+
+static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
+{
+	int i;
+	int rc;
+	int cpu;
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
+		rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
+			get_cpu_mask(cpu));
+		if (rc)
+			dev_err(&ctrl_info->pci_dev->dev,
+				"error %d setting affinity hint for irq vector %u\n",
+				rc, ctrl_info->msix_vectors[i]);
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+}
+
+static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
+{
+	int i;
+
+	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
+		irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
+}
+
+static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	size_t alloc_length;
+	size_t element_array_length_per_iq;
+	size_t element_array_length_per_oq;
+	void *element_array;
+	void *next_queue_index;
+	void *aligned_pointer;
+	unsigned int num_inbound_queues;
+	unsigned int num_outbound_queues;
+	unsigned int num_queue_indexes;
+	struct pqi_queue_group *queue_group;
+
+	element_array_length_per_iq =
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
+		ctrl_info->num_elements_per_iq;
+	element_array_length_per_oq =
+		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
+		ctrl_info->num_elements_per_oq;
+	num_inbound_queues = ctrl_info->num_queue_groups * 2;
+	num_outbound_queues = ctrl_info->num_queue_groups;
+	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
+
+	aligned_pointer = NULL;
+
+	for (i = 0; i < num_inbound_queues; i++) {
+		aligned_pointer = PTR_ALIGN(aligned_pointer,
+			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+		aligned_pointer += element_array_length_per_iq;
+	}
+
+	for (i = 0; i < num_outbound_queues; i++) {
+		aligned_pointer = PTR_ALIGN(aligned_pointer,
+			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+		aligned_pointer += element_array_length_per_oq;
+	}
+
+	aligned_pointer = PTR_ALIGN(aligned_pointer,
+		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
+		PQI_EVENT_OQ_ELEMENT_LENGTH;
+
+	for (i = 0; i < num_queue_indexes; i++) {
+		aligned_pointer = PTR_ALIGN(aligned_pointer,
+			PQI_OPERATIONAL_INDEX_ALIGNMENT);
+		aligned_pointer += sizeof(pqi_index_t);
+	}
+
+	alloc_length = (size_t)aligned_pointer +
+		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+
+	ctrl_info->queue_memory_base =
+		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+			alloc_length,
+			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
+
+	if (!ctrl_info->queue_memory_base) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to allocate memory for PQI admin queues\n");
+		return -ENOMEM;
+	}
+
+	ctrl_info->queue_memory_length = alloc_length;
+
+	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
+		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		queue_group = &ctrl_info->queue_groups[i];
+		queue_group->iq_element_array[RAID_PATH] = element_array;
+		queue_group->iq_element_array_bus_addr[RAID_PATH] =
+			ctrl_info->queue_memory_base_dma_handle +
+				(element_array - ctrl_info->queue_memory_base);
+		element_array += element_array_length_per_iq;
+		element_array = PTR_ALIGN(element_array,
+			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+		queue_group->iq_element_array[AIO_PATH] = element_array;
+		queue_group->iq_element_array_bus_addr[AIO_PATH] =
+			ctrl_info->queue_memory_base_dma_handle +
+			(element_array - ctrl_info->queue_memory_base);
+		element_array += element_array_length_per_iq;
+		element_array = PTR_ALIGN(element_array,
+			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+	}
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		queue_group = &ctrl_info->queue_groups[i];
+		queue_group->oq_element_array = element_array;
+		queue_group->oq_element_array_bus_addr =
+			ctrl_info->queue_memory_base_dma_handle +
+			(element_array - ctrl_info->queue_memory_base);
+		element_array += element_array_length_per_oq;
+		element_array = PTR_ALIGN(element_array,
+			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+	}
+
+	ctrl_info->event_queue.oq_element_array = element_array;
+	ctrl_info->event_queue.oq_element_array_bus_addr =
+		ctrl_info->queue_memory_base_dma_handle +
+		(element_array - ctrl_info->queue_memory_base);
+	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
+		PQI_EVENT_OQ_ELEMENT_LENGTH;
+
+	next_queue_index = PTR_ALIGN(element_array,
+		PQI_OPERATIONAL_INDEX_ALIGNMENT);
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		queue_group = &ctrl_info->queue_groups[i];
+		queue_group->iq_ci[RAID_PATH] = next_queue_index;
+		queue_group->iq_ci_bus_addr[RAID_PATH] =
+			ctrl_info->queue_memory_base_dma_handle +
+			(next_queue_index - ctrl_info->queue_memory_base);
+		next_queue_index += sizeof(pqi_index_t);
+		next_queue_index = PTR_ALIGN(next_queue_index,
+			PQI_OPERATIONAL_INDEX_ALIGNMENT);
+		queue_group->iq_ci[AIO_PATH] = next_queue_index;
+		queue_group->iq_ci_bus_addr[AIO_PATH] =
+			ctrl_info->queue_memory_base_dma_handle +
+			(next_queue_index - ctrl_info->queue_memory_base);
+		next_queue_index += sizeof(pqi_index_t);
+		next_queue_index = PTR_ALIGN(next_queue_index,
+			PQI_OPERATIONAL_INDEX_ALIGNMENT);
+		queue_group->oq_pi = next_queue_index;
+		queue_group->oq_pi_bus_addr =
+			ctrl_info->queue_memory_base_dma_handle +
+			(next_queue_index - ctrl_info->queue_memory_base);
+		next_queue_index += sizeof(pqi_index_t);
+		next_queue_index = PTR_ALIGN(next_queue_index,
+			PQI_OPERATIONAL_INDEX_ALIGNMENT);
+	}
+
+	ctrl_info->event_queue.oq_pi = next_queue_index;
+	ctrl_info->event_queue.oq_pi_bus_addr =
+		ctrl_info->queue_memory_base_dma_handle +
+		(next_queue_index - ctrl_info->queue_memory_base);
+
+	return 0;
+}
+
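+/*
+ * Editor's sketch (illustrative only, not part of the driver): the function
+ * above sizes its allocation with a two-pass walk -- first from a NULL base
+ * purely to accumulate worst-case alignment padding, then over the real DMA
+ * buffer to carve out the aligned sub-arrays.  The hypothetical helper below
+ * shows the first pass for two arrays of lengths len_a and len_b.
+ */
+static size_t pqi_sketch_layout_length(size_t len_a, size_t len_b)
+{
+	void *p = NULL;
+
+	/* dry run: walk the layout to account for alignment padding */
+	p = PTR_ALIGN(p, PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+	p += len_a;
+	p = PTR_ALIGN(p, PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+	p += len_b;
+
+	/* extra slack so the real (possibly unaligned) base can be aligned */
+	return (size_t)p + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+}
+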
+static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
+	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
+
+	/*
+	 * Initialize the backpointers to the controller structure in
+	 * each operational queue group structure.
+	 */
+	for (i = 0; i < ctrl_info->num_queue_groups; i++)
+		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
+
+	/*
+	 * Assign IDs to all operational queues.  Note that the IDs
+	 * assigned to operational IQs are independent of the IDs
+	 * assigned to operational OQs.
+	 */
+	ctrl_info->event_queue.oq_id = next_oq_id++;
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
+		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
+		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
+	}
+
+	/*
+	 * Assign MSI-X table entry indexes to all queues.  Note that the
+	 * interrupt for the event queue is shared with the first queue group.
+	 */
+	ctrl_info->event_queue.int_msg_num = 0;
+	for (i = 0; i < ctrl_info->num_queue_groups; i++)
+		ctrl_info->queue_groups[i].int_msg_num = i;
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
+		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
+		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
+		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
+	}
+}
+
+static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
+{
+	size_t alloc_length;
+	struct pqi_admin_queues_aligned *admin_queues_aligned;
+	struct pqi_admin_queues *admin_queues;
+
+	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
+		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
+
+	ctrl_info->admin_queue_memory_base =
+		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+			alloc_length,
+			&ctrl_info->admin_queue_memory_base_dma_handle,
+			GFP_KERNEL);
+
+	if (!ctrl_info->admin_queue_memory_base)
+		return -ENOMEM;
+
+	ctrl_info->admin_queue_memory_length = alloc_length;
+
+	admin_queues = &ctrl_info->admin_queues;
+	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
+		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
+	admin_queues->iq_element_array =
+		&admin_queues_aligned->iq_element_array;
+	admin_queues->oq_element_array =
+		&admin_queues_aligned->oq_element_array;
+	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
+	admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
+
+	admin_queues->iq_element_array_bus_addr =
+		ctrl_info->admin_queue_memory_base_dma_handle +
+		(admin_queues->iq_element_array -
+		ctrl_info->admin_queue_memory_base);
+	admin_queues->oq_element_array_bus_addr =
+		ctrl_info->admin_queue_memory_base_dma_handle +
+		(admin_queues->oq_element_array -
+		ctrl_info->admin_queue_memory_base);
+	admin_queues->iq_ci_bus_addr =
+		ctrl_info->admin_queue_memory_base_dma_handle +
+		((void *)admin_queues->iq_ci -
+		ctrl_info->admin_queue_memory_base);
+	admin_queues->oq_pi_bus_addr =
+		ctrl_info->admin_queue_memory_base_dma_handle +
+		((void *)admin_queues->oq_pi -
+		ctrl_info->admin_queue_memory_base);
+
+	return 0;
+}
+
+#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
+#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1
+
+static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
+{
+	struct pqi_device_registers __iomem *pqi_registers;
+	struct pqi_admin_queues *admin_queues;
+	unsigned long timeout;
+	u8 status;
+	u32 reg;
+
+	pqi_registers = ctrl_info->pqi_registers;
+	admin_queues = &ctrl_info->admin_queues;
+
+	writeq((u64)admin_queues->iq_element_array_bus_addr,
+		&pqi_registers->admin_iq_element_array_addr);
+	writeq((u64)admin_queues->oq_element_array_bus_addr,
+		&pqi_registers->admin_oq_element_array_addr);
+	writeq((u64)admin_queues->iq_ci_bus_addr,
+		&pqi_registers->admin_iq_ci_addr);
+	writeq((u64)admin_queues->oq_pi_bus_addr,
+		&pqi_registers->admin_oq_pi_addr);
+
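+	/*
+	 * The admin queue parameter register packs the IQ element count into
+	 * bits 0-7, the OQ element count into bits 8-15, and the MSI-X
+	 * message number into bits 16 and up.
+	 */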
+	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
+		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
+		(admin_queues->int_msg_num << 16);
+	writel(reg, &pqi_registers->admin_iq_num_elements);
+	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
+		&pqi_registers->function_and_status_code);
+
+	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
+	while (1) {
+		status = readb(&pqi_registers->function_and_status_code);
+		if (status == PQI_STATUS_IDLE)
+			break;
+		if (time_after(jiffies, timeout))
+			return -ETIMEDOUT;
+		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
+	}
+
+	/*
+	 * The offset registers are not initialized to the correct
+	 * offsets until *after* the create admin queue pair command
+	 * completes successfully.
+	 */
+	admin_queues->iq_pi = ctrl_info->iomem_base +
+		PQI_DEVICE_REGISTERS_OFFSET +
+		readq(&pqi_registers->admin_iq_pi_offset);
+	admin_queues->oq_ci = ctrl_info->iomem_base +
+		PQI_DEVICE_REGISTERS_OFFSET +
+		readq(&pqi_registers->admin_oq_ci_offset);
+
+	return 0;
+}
+
+static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_general_admin_request *request)
+{
+	struct pqi_admin_queues *admin_queues;
+	void *next_element;
+	pqi_index_t iq_pi;
+
+	admin_queues = &ctrl_info->admin_queues;
+	iq_pi = admin_queues->iq_pi_copy;
+
+	next_element = admin_queues->iq_element_array +
+		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
+
+	memcpy(next_element, request, sizeof(*request));
+
+	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
+	admin_queues->iq_pi_copy = iq_pi;
+
+	/*
+	 * This write notifies the controller that an IU is available to be
+	 * processed.
+	 */
+	writel(iq_pi, admin_queues->iq_pi);
+}
+
+static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_general_admin_response *response)
+{
+	struct pqi_admin_queues *admin_queues;
+	pqi_index_t oq_pi;
+	pqi_index_t oq_ci;
+	unsigned long timeout;
+
+	admin_queues = &ctrl_info->admin_queues;
+	oq_ci = admin_queues->oq_ci_copy;
+
+	timeout = (3 * HZ) + jiffies;
+
+	while (1) {
+		oq_pi = *admin_queues->oq_pi;
+		if (oq_pi != oq_ci)
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"timed out waiting for admin response\n");
+			return -ETIMEDOUT;
+		}
+		usleep_range(1000, 2000);
+	}
+
+	memcpy(response, admin_queues->oq_element_array +
+		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
+
+	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
+	admin_queues->oq_ci_copy = oq_ci;
+	writel(oq_ci, admin_queues->oq_ci);
+
+	return 0;
+}
+
+static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_queue_group *queue_group, enum pqi_io_path path,
+	struct pqi_io_request *io_request)
+{
+	struct pqi_io_request *next;
+	void *next_element;
+	pqi_index_t iq_pi;
+	pqi_index_t iq_ci;
+	size_t iu_length;
+	unsigned long flags;
+	unsigned int num_elements_needed;
+	unsigned int num_elements_to_end_of_queue;
+	size_t copy_count;
+	struct pqi_iu_header *request;
+
+	spin_lock_irqsave(&queue_group->submit_lock[path], flags);
+
+	if (io_request)
+		list_add_tail(&io_request->request_list_entry,
+			&queue_group->request_list[path]);
+
+	iq_pi = queue_group->iq_pi_copy[path];
+
+	list_for_each_entry_safe(io_request, next,
+		&queue_group->request_list[path], request_list_entry) {
+
+		request = io_request->iu;
+
+		iu_length = get_unaligned_le16(&request->iu_length) +
+			PQI_REQUEST_HEADER_LENGTH;
+		num_elements_needed =
+			DIV_ROUND_UP(iu_length,
+				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+		iq_ci = *queue_group->iq_ci[path];
+
+		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
+			ctrl_info->num_elements_per_iq))
+			break;
+
+		put_unaligned_le16(queue_group->oq_id,
+			&request->response_queue_id);
+
+		next_element = queue_group->iq_element_array[path] +
+			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+		num_elements_to_end_of_queue =
+			ctrl_info->num_elements_per_iq - iq_pi;
+
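+		/*
+		 * A spanned IU may wrap past the end of the circular element
+		 * array: copy what fits up to the end of the array, then wrap
+		 * the remainder around to the start.
+		 */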
+		if (num_elements_needed <= num_elements_to_end_of_queue) {
+			memcpy(next_element, request, iu_length);
+		} else {
+			copy_count = num_elements_to_end_of_queue *
+				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
+			memcpy(next_element, request, copy_count);
+			memcpy(queue_group->iq_element_array[path],
+				(u8 *)request + copy_count,
+				iu_length - copy_count);
+		}
+
+		iq_pi = (iq_pi + num_elements_needed) %
+			ctrl_info->num_elements_per_iq;
+
+		list_del(&io_request->request_list_entry);
+	}
+
+	if (iq_pi != queue_group->iq_pi_copy[path]) {
+		queue_group->iq_pi_copy[path] = iq_pi;
+		/*
+		 * This write notifies the controller that one or more IUs are
+		 * available to be processed.
+		 */
+		writel(iq_pi, queue_group->iq_pi[path]);
+	}
+
+	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
+}
+
+static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
+	void *context)
+{
+	struct completion *waiting = context;
+
+	complete(waiting);
+}
+
+static int pqi_submit_raid_request_synchronous_with_io_request(
+	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
+	unsigned long timeout_msecs)
+{
+	int rc = 0;
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	io_request->io_complete_callback = pqi_raid_synchronous_complete;
+	io_request->context = &wait;
+
+	pqi_start_io(ctrl_info,
+		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+		io_request);
+
+	if (timeout_msecs == NO_TIMEOUT) {
+		wait_for_completion_io(&wait);
+	} else {
+		if (!wait_for_completion_io_timeout(&wait,
+			msecs_to_jiffies(timeout_msecs))) {
+			dev_warn(&ctrl_info->pci_dev->dev,
+				"command timed out\n");
+			rc = -ETIMEDOUT;
+		}
+	}
+
+	return rc;
+}
+
+static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_iu_header *request, unsigned int flags,
+	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
+{
+	int rc;
+	struct pqi_io_request *io_request;
+	unsigned long start_jiffies;
+	unsigned long msecs_blocked;
+	size_t iu_length;
+
+	/*
+	 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and specifying a
+	 * timeout value are mutually exclusive.
+	 */
+
+	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
+		if (down_interruptible(&ctrl_info->sync_request_sem))
+			return -ERESTARTSYS;
+	} else {
+		if (timeout_msecs == NO_TIMEOUT) {
+			down(&ctrl_info->sync_request_sem);
+		} else {
+			start_jiffies = jiffies;
+			if (down_timeout(&ctrl_info->sync_request_sem,
+				msecs_to_jiffies(timeout_msecs)))
+				return -ETIMEDOUT;
+			msecs_blocked =
+				jiffies_to_msecs(jiffies - start_jiffies);
+			if (msecs_blocked >= timeout_msecs)
+				return -ETIMEDOUT;
+			timeout_msecs -= msecs_blocked;
+		}
+	}
+
+	io_request = pqi_alloc_io_request(ctrl_info);
+
+	put_unaligned_le16(io_request->index,
+		&(((struct pqi_raid_path_request *)request)->request_id));
+
+	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
+		((struct pqi_raid_path_request *)request)->error_index =
+			((struct pqi_raid_path_request *)request)->request_id;
+
+	iu_length = get_unaligned_le16(&request->iu_length) +
+		PQI_REQUEST_HEADER_LENGTH;
+	memcpy(io_request->iu, request, iu_length);
+
+	rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
+		io_request, timeout_msecs);
+
+	if (error_info) {
+		if (io_request->error_info)
+			memcpy(error_info, io_request->error_info,
+				sizeof(*error_info));
+		else
+			memset(error_info, 0, sizeof(*error_info));
+	} else if (rc == 0 && io_request->error_info) {
+		u8 scsi_status;
+		struct pqi_raid_error_info *raid_error_info;
+
+		raid_error_info = io_request->error_info;
+		scsi_status = raid_error_info->status;
+
+		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
+			raid_error_info->data_out_result ==
+			PQI_DATA_IN_OUT_UNDERFLOW)
+			scsi_status = SAM_STAT_GOOD;
+
+		if (scsi_status != SAM_STAT_GOOD)
+			rc = -EIO;
+	}
+
+	pqi_free_io_request(io_request);
+
+	up(&ctrl_info->sync_request_sem);
+
+	return rc;
+}
+
+static int pqi_validate_admin_response(
+	struct pqi_general_admin_response *response, u8 expected_function_code)
+{
+	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
+		return -EINVAL;
+
+	if (get_unaligned_le16(&response->header.iu_length) !=
+		PQI_GENERAL_ADMIN_IU_LENGTH)
+		return -EINVAL;
+
+	if (response->function_code != expected_function_code)
+		return -EINVAL;
+
+	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int pqi_submit_admin_request_synchronous(
+	struct pqi_ctrl_info *ctrl_info,
+	struct pqi_general_admin_request *request,
+	struct pqi_general_admin_response *response)
+{
+	int rc;
+
+	pqi_submit_admin_request(ctrl_info, request);
+
+	rc = pqi_poll_for_admin_response(ctrl_info, response);
+
+	if (rc == 0)
+		rc = pqi_validate_admin_response(response,
+			request->function_code);
+
+	return rc;
+}
+
+static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct pqi_general_admin_request request;
+	struct pqi_general_admin_response response;
+	struct pqi_device_capability *capability;
+	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
+
+	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
+	if (!capability)
+		return -ENOMEM;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+		&request.header.iu_length);
+	request.function_code =
+		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
+	put_unaligned_le32(sizeof(*capability),
+		&request.data.report_device_capability.buffer_length);
+
+	rc = pqi_map_single(ctrl_info->pci_dev,
+		&request.data.report_device_capability.sg_descriptor,
+		capability, sizeof(*capability),
+		PCI_DMA_FROMDEVICE);
+	if (rc)
+		goto out;
+
+	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+		&response);
+
+	pqi_pci_unmap(ctrl_info->pci_dev,
+		&request.data.report_device_capability.sg_descriptor, 1,
+		PCI_DMA_FROMDEVICE);
+
+	if (rc)
+		goto out;
+
+	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
+		rc = -EIO;
+		goto out;
+	}
+
+	ctrl_info->max_inbound_queues =
+		get_unaligned_le16(&capability->max_inbound_queues);
+	ctrl_info->max_elements_per_iq =
+		get_unaligned_le16(&capability->max_elements_per_iq);
+	ctrl_info->max_iq_element_length =
+		get_unaligned_le16(&capability->max_iq_element_length)
+		* 16;
+	ctrl_info->max_outbound_queues =
+		get_unaligned_le16(&capability->max_outbound_queues);
+	ctrl_info->max_elements_per_oq =
+		get_unaligned_le16(&capability->max_elements_per_oq);
+	ctrl_info->max_oq_element_length =
+		get_unaligned_le16(&capability->max_oq_element_length)
+		* 16;
+
+	sop_iu_layer_descriptor =
+		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
+
+	ctrl_info->max_inbound_iu_length_per_firmware =
+		get_unaligned_le16(
+			&sop_iu_layer_descriptor->max_inbound_iu_length);
+	ctrl_info->inbound_spanning_supported =
+		sop_iu_layer_descriptor->inbound_spanning_supported;
+	ctrl_info->outbound_spanning_supported =
+		sop_iu_layer_descriptor->outbound_spanning_supported;
+
+out:
+	kfree(capability);
+
+	return rc;
+}
+
+static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
+{
+	if (ctrl_info->max_iq_element_length <
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"max. inbound queue element length of %d is less than the required length of %d\n",
+			ctrl_info->max_iq_element_length,
+			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+		return -EINVAL;
+	}
+
+	if (ctrl_info->max_oq_element_length <
+		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"max. outbound queue element length of %d is less than the required length of %d\n",
+			ctrl_info->max_oq_element_length,
+			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
+		return -EINVAL;
+	}
+
+	if (ctrl_info->max_inbound_iu_length_per_firmware <
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"max. inbound IU length of %u is less than the min. required length of %d\n",
+			ctrl_info->max_inbound_iu_length_per_firmware,
+			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+		return -EINVAL;
+	}
+
+	if (!ctrl_info->inbound_spanning_supported) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"the controller does not support inbound spanning\n");
+		return -EINVAL;
+	}
+
+	if (ctrl_info->outbound_spanning_supported) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"the controller supports outbound spanning but this driver does not\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
+	bool inbound_queue, u16 queue_id)
+{
+	struct pqi_general_admin_request request;
+	struct pqi_general_admin_response response;
+
+	memset(&request, 0, sizeof(request));
+	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+		&request.header.iu_length);
+	if (inbound_queue)
+		request.function_code =
+			PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
+	else
+		request.function_code =
+			PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
+	put_unaligned_le16(queue_id,
+		&request.data.delete_operational_queue.queue_id);
+
+	return pqi_submit_admin_request_synchronous(ctrl_info, &request,
+		&response);
+}
+
+static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct pqi_event_queue *event_queue;
+	struct pqi_general_admin_request request;
+	struct pqi_general_admin_response response;
+
+	event_queue = &ctrl_info->event_queue;
+
+	/*
+	 * Create OQ (Outbound Queue - device to host queue) dedicated
+	 * to events.
+	 */
+	memset(&request, 0, sizeof(request));
+	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+		&request.header.iu_length);
+	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
+	put_unaligned_le16(event_queue->oq_id,
+		&request.data.create_operational_oq.queue_id);
+	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
+		&request.data.create_operational_oq.element_array_addr);
+	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
+		&request.data.create_operational_oq.pi_addr);
+	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
+		&request.data.create_operational_oq.num_elements);
+	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
+		&request.data.create_operational_oq.element_length);
+	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
+	put_unaligned_le16(event_queue->int_msg_num,
+		&request.data.create_operational_oq.int_msg_num);
+
+	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+		&response);
+	if (rc)
+		return rc;
+
+	event_queue->oq_ci = ctrl_info->iomem_base +
+		PQI_DEVICE_REGISTERS_OFFSET +
+		get_unaligned_le64(
+			&response.data.create_operational_oq.oq_ci_offset);
+
+	return 0;
+}
+
+static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	int rc;
+	struct pqi_queue_group *queue_group;
+	struct pqi_general_admin_request request;
+	struct pqi_general_admin_response response;
+
+	i = ctrl_info->num_active_queue_groups;
+	queue_group = &ctrl_info->queue_groups[i];
+
+	/*
+	 * Create IQ (Inbound Queue - host to device queue) for
+	 * RAID path.
+	 */
+	memset(&request, 0, sizeof(request));
+	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+		&request.header.iu_length);
+	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
+	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
+		&request.data.create_operational_iq.queue_id);
+	put_unaligned_le64(
+		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
+		&request.data.create_operational_iq.element_array_addr);
+	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
+		&request.data.create_operational_iq.ci_addr);
+	put_unaligned_le16(ctrl_info->num_elements_per_iq,
+		&request.data.create_operational_iq.num_elements);
+	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
+		&request.data.create_operational_iq.element_length);
+	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
+
+	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+		&response);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error creating inbound RAID queue\n");
+		return rc;
+	}
+
+	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
+		PQI_DEVICE_REGISTERS_OFFSET +
+		get_unaligned_le64(
+			&response.data.create_operational_iq.iq_pi_offset);
+
+	/*
+	 * Create IQ (Inbound Queue - host to device queue) for
+	 * Advanced I/O (AIO) path.
+	 */
+	memset(&request, 0, sizeof(request));
+	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+		&request.header.iu_length);
+	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
+	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
+		&request.data.create_operational_iq.queue_id);
+	put_unaligned_le64(
+		(u64)queue_group->iq_element_array_bus_addr[AIO_PATH],
+		&request.data.create_operational_iq.element_array_addr);
+	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
+		&request.data.create_operational_iq.ci_addr);
+	put_unaligned_le16(ctrl_info->num_elements_per_iq,
+		&request.data.create_operational_iq.num_elements);
+	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
+		&request.data.create_operational_iq.element_length);
+	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
+
+	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+		&response);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error creating inbound AIO queue\n");
+		goto delete_inbound_queue_raid;
+	}
+
+	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
+		PQI_DEVICE_REGISTERS_OFFSET +
+		get_unaligned_le64(
+			&response.data.create_operational_iq.iq_pi_offset);
+
+	/*
+	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
+	 * assumed to be for RAID path I/O unless we change the queue's
+	 * property.
+	 */
+	memset(&request, 0, sizeof(request));
+	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+		&request.header.iu_length);
+	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
+	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
+		&request.data.change_operational_iq_properties.queue_id);
+	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
+		&request.data.change_operational_iq_properties.vendor_specific);
+
+	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+		&response);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error changing queue property\n");
+		goto delete_inbound_queue_aio;
+	}
+
+	/*
+	 * Create OQ (Outbound Queue - device to host queue).
+	 */
+	memset(&request, 0, sizeof(request));
+	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
+	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
+		&request.header.iu_length);
+	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
+	put_unaligned_le16(queue_group->oq_id,
+		&request.data.create_operational_oq.queue_id);
+	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
+		&request.data.create_operational_oq.element_array_addr);
+	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
+		&request.data.create_operational_oq.pi_addr);
+	put_unaligned_le16(ctrl_info->num_elements_per_oq,
+		&request.data.create_operational_oq.num_elements);
+	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
+		&request.data.create_operational_oq.element_length);
+	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
+	put_unaligned_le16(queue_group->int_msg_num,
+		&request.data.create_operational_oq.int_msg_num);
+
+	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
+		&response);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error creating outbound queue\n");
+		goto delete_inbound_queue_aio;
+	}
+
+	queue_group->oq_ci = ctrl_info->iomem_base +
+		PQI_DEVICE_REGISTERS_OFFSET +
+		get_unaligned_le64(
+			&response.data.create_operational_oq.oq_ci_offset);
+
+	ctrl_info->num_active_queue_groups++;
+
+	return 0;
+
+delete_inbound_queue_aio:
+	pqi_delete_operational_queue(ctrl_info, true,
+		queue_group->iq_id[AIO_PATH]);
+
+delete_inbound_queue_raid:
+	pqi_delete_operational_queue(ctrl_info, true,
+		queue_group->iq_id[RAID_PATH]);
+
+	return rc;
+}
+
+static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	unsigned int i;
+
+	rc = pqi_create_event_queue(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error creating event queue\n");
+		return rc;
+	}
+
+	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
+		rc = pqi_create_queue_group(ctrl_info);
+		if (rc) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"error creating queue group number %u/%u\n",
+				i, ctrl_info->num_queue_groups);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
+	(offsetof(struct pqi_event_config, descriptors) + \
+	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
+
+static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	unsigned int i;
+	struct pqi_event_config *event_config;
+	struct pqi_general_management_request request;
+
+	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+		GFP_KERNEL);
+	if (!event_config)
+		return -ENOMEM;
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
+	put_unaligned_le16(offsetof(struct pqi_general_management_request,
+		data.report_event_configuration.sg_descriptors[1]) -
+		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+		&request.data.report_event_configuration.buffer_length);
+
+	rc = pqi_map_single(ctrl_info->pci_dev,
+		request.data.report_event_configuration.sg_descriptors,
+		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+		PCI_DMA_FROMDEVICE);
+	if (rc)
+		goto out;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		0, NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev,
+		request.data.report_event_configuration.sg_descriptors, 1,
+		PCI_DMA_FROMDEVICE);
+
+	if (rc)
+		goto out;
+
+	for (i = 0; i < event_config->num_event_descriptors; i++)
+		put_unaligned_le16(ctrl_info->event_queue.oq_id,
+			&event_config->descriptors[i].oq_id);
+
+	memset(&request, 0, sizeof(request));
+
+	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
+	put_unaligned_le16(offsetof(struct pqi_general_management_request,
+		data.report_event_configuration.sg_descriptors[1]) -
+		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
+	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+		&request.data.report_event_configuration.buffer_length);
+
+	rc = pqi_map_single(ctrl_info->pci_dev,
+		request.data.report_event_configuration.sg_descriptors,
+		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
+		PCI_DMA_TODEVICE);
+	if (rc)
+		goto out;
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
+		NULL, NO_TIMEOUT);
+
+	pqi_pci_unmap(ctrl_info->pci_dev,
+		request.data.report_event_configuration.sg_descriptors, 1,
+		PCI_DMA_TODEVICE);
+
+out:
+	kfree(event_config);
+
+	return rc;
+}
+
+static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	struct device *dev;
+	size_t sg_chain_buffer_length;
+	struct pqi_io_request *io_request;
+
+	if (!ctrl_info->io_request_pool)
+		return;
+
+	dev = &ctrl_info->pci_dev->dev;
+	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
+	io_request = ctrl_info->io_request_pool;
+
+	for (i = 0; i < ctrl_info->max_io_slots; i++) {
+		kfree(io_request->iu);
+		if (!io_request->sg_chain_buffer)
+			break;
+		dma_free_coherent(dev, sg_chain_buffer_length,
+			io_request->sg_chain_buffer,
+			io_request->sg_chain_buffer_dma_handle);
+		io_request++;
+	}
+
+	kfree(ctrl_info->io_request_pool);
+	ctrl_info->io_request_pool = NULL;
+}
+
+static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
+		ctrl_info->error_buffer_length,
+		&ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
+
+	if (!ctrl_info->error_buffer)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	void *sg_chain_buffer;
+	size_t sg_chain_buffer_length;
+	dma_addr_t sg_chain_buffer_dma_handle;
+	struct device *dev;
+	struct pqi_io_request *io_request;
+
+	ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
+		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
+
+	if (!ctrl_info->io_request_pool) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to allocate I/O request pool\n");
+		goto error;
+	}
+
+	dev = &ctrl_info->pci_dev->dev;
+	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
+	io_request = ctrl_info->io_request_pool;
+
+	for (i = 0; i < ctrl_info->max_io_slots; i++) {
+		io_request->iu =
+			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
+
+		if (!io_request->iu) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"failed to allocate IU buffers\n");
+			goto error;
+		}
+
+		sg_chain_buffer = dma_alloc_coherent(dev,
+			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
+			GFP_KERNEL);
+
+		if (!sg_chain_buffer) {
+			dev_err(&ctrl_info->pci_dev->dev,
+				"failed to allocate PQI scatter-gather chain buffers\n");
+			goto error;
+		}
+
+		io_request->index = i;
+		io_request->sg_chain_buffer = sg_chain_buffer;
+		io_request->sg_chain_buffer_dma_handle =
+			sg_chain_buffer_dma_handle;
+		io_request++;
+	}
+
+	return 0;
+
+error:
+	pqi_free_all_io_requests(ctrl_info);
+
+	return -ENOMEM;
+}
+
+/*
+ * Calculate required resources that are sized based on max. outstanding
+ * requests and max. transfer size.
+ */
+
+static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
+{
+	u32 max_transfer_size;
+	u32 max_sg_entries;
+
+	ctrl_info->scsi_ml_can_queue =
+		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
+	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
+
+	ctrl_info->error_buffer_length =
+		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
+
+	max_transfer_size =
+		min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
+
+	max_sg_entries = max_transfer_size / PAGE_SIZE;
+
+	/* +1 to cover the case where the buffer is not page-aligned. */
+	max_sg_entries++;
+
+	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
+
+	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
+
+	ctrl_info->sg_chain_buffer_length =
+		max_sg_entries * sizeof(struct pqi_sg_descriptor);
+	ctrl_info->sg_tablesize = max_sg_entries;
+	ctrl_info->max_sectors = max_transfer_size / 512;
+}
+
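+/*
+ * Worked example of the I/O sizing above (hypothetical numbers): with 4 KiB
+ * pages and a 1 MiB transfer cap, max_sg_entries starts at 256, plus 1 for a
+ * non-page-aligned buffer, giving 257; max_transfer_size is then rounded back
+ * down to (257 - 1) * 4 KiB = 1 MiB, i.e. max_sectors = 2048.
+ */
+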
+static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
+{
+	int num_cpus;
+	int max_queue_groups;
+	int num_queue_groups;
+	u16 num_elements_per_iq;
+	u16 num_elements_per_oq;
+
+	max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
+		ctrl_info->max_outbound_queues - 1);
+	max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
+
+	num_cpus = num_online_cpus();
+	num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+	num_queue_groups = min(num_queue_groups, max_queue_groups);
+
+	ctrl_info->num_queue_groups = num_queue_groups;
+
+	/*
+	 * Make sure that the max. inbound IU length is an even multiple
+	 * of our inbound element length.
+	 */
+	ctrl_info->max_inbound_iu_length =
+		(ctrl_info->max_inbound_iu_length_per_firmware /
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
+
+	num_elements_per_iq =
+		(ctrl_info->max_inbound_iu_length /
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+	/* Add one because one element in each queue is unusable. */
+	num_elements_per_iq++;
+
+	num_elements_per_iq = min(num_elements_per_iq,
+		ctrl_info->max_elements_per_iq);
+
+	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
+	num_elements_per_oq = min(num_elements_per_oq,
+		ctrl_info->max_elements_per_oq);
+
+	ctrl_info->num_elements_per_iq = num_elements_per_iq;
+	ctrl_info->num_elements_per_oq = num_elements_per_oq;
+
+	ctrl_info->max_sg_per_iu =
+		((ctrl_info->max_inbound_iu_length -
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
+		sizeof(struct pqi_sg_descriptor)) +
+		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
+}
+
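+/*
+ * Worked example of the queue sizing above (hypothetical numbers): if the
+ * firmware reports a maximum inbound IU length of 1024 bytes and the IQ
+ * element length is 128 bytes, max_inbound_iu_length stays 1024, giving
+ * 1024 / 128 + 1 = 9 elements per IQ (the +1 compensates for the one
+ * unusable slot) and ((9 - 1) * 2) + 1 = 17 elements per OQ.
+ */
+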
+static inline void pqi_set_sg_descriptor(
+	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
+{
+	u64 address = (u64)sg_dma_address(sg);
+	unsigned int length = sg_dma_len(sg);
+
+	put_unaligned_le64(address, &sg_descriptor->address);
+	put_unaligned_le32(length, &sg_descriptor->length);
+	put_unaligned_le32(0, &sg_descriptor->flags);
+}
+
+static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
+	struct pqi_io_request *io_request)
+{
+	int i;
+	u16 iu_length;
+	int sg_count;
+	bool chained;
+	unsigned int num_sg_in_iu;
+	unsigned int max_sg_per_iu;
+	struct scatterlist *sg;
+	struct pqi_sg_descriptor *sg_descriptor;
+
+	sg_count = scsi_dma_map(scmd);
+	if (sg_count < 0)
+		return sg_count;
+
+	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
+		PQI_REQUEST_HEADER_LENGTH;
+
+	if (sg_count == 0)
+		goto out;
+
+	sg = scsi_sglist(scmd);
+	sg_descriptor = request->sg_descriptors;
+	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
+	chained = false;
+	num_sg_in_iu = 0;
+	i = 0;
+
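+	/*
+	 * Fill the embedded descriptors first.  One embedded slot is held in
+	 * reserve; when it is reached, it is turned into a CISS_SG_CHAIN
+	 * descriptor pointing at the per-request chain buffer, and the
+	 * remaining entries are written there instead.
+	 */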
+	while (1) {
+		pqi_set_sg_descriptor(sg_descriptor, sg);
+		if (!chained)
+			num_sg_in_iu++;
+		i++;
+		if (i == sg_count)
+			break;
+		sg_descriptor++;
+		if (i == max_sg_per_iu) {
+			put_unaligned_le64(
+				(u64)io_request->sg_chain_buffer_dma_handle,
+				&sg_descriptor->address);
+			put_unaligned_le32((sg_count - num_sg_in_iu)
+				* sizeof(*sg_descriptor),
+				&sg_descriptor->length);
+			put_unaligned_le32(CISS_SG_CHAIN,
+				&sg_descriptor->flags);
+			chained = true;
+			num_sg_in_iu++;
+			sg_descriptor = io_request->sg_chain_buffer;
+		}
+		sg = sg_next(sg);
+	}
+
+	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+	request->partial = chained;
+	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+	put_unaligned_le16(iu_length, &request->header.iu_length);
+
+	return 0;
+}
+
+static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
+	struct pqi_io_request *io_request)
+{
+	int i;
+	u16 iu_length;
+	int sg_count;
+	bool chained;
+	unsigned int num_sg_in_iu;
+	unsigned int max_sg_per_iu;
+	struct scatterlist *sg;
+	struct pqi_sg_descriptor *sg_descriptor;
+
+	sg_count = scsi_dma_map(scmd);
+	if (sg_count < 0)
+		return sg_count;
+
+	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
+		PQI_REQUEST_HEADER_LENGTH;
+	num_sg_in_iu = 0;
+
+	if (sg_count == 0)
+		goto out;
+
+	sg = scsi_sglist(scmd);
+	sg_descriptor = request->sg_descriptors;
+	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
+	chained = false;
+	i = 0;
+
+	while (1) {
+		pqi_set_sg_descriptor(sg_descriptor, sg);
+		if (!chained)
+			num_sg_in_iu++;
+		i++;
+		if (i == sg_count)
+			break;
+		sg_descriptor++;
+		if (i == max_sg_per_iu) {
+			put_unaligned_le64(
+				(u64)io_request->sg_chain_buffer_dma_handle,
+				&sg_descriptor->address);
+			put_unaligned_le32((sg_count - num_sg_in_iu)
+				* sizeof(*sg_descriptor),
+				&sg_descriptor->length);
+			put_unaligned_le32(CISS_SG_CHAIN,
+				&sg_descriptor->flags);
+			chained = true;
+			num_sg_in_iu++;
+			sg_descriptor = io_request->sg_chain_buffer;
+		}
+		sg = sg_next(sg);
+	}
+
+	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
+	request->partial = chained;
+	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
+
+out:
+	put_unaligned_le16(iu_length, &request->header.iu_length);
+	request->num_sg_descriptors = num_sg_in_iu;
+
+	return 0;
+}
+
+static void pqi_raid_io_complete(struct pqi_io_request *io_request,
+	void *context)
+{
+	struct scsi_cmnd *scmd;
+
+	scmd = io_request->scmd;
+	pqi_free_io_request(io_request);
+	scsi_dma_unmap(scmd);
+	pqi_scsi_done(scmd);
+}
+
+static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+	struct pqi_queue_group *queue_group)
+{
+	int rc;
+	size_t cdb_length;
+	struct pqi_io_request *io_request;
+	struct pqi_raid_path_request *request;
+
+	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request->io_complete_callback = pqi_raid_io_complete;
+	io_request->scmd = scmd;
+
+	scmd->host_scribble = (unsigned char *)io_request;
+
+	request = io_request->iu;
+	memset(request, 0,
+		offsetof(struct pqi_raid_path_request, sg_descriptors));
+
+	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
+	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+	put_unaligned_le16(io_request->index, &request->request_id);
+	request->error_index = request->request_id;
+	memcpy(request->lun_number, device->scsi3addr,
+		sizeof(request->lun_number));
+
+	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
+	memcpy(request->cdb, scmd->cmnd, cdb_length);
+
+	switch (cdb_length) {
+	case 6:
+	case 10:
+	case 12:
+	case 16:
+		/* No bytes in the Additional CDB bytes field */
+		request->additional_cdb_bytes_usage =
+			SOP_ADDITIONAL_CDB_BYTES_0;
+		break;
+	case 20:
+		/* 4 bytes in the Additional CDB field */
+		request->additional_cdb_bytes_usage =
+			SOP_ADDITIONAL_CDB_BYTES_4;
+		break;
+	case 24:
+		/* 8 bytes in the Additional CDB field */
+		request->additional_cdb_bytes_usage =
+			SOP_ADDITIONAL_CDB_BYTES_8;
+		break;
+	case 28:
+		/* 12 bytes in the Additional CDB field */
+		request->additional_cdb_bytes_usage =
+			SOP_ADDITIONAL_CDB_BYTES_12;
+		break;
+	case 32:
+	default:
+		/* 16 bytes in the Additional CDB field */
+		request->additional_cdb_bytes_usage =
+			SOP_ADDITIONAL_CDB_BYTES_16;
+		break;
+	}
+
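+	/*
+	 * The SOP data-direction flags are expressed from the controller's
+	 * point of view: a host write (DMA_TO_DEVICE) is a controller READ
+	 * from host memory, and a host read (DMA_FROM_DEVICE) is a
+	 * controller WRITE to host memory.
+	 */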
+	switch (scmd->sc_data_direction) {
+	case DMA_TO_DEVICE:
+		request->data_direction = SOP_READ_FLAG;
+		break;
+	case DMA_FROM_DEVICE:
+		request->data_direction = SOP_WRITE_FLAG;
+		break;
+	case DMA_NONE:
+		request->data_direction = SOP_NO_DIRECTION_FLAG;
+		break;
+	case DMA_BIDIRECTIONAL:
+		request->data_direction = SOP_BIDIRECTIONAL;
+		break;
+	default:
+		dev_err(&ctrl_info->pci_dev->dev,
+			"unknown data direction: %d\n",
+			scmd->sc_data_direction);
+		WARN_ON(scmd->sc_data_direction);
+		break;
+	}
+
+	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
+	if (rc) {
+		pqi_free_io_request(io_request);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
+
+	return 0;
+}
+
+static void pqi_aio_io_complete(struct pqi_io_request *io_request,
+	void *context)
+{
+	struct scsi_cmnd *scmd;
+
+	scmd = io_request->scmd;
+	scsi_dma_unmap(scmd);
+	if (io_request->status == -EAGAIN)
+		set_host_byte(scmd, DID_IMM_RETRY);
+	pqi_free_io_request(io_request);
+	pqi_scsi_done(scmd);
+}
+
+static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
+	struct pqi_queue_group *queue_group)
+{
+	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
+		scmd->cmnd, scmd->cmd_len, queue_group, NULL);
+}
+
+static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
+	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
+	unsigned int cdb_length, struct pqi_queue_group *queue_group,
+	struct pqi_encryption_info *encryption_info)
+{
+	int rc;
+	struct pqi_io_request *io_request;
+	struct pqi_aio_path_request *request;
+
+	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request->io_complete_callback = pqi_aio_io_complete;
+	io_request->scmd = scmd;
+
+	scmd->host_scribble = (unsigned char *)io_request;
+
+	request = io_request->iu;
+	memset(request, 0,
+		offsetof(struct pqi_aio_path_request, sg_descriptors));
+
+	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
+	put_unaligned_le32(aio_handle, &request->nexus_id);
+	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
+	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+	put_unaligned_le16(io_request->index, &request->request_id);
+	request->error_index = request->request_id;
+	if (cdb_length > sizeof(request->cdb))
+		cdb_length = sizeof(request->cdb);
+	request->cdb_length = cdb_length;
+	memcpy(request->cdb, cdb, cdb_length);
+
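+	/* Same controller-relative direction mapping as the RAID path above. */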
+	switch (scmd->sc_data_direction) {
+	case DMA_TO_DEVICE:
+		request->data_direction = SOP_READ_FLAG;
+		break;
+	case DMA_FROM_DEVICE:
+		request->data_direction = SOP_WRITE_FLAG;
+		break;
+	case DMA_NONE:
+		request->data_direction = SOP_NO_DIRECTION_FLAG;
+		break;
+	case DMA_BIDIRECTIONAL:
+		request->data_direction = SOP_BIDIRECTIONAL;
+		break;
+	default:
+		dev_err(&ctrl_info->pci_dev->dev,
+			"unknown data direction: %d\n",
+			scmd->sc_data_direction);
+		WARN_ON(scmd->sc_data_direction);
+		break;
+	}
+
+	if (encryption_info) {
+		request->encryption_enable = true;
+		put_unaligned_le16(encryption_info->data_encryption_key_index,
+			&request->data_encryption_key_index);
+		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
+			&request->encrypt_tweak_lower);
+		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
+			&request->encrypt_tweak_upper);
+	}
+
+	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
+	if (rc) {
+		pqi_free_io_request(io_request);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
+
+	return 0;
+}
+
+static int pqi_scsi_queue_command(struct Scsi_Host *shost,
+	struct scsi_cmnd *scmd)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_scsi_dev *device;
+	u16 hwq;
+	struct pqi_queue_group *queue_group;
+	bool raid_bypassed;
+
+	device = scmd->device->hostdata;
+	ctrl_info = shost_to_hba(shost);
+
+	if (pqi_ctrl_offline(ctrl_info)) {
+		set_host_byte(scmd, DID_NO_CONNECT);
+		pqi_scsi_done(scmd);
+		return 0;
+	}
+
+	/*
+	 * This is necessary because the SML doesn't zero out this field during
+	 * error recovery.
+	 */
+	scmd->result = 0;
+
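+	/*
+	 * Map the command's blk-mq hardware queue to the matching PQI queue
+	 * group, falling back to group 0 if the index is out of range.
+	 */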
+	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
+	if (hwq >= ctrl_info->num_queue_groups)
+		hwq = 0;
+
+	queue_group = &ctrl_info->queue_groups[hwq];
+
+	if (pqi_is_logical_device(device)) {
+		raid_bypassed = false;
+		if (device->offload_enabled &&
+			scmd->request->cmd_type == REQ_TYPE_FS) {
+			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
+				scmd, queue_group);
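+			/*
+			 * These return codes mean the bypass path has taken
+			 * ownership of the command; anything else falls back
+			 * to the normal RAID path below.
+			 */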
+			if (rc == 0 ||
+				rc == SCSI_MLQUEUE_HOST_BUSY ||
+				rc == SAM_STAT_CHECK_CONDITION ||
+				rc == SAM_STAT_RESERVATION_CONFLICT)
+				raid_bypassed = true;
+		}
+		if (!raid_bypassed)
+			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
+				queue_group);
+	} else {
+		if (device->aio_enabled)
+			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
+				queue_group);
+		else
+			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
+				queue_group);
+	}
+
+	return rc;
+}
+
+static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
+	void *context)
+{
+	struct completion *waiting = context;
+
+	complete(waiting);
+}
+
+#define PQI_LUN_RESET_TIMEOUT_SECS	10
+
+static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device, struct completion *wait)
+{
+	int rc;
+	unsigned int wait_secs = 0;
+
+	while (1) {
+		if (wait_for_completion_io_timeout(wait,
+			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
+			rc = 0;
+			break;
+		}
+
+		pqi_check_ctrl_health(ctrl_info);
+		if (pqi_ctrl_offline(ctrl_info)) {
+			rc = -ETIMEDOUT;
+			break;
+		}
+
+		wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;
+
+		dev_err(&ctrl_info->pci_dev->dev,
+			"resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
+			ctrl_info->scsi_host->host_no, device->bus,
+			device->target, device->lun, wait_secs);
+	}
+
+	return rc;
+}
+
+static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+	struct pqi_io_request *io_request;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	struct pqi_task_management_request *request;
+
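+	/* Limit concurrent LUN resets to the I/O slots reserved for them. */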
+	down(&ctrl_info->lun_reset_sem);
+
+	io_request = pqi_alloc_io_request(ctrl_info);
+	io_request->io_complete_callback = pqi_lun_reset_complete;
+	io_request->context = &wait;
+
+	request = io_request->iu;
+	memset(request, 0, sizeof(*request));
+
+	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
+	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
+		&request->header.iu_length);
+	put_unaligned_le16(io_request->index, &request->request_id);
+	memcpy(request->lun_number, device->scsi3addr,
+		sizeof(request->lun_number));
+	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
+
+	pqi_start_io(ctrl_info,
+		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
+		io_request);
+
+	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
+	if (rc == 0)
+		rc = io_request->status;
+
+	pqi_free_io_request(io_request);
+	up(&ctrl_info->lun_reset_sem);
+
+	return rc;
+}
+
+/* Performs a reset at the LUN level. */
+
+static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
+	struct pqi_scsi_dev *device)
+{
+	int rc;
+
+	pqi_check_ctrl_health(ctrl_info);
+	if (pqi_ctrl_offline(ctrl_info))
+		return FAILED;
+
+	rc = pqi_lun_reset(ctrl_info, device);
+
+	return rc == 0 ? SUCCESS : FAILED;
+}
+
+static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info;
+	struct pqi_scsi_dev *device;
+
+	ctrl_info = shost_to_hba(scmd->device->host);
+	device = scmd->device->hostdata;
+
+	dev_err(&ctrl_info->pci_dev->dev,
+		"resetting scsi %d:%d:%d:%d\n",
+		ctrl_info->scsi_host->host_no,
+		device->bus, device->target, device->lun);
+
+	rc = pqi_device_reset(ctrl_info, device);
+
+	dev_err(&ctrl_info->pci_dev->dev,
+		"reset of scsi %d:%d:%d:%d: %s\n",
+		ctrl_info->scsi_host->host_no,
+		device->bus, device->target, device->lun,
+		rc == SUCCESS ? "SUCCESS" : "FAILED");
+
+	return rc;
+}
+
+static int pqi_slave_alloc(struct scsi_device *sdev)
+{
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_target *starget;
+	struct sas_rphy *rphy;
+
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
+		starget = scsi_target(sdev);
+		rphy = target_to_rphy(starget);
+		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
+		if (device) {
+			device->target = sdev_id(sdev);
+			device->lun = sdev->lun;
+			device->target_lun_valid = true;
+		}
+	} else {
+		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
+			sdev_id(sdev), sdev->lun);
+	}
+
+	if (device && device->expose_device) {
+		sdev->hostdata = device;
+		device->sdev = sdev;
+		if (device->queue_depth) {
+			device->advertised_queue_depth = device->queue_depth;
+			scsi_change_queue_depth(sdev,
+				device->advertised_queue_depth);
+		}
+	}
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return 0;
+}
+
+static int pqi_slave_configure(struct scsi_device *sdev)
+{
+	struct pqi_scsi_dev *device;
+
+	device = sdev->hostdata;
+	if (!device->expose_device)
+		sdev->no_uld_attach = true;
+
+	return 0;
+}
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
+	void __user *arg)
+{
+	struct pci_dev *pci_dev;
+	u32 subsystem_vendor;
+	u32 subsystem_device;
+	cciss_pci_info_struct pciinfo;
+
+	if (!arg)
+		return -EINVAL;
+
+	pci_dev = ctrl_info->pci_dev;
+
+	pciinfo.domain = pci_domain_nr(pci_dev->bus);
+	pciinfo.bus = pci_dev->bus->number;
+	pciinfo.dev_fn = pci_dev->devfn;
+	subsystem_vendor = pci_dev->subsystem_vendor;
+	subsystem_device = pci_dev->subsystem_device;
+	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
+		subsystem_vendor;
+
+	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int pqi_getdrivver_ioctl(void __user *arg)
+{
+	u32 version;
+
+	if (!arg)
+		return -EINVAL;
+
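+	/* Pack the four driver version components into a single 32-bit word. */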
+	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
+		(DRIVER_RELEASE << 16) | DRIVER_REVISION;
+
+	if (copy_to_user(arg, &version, sizeof(version)))
+		return -EFAULT;
+
+	return 0;
+}
+
+struct ciss_error_info {
+	u8	scsi_status;
+	int	command_status;
+	size_t	sense_data_length;
+};
+
+static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
+	struct ciss_error_info *ciss_error_info)
+{
+	int ciss_cmd_status;
+	size_t sense_data_length;
+
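+	/* Translate the PQI data-out result into the closest CISS command status. */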
+	switch (pqi_error_info->data_out_result) {
+	case PQI_DATA_IN_OUT_GOOD:
+		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
+		break;
+	case PQI_DATA_IN_OUT_UNDERFLOW:
+		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
+		break;
+	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
+		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
+		break;
+	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
+	case PQI_DATA_IN_OUT_BUFFER_ERROR:
+	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
+	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
+	case PQI_DATA_IN_OUT_ERROR:
+		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
+		break;
+	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
+	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
+	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
+	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
+	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
+	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
+	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
+	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
+	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
+	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
+		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
+		break;
+	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
+		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
+		break;
+	case PQI_DATA_IN_OUT_ABORTED:
+		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
+		break;
+	case PQI_DATA_IN_OUT_TIMEOUT:
+		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
+		break;
+	default:
+		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
+		break;
+	}
+
+	sense_data_length =
+		get_unaligned_le16(&pqi_error_info->sense_data_length);
+	if (sense_data_length == 0)
+		sense_data_length =
+		get_unaligned_le16(&pqi_error_info->response_data_length);
+	if (sense_data_length > sizeof(pqi_error_info->data))
+		sense_data_length = sizeof(pqi_error_info->data);
+
+	ciss_error_info->scsi_status = pqi_error_info->status;
+	ciss_error_info->command_status = ciss_cmd_status;
+	ciss_error_info->sense_data_length = sense_data_length;
+}
+
+static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
+{
+	int rc;
+	char *kernel_buffer = NULL;
+	u16 iu_length;
+	size_t sense_data_length;
+	IOCTL_Command_struct iocommand;
+	struct pqi_raid_path_request request;
+	struct pqi_raid_error_info pqi_error_info;
+	struct ciss_error_info ciss_error_info;
+
+	if (pqi_ctrl_offline(ctrl_info))
+		return -ENXIO;
+	if (!arg)
+		return -EINVAL;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
+		return -EFAULT;
+	if (iocommand.buf_size < 1 &&
+		iocommand.Request.Type.Direction != XFER_NONE)
+		return -EINVAL;
+	if (iocommand.Request.CDBLen > sizeof(request.cdb))
+		return -EINVAL;
+	if (iocommand.Request.Type.Type != TYPE_CMD)
+		return -EINVAL;
+
+	switch (iocommand.Request.Type.Direction) {
+	case XFER_NONE:
+	case XFER_WRITE:
+	case XFER_READ:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (iocommand.buf_size > 0) {
+		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
+		if (!kernel_buffer)
+			return -ENOMEM;
+		if (iocommand.Request.Type.Direction & XFER_WRITE) {
+			if (copy_from_user(kernel_buffer, iocommand.buf,
+				iocommand.buf_size)) {
+				rc = -EFAULT;
+				goto out;
+			}
+		} else {
+			memset(kernel_buffer, 0, iocommand.buf_size);
+		}
+	}
+
+	memset(&request, 0, sizeof(request));
+
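+	/*
+	 * The IU length covers everything after the common request header;
+	 * one SG descriptor is added below if a data buffer is attached.
+	 */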
+	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
+	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
+		PQI_REQUEST_HEADER_LENGTH;
+	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
+		sizeof(request.lun_number));
+	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
+	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
+
+	switch (iocommand.Request.Type.Direction) {
+	case XFER_NONE:
+		request.data_direction = SOP_NO_DIRECTION_FLAG;
+		break;
+	case XFER_WRITE:
+		request.data_direction = SOP_WRITE_FLAG;
+		break;
+	case XFER_READ:
+		request.data_direction = SOP_READ_FLAG;
+		break;
+	}
+
+	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
+
+	if (iocommand.buf_size > 0) {
+		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
+
+		rc = pqi_map_single(ctrl_info->pci_dev,
+			&request.sg_descriptors[0], kernel_buffer,
+			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+		if (rc)
+			goto out;
+
+		iu_length += sizeof(request.sg_descriptors[0]);
+	}
+
+	put_unaligned_le16(iu_length, &request.header.iu_length);
+
+	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
+		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
+
+	if (iocommand.buf_size > 0)
+		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
+			PCI_DMA_BIDIRECTIONAL);
+
+	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
+
+	if (rc == 0) {
+		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
+		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
+		iocommand.error_info.CommandStatus =
+			ciss_error_info.command_status;
+		sense_data_length = ciss_error_info.sense_data_length;
+		if (sense_data_length) {
+			if (sense_data_length >
+				sizeof(iocommand.error_info.SenseInfo))
+				sense_data_length =
+					sizeof(iocommand.error_info.SenseInfo);
+			memcpy(iocommand.error_info.SenseInfo,
+				pqi_error_info.data, sense_data_length);
+			iocommand.error_info.SenseLen = sense_data_length;
+		}
+	}
+
+	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	if (rc == 0 && iocommand.buf_size > 0 &&
+		(iocommand.Request.Type.Direction & XFER_READ)) {
+		if (copy_to_user(iocommand.buf, kernel_buffer,
+			iocommand.buf_size)) {
+			rc = -EFAULT;
+		}
+	}
+
+out:
+	kfree(kernel_buffer);
+
+	return rc;
+}
+
+static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = shost_to_hba(sdev->host);
+
+	switch (cmd) {
+	case CCISS_DEREGDISK:
+	case CCISS_REGNEWDISK:
+	case CCISS_REGNEWD:
+		rc = pqi_scan_scsi_devices(ctrl_info);
+		break;
+	case CCISS_GETPCIINFO:
+		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
+		break;
+	case CCISS_GETDRIVVER:
+		rc = pqi_getdrivver_ioctl(arg);
+		break;
+	case CCISS_PASSTHRU:
+		rc = pqi_passthru_ioctl(ctrl_info, arg);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static ssize_t pqi_version_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	ssize_t count = 0;
+	struct Scsi_Host *shost;
+	struct pqi_ctrl_info *ctrl_info;
+
+	shost = class_to_shost(dev);
+	ctrl_info = shost_to_hba(shost);
+
+	count += snprintf(buffer + count, PAGE_SIZE - count,
+		"  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
+
+	count += snprintf(buffer + count, PAGE_SIZE - count,
+		"firmware: %s\n", ctrl_info->firmware_version);
+
+	return count;
+}
+
+static ssize_t pqi_host_rescan_store(struct device *dev,
+	struct device_attribute *attr, const char *buffer, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	pqi_scan_start(shost);
+
+	return count;
+}
+
+static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);
+
+static struct device_attribute *pqi_shost_attrs[] = {
+	&dev_attr_version,
+	&dev_attr_rescan,
+	NULL
+};
+
+static ssize_t pqi_sas_address_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+	u64 sas_address;
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	if (pqi_is_logical_device(device)) {
+		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
+			flags);
+		return -ENODEV;
+	}
+	sas_address = device->sas_address;
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
+}
+
+static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
+	struct device_attribute *attr, char *buffer)
+{
+	struct pqi_ctrl_info *ctrl_info;
+	struct scsi_device *sdev;
+	struct pqi_scsi_dev *device;
+	unsigned long flags;
+
+	sdev = to_scsi_device(dev);
+	ctrl_info = shost_to_hba(sdev->host);
+
+	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+	device = sdev->hostdata;
+	buffer[0] = device->offload_enabled ? '1' : '0';
+	buffer[1] = '\n';
+	buffer[2] = '\0';
+
+	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+	return 2;
+}
+
+static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
+	pqi_ssd_smart_path_enabled_show, NULL);
+
+static struct device_attribute *pqi_sdev_attrs[] = {
+	&dev_attr_sas_address,
+	&dev_attr_ssd_smart_path_enabled,
+	NULL
+};
+
+static struct scsi_host_template pqi_driver_template = {
+	.module = THIS_MODULE,
+	.name = DRIVER_NAME_SHORT,
+	.proc_name = DRIVER_NAME_SHORT,
+	.queuecommand = pqi_scsi_queue_command,
+	.scan_start = pqi_scan_start,
+	.scan_finished = pqi_scan_finished,
+	.this_id = -1,
+	.use_clustering = ENABLE_CLUSTERING,
+	.eh_device_reset_handler = pqi_eh_device_reset_handler,
+	.ioctl = pqi_ioctl,
+	.slave_alloc = pqi_slave_alloc,
+	.slave_configure = pqi_slave_configure,
+	.sdev_attrs = pqi_sdev_attrs,
+	.shost_attrs = pqi_shost_attrs,
+};
+
+static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct Scsi_Host *shost;
+
+	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
+	if (!shost) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"scsi_host_alloc failed for controller %u\n",
+			ctrl_info->ctrl_id);
+		return -ENOMEM;
+	}
+
+	shost->io_port = 0;
+	shost->n_io_port = 0;
+	shost->this_id = -1;
+	shost->max_channel = PQI_MAX_BUS;
+	shost->max_cmd_len = MAX_COMMAND_SIZE;
+	shost->max_lun = ~0;
+	shost->max_id = ~0;
+	shost->max_sectors = ctrl_info->max_sectors;
+	shost->can_queue = ctrl_info->scsi_ml_can_queue;
+	shost->cmd_per_lun = shost->can_queue;
+	shost->sg_tablesize = ctrl_info->sg_tablesize;
+	shost->transportt = pqi_sas_transport_template;
+	shost->irq = ctrl_info->msix_vectors[0];
+	shost->unique_id = shost->irq;
+	shost->nr_hw_queues = ctrl_info->num_queue_groups;
+	shost->hostdata[0] = (unsigned long)ctrl_info;
+
+	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"scsi_add_host failed for controller %u\n",
+			ctrl_info->ctrl_id);
+		goto free_host;
+	}
+
+	rc = pqi_add_sas_host(shost, ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"add SAS host failed for controller %u\n",
+			ctrl_info->ctrl_id);
+		goto remove_host;
+	}
+
+	ctrl_info->scsi_host = shost;
+
+	return 0;
+
+remove_host:
+	scsi_remove_host(shost);
+free_host:
+	scsi_host_put(shost);
+
+	return rc;
+}
+
+static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
+{
+	struct Scsi_Host *shost;
+
+	pqi_delete_sas_host(ctrl_info);
+
+	shost = ctrl_info->scsi_host;
+	if (!shost)
+		return;
+
+	scsi_remove_host(shost);
+	scsi_host_put(shost);
+}
+
+#define PQI_RESET_ACTION_RESET		0x1
+
+#define PQI_RESET_TYPE_NO_RESET		0x0
+#define PQI_RESET_TYPE_SOFT_RESET	0x1
+#define PQI_RESET_TYPE_FIRM_RESET	0x2
+#define PQI_RESET_TYPE_HARD_RESET	0x3
+
+static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	u32 reset_params;
+
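+	/* The reset action is packed into the bits above the 5-bit reset type. */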
+	reset_params = (PQI_RESET_ACTION_RESET << 5) |
+		PQI_RESET_TYPE_HARD_RESET;
+
+	writel(reset_params,
+		&ctrl_info->pqi_registers->device_reset);
+
+	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+	if (rc)
+		dev_err(&ctrl_info->pci_dev->dev,
+			"PQI reset failed\n");
+
+	return rc;
+}
+
+static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	struct bmic_identify_controller *identify;
+
+	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
+	if (!identify)
+		return -ENOMEM;
+
+	rc = pqi_identify_controller(ctrl_info, identify);
+	if (rc)
+		goto out;
+
+	memcpy(ctrl_info->firmware_version, identify->firmware_version,
+		sizeof(identify->firmware_version));
+	ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
+	snprintf(ctrl_info->firmware_version +
+		strlen(ctrl_info->firmware_version),
+		sizeof(ctrl_info->firmware_version) -
+		strlen(ctrl_info->firmware_version),
+		"-%u", get_unaligned_le16(&identify->firmware_build_number));
+
+out:
+	kfree(identify);
+
+	return rc;
+}
+
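+/*
+ * In a kdump kernel (reset_devices set), the crashed kernel may have left
+ * the controller in PQI mode; knock it back into SIS mode before
+ * reinitializing.
+ */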
+static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
+{
+	if (!sis_is_firmware_running(ctrl_info))
+		return -ENXIO;
+
+	if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
+		sis_disable_msix(ctrl_info);
+		if (pqi_reset(ctrl_info) == 0)
+			sis_reenable_sis_mode(ctrl_info);
+	}
+
+	return 0;
+}
+
+static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+
+	if (reset_devices) {
+		rc = pqi_kdump_init(ctrl_info);
+		if (rc)
+			return rc;
+	}
+
+	/*
+	 * When the controller comes out of reset, it is always running
+	 * in legacy SIS mode.  This is so that it can be compatible
+	 * with legacy drivers shipped with OSes.  So we have to talk
+	 * to it using SIS commands at first.  Once we are satisfied
+	 * that the controller supports PQI, we transition it into PQI
+	 * mode.
+	 */
+
+	/*
+	 * Wait until the controller is ready to start accepting SIS
+	 * commands.
+	 */
+	rc = sis_wait_for_ctrl_ready(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error initializing SIS interface\n");
+		return rc;
+	}
+
+	/*
+	 * Get the controller properties.  This allows us to determine
+	 * whether or not it supports PQI mode.
+	 */
+	rc = sis_get_ctrl_properties(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining controller properties\n");
+		return rc;
+	}
+
+	rc = sis_get_pqi_capabilities(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining controller capabilities\n");
+		return rc;
+	}
+
+	if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
+		ctrl_info->max_outstanding_requests =
+			PQI_MAX_OUTSTANDING_REQUESTS;
+
+	pqi_calculate_io_resources(ctrl_info);
+
+	rc = pqi_alloc_error_buffer(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to allocate PQI error buffer\n");
+		return rc;
+	}
+
+	/*
+	 * If the function we are about to call succeeds, the
+	 * controller will transition from legacy SIS mode
+	 * into PQI mode.
+	 */
+	rc = sis_init_base_struct_addr(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error initializing PQI mode\n");
+		return rc;
+	}
+
+	/* Wait for the controller to complete the SIS -> PQI transition. */
+	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"transition to PQI mode failed\n");
+		return rc;
+	}
+
+	/* From here on, we are running in PQI mode. */
+	ctrl_info->pqi_mode_enabled = true;
+	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
+
+	rc = pqi_alloc_admin_queues(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error allocating admin queues\n");
+		return rc;
+	}
+
+	rc = pqi_create_admin_queues(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error creating admin queues\n");
+		return rc;
+	}
+
+	rc = pqi_report_device_capability(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"obtaining device capability failed\n");
+		return rc;
+	}
+
+	rc = pqi_validate_device_capability(ctrl_info);
+	if (rc)
+		return rc;
+
+	pqi_calculate_queue_resources(ctrl_info);
+
+	rc = pqi_enable_msix_interrupts(ctrl_info);
+	if (rc)
+		return rc;
+
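+	/*
+	 * If fewer MSI-X vectors were granted than planned queue groups,
+	 * shrink the vector budget and redo the queue resource math.
+	 */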
+	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
+		ctrl_info->max_msix_vectors =
+			ctrl_info->num_msix_vectors_enabled;
+		pqi_calculate_queue_resources(ctrl_info);
+	}
+
+	rc = pqi_alloc_io_resources(ctrl_info);
+	if (rc)
+		return rc;
+
+	rc = pqi_alloc_operational_queues(ctrl_info);
+	if (rc)
+		return rc;
+
+	pqi_init_operational_queues(ctrl_info);
+
+	rc = pqi_request_irqs(ctrl_info);
+	if (rc)
+		return rc;
+
+	pqi_irq_set_affinity_hint(ctrl_info);
+
+	rc = pqi_create_queues(ctrl_info);
+	if (rc)
+		return rc;
+
+	sis_enable_msix(ctrl_info);
+
+	rc = pqi_configure_events(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error configuring events\n");
+		return rc;
+	}
+
+	pqi_start_heartbeat_timer(ctrl_info);
+
+	ctrl_info->controller_online = true;
+
+	/* Register with the SCSI subsystem. */
+	rc = pqi_register_scsi(ctrl_info);
+	if (rc)
+		return rc;
+
+	rc = pqi_get_ctrl_firmware_version(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error obtaining firmware version\n");
+		return rc;
+	}
+
+	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"error updating host wellness\n");
+		return rc;
+	}
+
+	pqi_schedule_update_time_worker(ctrl_info);
+
+	pqi_scan_scsi_devices(ctrl_info);
+
+	return 0;
+}
+
+static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
+{
+	int rc;
+	u64 mask;
+
+	rc = pci_enable_device(ctrl_info->pci_dev);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to enable PCI device\n");
+		return rc;
+	}
+
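+	/* Prefer 64-bit DMA addressing when dma_addr_t is wide enough to hold it. */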
+	if (sizeof(dma_addr_t) > 4)
+		mask = DMA_BIT_MASK(64);
+	else
+		mask = DMA_BIT_MASK(32);
+
+	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
+		goto disable_device;
+	}
+
+	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
+	if (rc) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to obtain PCI resources\n");
+		goto disable_device;
+	}
+
+	ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
+		ctrl_info->pci_dev, 0),
+		sizeof(struct pqi_ctrl_registers));
+	if (!ctrl_info->iomem_base) {
+		dev_err(&ctrl_info->pci_dev->dev,
+			"failed to map memory for controller registers\n");
+		rc = -ENOMEM;
+		goto release_regions;
+	}
+
+	ctrl_info->registers = ctrl_info->iomem_base;
+	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
+
+	/* Enable bus mastering. */
+	pci_set_master(ctrl_info->pci_dev);
+
+	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
+
+	return 0;
+
+release_regions:
+	pci_release_regions(ctrl_info->pci_dev);
+disable_device:
+	pci_disable_device(ctrl_info->pci_dev);
+
+	return rc;
+}
+
+static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
+{
+	iounmap(ctrl_info->iomem_base);
+	pci_release_regions(ctrl_info->pci_dev);
+	pci_disable_device(ctrl_info->pci_dev);
+	pci_set_drvdata(ctrl_info->pci_dev, NULL);
+}
+
+static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
+{
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
+			GFP_KERNEL, numa_node);
+	if (!ctrl_info)
+		return NULL;
+
+	mutex_init(&ctrl_info->scan_mutex);
+
+	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
+	spin_lock_init(&ctrl_info->scsi_device_list_lock);
+
+	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
+	atomic_set(&ctrl_info->num_interrupts, 0);
+
+	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
+	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
+
+	sema_init(&ctrl_info->sync_request_sem,
+		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
+	sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
+
+	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
+	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
+
+	return ctrl_info;
+}
+
+static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
+{
+	kfree(ctrl_info);
+}
+
+static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
+{
+	pqi_irq_unset_affinity_hint(ctrl_info);
+	pqi_free_irqs(ctrl_info);
+	if (ctrl_info->num_msix_vectors_enabled)
+		pci_disable_msix(ctrl_info->pci_dev);
+}
+
+static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
+{
+	pqi_stop_heartbeat_timer(ctrl_info);
+	pqi_free_interrupts(ctrl_info);
+	if (ctrl_info->queue_memory_base)
+		dma_free_coherent(&ctrl_info->pci_dev->dev,
+			ctrl_info->queue_memory_length,
+			ctrl_info->queue_memory_base,
+			ctrl_info->queue_memory_base_dma_handle);
+	if (ctrl_info->admin_queue_memory_base)
+		dma_free_coherent(&ctrl_info->pci_dev->dev,
+			ctrl_info->admin_queue_memory_length,
+			ctrl_info->admin_queue_memory_base,
+			ctrl_info->admin_queue_memory_base_dma_handle);
+	pqi_free_all_io_requests(ctrl_info);
+	if (ctrl_info->error_buffer)
+		dma_free_coherent(&ctrl_info->pci_dev->dev,
+			ctrl_info->error_buffer_length,
+			ctrl_info->error_buffer,
+			ctrl_info->error_buffer_dma_handle);
+	if (ctrl_info->iomem_base)
+		pqi_cleanup_pci_init(ctrl_info);
+	pqi_free_ctrl_info(ctrl_info);
+}
+
+static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
+{
+	cancel_delayed_work_sync(&ctrl_info->rescan_work);
+	cancel_delayed_work_sync(&ctrl_info->update_time_work);
+	pqi_remove_all_scsi_devices(ctrl_info);
+	pqi_unregister_scsi(ctrl_info);
+
+	if (ctrl_info->pqi_mode_enabled) {
+		sis_disable_msix(ctrl_info);
+		if (pqi_reset(ctrl_info) == 0)
+			sis_reenable_sis_mode(ctrl_info);
+	}
+	pqi_free_ctrl_resources(ctrl_info);
+}
+
+static void pqi_print_ctrl_info(struct pci_dev *pdev,
+	const struct pci_device_id *id)
+{
+	char *ctrl_description;
+
+	if (id->driver_data) {
+		ctrl_description = (char *)id->driver_data;
+	} else {
+		switch (id->subvendor) {
+		case PCI_VENDOR_ID_HP:
+			ctrl_description = hpe_branded_controller;
+			break;
+		case PCI_VENDOR_ID_ADAPTEC2:
+		default:
+			ctrl_description = microsemi_branded_controller;
+			break;
+		}
+	}
+
+	dev_info(&pdev->dev, "%s found\n", ctrl_description);
+}
+
+static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int rc;
+	int node;
+	struct pqi_ctrl_info *ctrl_info;
+
+	pqi_print_ctrl_info(pdev, id);
+
+	if (pqi_disable_device_id_wildcards &&
+		id->subvendor == PCI_ANY_ID &&
+		id->subdevice == PCI_ANY_ID) {
+		dev_warn(&pdev->dev,
+			"controller not probed because device ID wildcards are disabled\n");
+		return -ENODEV;
+	}
+
+	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
+		dev_warn(&pdev->dev,
+			"controller device ID matched using wildcards\n");
+
+	node = dev_to_node(&pdev->dev);
+	if (node == NUMA_NO_NODE)
+		set_dev_node(&pdev->dev, 0);
+
+	ctrl_info = pqi_alloc_ctrl_info(node);
+	if (!ctrl_info) {
+		dev_err(&pdev->dev,
+			"failed to allocate controller info block\n");
+		return -ENOMEM;
+	}
+
+	ctrl_info->pci_dev = pdev;
+
+	rc = pqi_pci_init(ctrl_info);
+	if (rc)
+		goto error;
+
+	rc = pqi_ctrl_init(ctrl_info);
+	if (rc)
+		goto error;
+
+	return 0;
+
+error:
+	pqi_remove_ctrl(ctrl_info);
+
+	return rc;
+}
+
+static void pqi_pci_remove(struct pci_dev *pdev)
+{
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = pci_get_drvdata(pdev);
+	if (!ctrl_info)
+		return;
+
+	pqi_remove_ctrl(ctrl_info);
+}
+
+static void pqi_shutdown(struct pci_dev *pdev)
+{
+	int rc;
+	struct pqi_ctrl_info *ctrl_info;
+
+	ctrl_info = pci_get_drvdata(pdev);
+	if (!ctrl_info)
+		goto error;
+
+	/*
+	 * Write all data in the controller's battery-backed cache to
+	 * storage.
+	 */
+	rc = pqi_flush_cache(ctrl_info);
+	if (rc == 0)
+		return;
+
+error:
+	dev_warn(&pdev->dev,
+		"unable to flush controller cache\n");
+}
+
+/* Define the PCI IDs for the controllers that we support. */
+static const struct pci_device_id pqi_pci_id_table[] = {
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0600)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0601)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0602)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0603)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0650)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0651)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0652)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0653)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0654)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0655)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0700)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x0701)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0800)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0801)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0802)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0803)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0804)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0805)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0901)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0902)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0903)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0904)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0905)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_ADAPTEC2, 0x0906)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x1001)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x1100)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x1101)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x1102)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_VENDOR_ID_HP, 0x1150)
+	},
+	{
+		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+			       PCI_ANY_ID, PCI_ANY_ID)
+	},
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
+
+static struct pci_driver pqi_pci_driver = {
+	.name = DRIVER_NAME_SHORT,
+	.id_table = pqi_pci_id_table,
+	.probe = pqi_pci_probe,
+	.remove = pqi_pci_remove,
+	.shutdown = pqi_shutdown,
+};
+
+static int __init pqi_init(void)
+{
+	int rc;
+
+	pr_info(DRIVER_NAME "\n");
+
+	pqi_sas_transport_template =
+		sas_attach_transport(&pqi_sas_transport_functions);
+	if (!pqi_sas_transport_template)
+		return -ENODEV;
+
+	rc = pci_register_driver(&pqi_pci_driver);
+	if (rc)
+		sas_release_transport(pqi_sas_transport_template);
+
+	return rc;
+}
+
+static void __exit pqi_cleanup(void)
+{
+	pci_unregister_driver(&pqi_pci_driver);
+	sas_release_transport(pqi_sas_transport_template);
+}
+
+module_init(pqi_init);
+module_exit(pqi_cleanup);
+
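+/*
+ * Compile-time layout checks: the on-the-wire structures must match the
+ * offsets the controller firmware expects.
+ */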
+static void __attribute__((unused)) verify_structures(void)
+{
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		sis_host_to_ctrl_doorbell) != 0x20);
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		sis_interrupt_mask) != 0x34);
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		sis_ctrl_to_host_doorbell) != 0x9c);
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		sis_ctrl_to_host_doorbell_clear) != 0xa0);
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		sis_driver_scratch) != 0xb0);
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		sis_firmware_status) != 0xbc);
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		sis_mailbox) != 0x1000);
+	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
+		pqi_registers) != 0x4000);
+
+	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+		iu_type) != 0x0);
+	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+		iu_length) != 0x2);
+	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+		response_queue_id) != 0x4);
+	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
+		work_area) != 0x6);
+	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
+
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		status) != 0x0);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		service_response) != 0x1);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		data_present) != 0x2);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		reserved) != 0x3);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		residual_count) != 0x4);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		data_length) != 0x8);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		reserved1) != 0xa);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
+		data) != 0xc);
+	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
+
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		data_in_result) != 0x0);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		data_out_result) != 0x1);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		reserved) != 0x2);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		status) != 0x5);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		status_qualifier) != 0x6);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		sense_data_length) != 0x8);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		response_data_length) != 0xa);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		data_in_transferred) != 0xc);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		data_out_transferred) != 0x10);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
+		data) != 0x14);
+	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
+
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		signature) != 0x0);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		function_and_status_code) != 0x8);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		max_admin_iq_elements) != 0x10);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		max_admin_oq_elements) != 0x11);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_iq_element_length) != 0x12);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_oq_element_length) != 0x13);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		max_reset_timeout) != 0x14);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		legacy_intx_status) != 0x18);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		legacy_intx_mask_set) != 0x1c);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		legacy_intx_mask_clear) != 0x20);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		device_status) != 0x40);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_iq_pi_offset) != 0x48);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_oq_ci_offset) != 0x50);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_iq_element_array_addr) != 0x58);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_oq_element_array_addr) != 0x60);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_iq_ci_addr) != 0x68);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_oq_pi_addr) != 0x70);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_iq_num_elements) != 0x78);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_oq_num_elements) != 0x79);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		admin_queue_int_msg_num) != 0x7a);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		device_error) != 0x80);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		error_details) != 0x88);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		device_reset) != 0x90);
+	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
+		power_action) != 0x94);
+	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
+
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		header.work_area) != 6);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		function_code) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.report_device_capability.buffer_length) != 44);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.report_device_capability.sg_descriptor) != 48);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_iq.queue_id) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_iq.element_array_addr) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_iq.ci_addr) != 24);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_iq.num_elements) != 32);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_iq.element_length) != 34);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_iq.queue_protocol) != 36);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.queue_id) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.element_array_addr) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.pi_addr) != 24);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.num_elements) != 32);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.element_length) != 34);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.queue_protocol) != 36);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.int_msg_num) != 40);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.coalescing_count) != 42);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.min_coalescing_time) != 44);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.create_operational_oq.max_coalescing_time) != 48);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
+		data.delete_operational_queue.queue_id) != 12);
+	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+		data.create_operational_iq) != 64 - 11);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+		data.create_operational_oq) != 64 - 11);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
+		data.delete_operational_queue) != 64 - 11);
+
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		header.work_area) != 6);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		function_code) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		status) != 11);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		data.create_operational_iq.status_descriptor) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		data.create_operational_iq.iq_pi_offset) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		data.create_operational_oq.status_descriptor) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+		data.create_operational_oq.oq_ci_offset) != 16);
+	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
+
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		header.response_queue_id) != 4);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		header.work_area) != 6);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		nexus_id) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		buffer_length) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		lun_number) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		protocol_specific) != 24);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		error_index) != 27);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		cdb) != 32);
+	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
+		sg_descriptors) != 64);
+	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		header.response_queue_id) != 4);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		header.work_area) != 6);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		nexus_id) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		buffer_length) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		data_encryption_key_index) != 22);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		encrypt_tweak_lower) != 24);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		encrypt_tweak_upper) != 28);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		cdb) != 32);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		error_index) != 48);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		num_sg_descriptors) != 50);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		cdb_length) != 51);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		lun_number) != 52);
+	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
+		sg_descriptors) != 64);
+	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
+		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
+
+	BUILD_BUG_ON(offsetof(struct pqi_io_response,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_io_response,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_io_response,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_io_response,
+		error_index) != 10);
+
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		header.response_queue_id) != 4);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		data.report_event_configuration.buffer_length) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		data.report_event_configuration.sg_descriptors) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		data.set_event_configuration.global_event_oq_id) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		data.set_event_configuration.buffer_length) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
+		data.set_event_configuration.sg_descriptors) != 16);
+
+	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
+		max_inbound_iu_length) != 6);
+	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
+		max_outbound_iu_length) != 14);
+	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
+
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		data_length) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		iq_arbitration_priority_support_bitmask) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		maximum_aw_a) != 9);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		maximum_aw_b) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		maximum_aw_c) != 11);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		max_inbound_queues) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		max_elements_per_iq) != 18);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		max_iq_element_length) != 24);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		min_iq_element_length) != 26);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		max_outbound_queues) != 30);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		max_elements_per_oq) != 32);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		intr_coalescing_time_granularity) != 34);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		max_oq_element_length) != 36);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		min_oq_element_length) != 38);
+	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
+		iu_layer_descriptors) != 64);
+	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
+
+	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
+		event_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
+		oq_id) != 2);
+	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
+
+	BUILD_BUG_ON(offsetof(struct pqi_event_config,
+		num_event_descriptors) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_event_config,
+		descriptors) != 4);
+
+	BUILD_BUG_ON(offsetof(struct pqi_event_response,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_event_response,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_event_response,
+		event_type) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_event_response,
+		event_id) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_event_response,
+		additional_event_id) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_event_response,
+		data) != 16);
+	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
+
+	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+		event_type) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+		event_id) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
+		additional_event_id) != 12);
+	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
+
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		nexus_id) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		lun_number) != 16);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		protocol_specific) != 24);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		outbound_queue_id_to_manage) != 26);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		request_id_to_manage) != 28);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
+		task_management_function) != 30);
+	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
+
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+		header.iu_type) != 0);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+		header.iu_length) != 2);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+		request_id) != 8);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+		nexus_id) != 10);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+		additional_response_info) != 12);
+	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
+		response_code) != 15);
+	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
+
+	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+		configured_logical_drive_count) != 0);
+	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+		configuration_signature) != 1);
+	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+		firmware_version) != 5);
+	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+		extended_logical_unit_count) != 154);
+	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+		firmware_build_number) != 190);
+	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
+		controller_mode) != 292);
+
+	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
+	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
+	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
+		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
+		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
+	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
+		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
+	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
+		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
+
+	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
+}
