Merge branch 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block: (149 commits)
  block: make sure that REQ_* types are seen even with CONFIG_BLOCK=n
  xen-blkfront: fix missing out label
  blkdev: fix blkdev_issue_zeroout return value
  block: update request stacking methods to support discards
  block: fix missing export of blk_types.h
  writeback: fix bad _bh spinlock nesting
  drbd: revert "delay probes", feature is being re-implemented differently
  drbd: Initialize all members of sync_conf to their defaults [Bugz 315]
  drbd: Disable delay probes for the upcomming release
  writeback: cleanup bdi_register
  writeback: add new tracepoints
  writeback: remove unnecessary init_timer call
  writeback: optimize periodic bdi thread wakeups
  writeback: prevent unnecessary bdi threads wakeups
  writeback: move bdi threads exiting logic to the forker thread
  writeback: restructure bdi forker loop a little
  writeback: move last_active to bdi
  writeback: do not remove bdi from bdi_list
  writeback: simplify bdi code a little
  writeback: do not lose wake-ups in bdi threads
  ...

Fixed up pretty trivial conflicts in drivers/block/virtio_blk.c and
drivers/scsi/scsi_error.c as per Jens.
Linus Torvalds, 15 years ago
commit 2f9e825d3e
100 changed files with 3121 additions and 2380 deletions
  1. arch/alpha/include/asm/scatterlist.h (+0 -2)
  2. arch/avr32/include/asm/scatterlist.h (+0 -2)
  3. arch/blackfin/include/asm/scatterlist.h (+0 -2)
  4. arch/cris/include/asm/scatterlist.h (+0 -2)
  5. arch/frv/include/asm/scatterlist.h (+0 -2)
  6. arch/h8300/include/asm/scatterlist.h (+0 -2)
  7. arch/ia64/include/asm/scatterlist.h (+0 -9)
  8. arch/m32r/include/asm/scatterlist.h (+0 -2)
  9. arch/m68k/include/asm/scatterlist.h (+0 -3)
  10. arch/microblaze/include/asm/scatterlist.h (+0 -2)
  11. arch/mips/include/asm/scatterlist.h (+0 -2)
  12. arch/mn10300/include/asm/scatterlist.h (+0 -2)
  13. arch/parisc/include/asm/scatterlist.h (+0 -1)
  14. arch/powerpc/include/asm/scatterlist.h (+0 -3)
  15. arch/s390/include/asm/scatterlist.h (+0 -2)
  16. arch/score/include/asm/scatterlist.h (+0 -2)
  17. arch/sh/include/asm/scatterlist.h (+0 -2)
  18. arch/sparc/include/asm/scatterlist.h (+0 -1)
  19. arch/um/drivers/ubd_kern.c (+6 -1)
  20. arch/x86/include/asm/scatterlist.h (+0 -1)
  21. arch/xtensa/include/asm/scatterlist.h (+0 -2)
  22. block/blk-barrier.c (+18 -17)
  23. block/blk-core.c (+82 -35)
  24. block/blk-exec.c (+1 -1)
  25. block/blk-lib.c (+24 -32)
  26. block/blk-map.c (+1 -1)
  27. block/blk-merge.c (+4 -5)
  28. block/blk-settings.c (+17 -0)
  29. block/blk-sysfs.c (+39 -43)
  30. block/blk.h (+4 -2)
  31. block/cfq-iosched.c (+12 -9)
  32. block/compat_ioctl.c (+0 -56)
  33. block/elevator.c (+11 -8)
  34. block/ioctl.c (+1 -20)
  35. drivers/ata/libata-scsi.c (+2 -2)
  36. drivers/block/DAC960.c (+9 -4)
  37. drivers/block/amiflop.c (+25 -4)
  38. drivers/block/aoe/aoeblk.c (+5 -1)
  39. drivers/block/ataflop.c (+28 -4)
  40. drivers/block/brd.c (+6 -3)
  41. drivers/block/cciss.c (+1281 -884)
  42. drivers/block/cciss.h (+124 -11)
  43. drivers/block/cciss_cmd.h (+33 -3)
  44. drivers/block/cciss_scsi.c (+315 -355)
  45. drivers/block/cpqarray.c (+49 -29)
  46. drivers/block/drbd/drbd_actlog.c (+4 -4)
  47. drivers/block/drbd/drbd_int.h (+1 -15)
  48. drivers/block/drbd/drbd_main.c (+21 -81)
  49. drivers/block/drbd/drbd_nl.c (+0 -4)
  50. drivers/block/drbd/drbd_proc.c (+2 -17)
  51. drivers/block/drbd/drbd_receiver.c (+29 -106)
  52. drivers/block/drbd/drbd_req.c (+1 -1)
  53. drivers/block/drbd/drbd_worker.c (+1 -14)
  54. drivers/block/floppy.c (+70 -112)
  55. drivers/block/hd.c (+1 -1)
  56. drivers/block/loop.c (+7 -2)
  57. drivers/block/mg_disk.c (+2 -2)
  58. drivers/block/nbd.c (+5 -2)
  59. drivers/block/osdblk.c (+4 -11)
  60. drivers/block/paride/pcd.c (+18 -3)
  61. drivers/block/paride/pd.c (+9 -2)
  62. drivers/block/paride/pf.c (+20 -6)
  63. drivers/block/pktcdvd.c (+15 -5)
  64. drivers/block/ps3disk.c (+6 -19)
  65. drivers/block/swim.c (+18 -2)
  66. drivers/block/swim3.c (+29 -3)
  67. drivers/block/ub.c (+28 -7)
  68. drivers/block/umem.c (+1 -1)
  69. drivers/block/viodasd.c (+19 -2)
  70. drivers/block/virtio_blk.c (+49 -39)
  71. drivers/block/xd.c (+16 -3)
  72. drivers/block/xen-blkfront.c (+300 -103)
  73. drivers/block/xsysace.c (+7 -1)
  74. drivers/block/z2ram.c (+10 -3)
  75. drivers/cdrom/cdrom.c (+26 -20)
  76. drivers/cdrom/gdrom.c (+30 -18)
  77. drivers/cdrom/viocd.c (+53 -53)
  78. drivers/ide/ide-atapi.c (+10 -7)
  79. drivers/ide/ide-cd.c (+61 -37)
  80. drivers/ide/ide-cd_ioctl.c (+1 -1)
  81. drivers/ide/ide-disk.c (+12 -6)
  82. drivers/ide/ide-disk_ioctl.c (+7 -2)
  83. drivers/ide/ide-eh.c (+3 -2)
  84. drivers/ide/ide-floppy.c (+17 -10)
  85. drivers/ide/ide-floppy_ioctl.c (+9 -3)
  86. drivers/ide/ide-gd.c (+17 -2)
  87. drivers/ide/ide-io.c (+4 -4)
  88. drivers/ide/ide-pm.c (+4 -4)
  89. drivers/ide/ide-tape.c (+18 -4)
  90. drivers/md/dm-io.c (+6 -6)
  91. drivers/md/dm-kcopyd.c (+1 -1)
  92. drivers/md/dm-raid1.c (+1 -1)
  93. drivers/md/dm-stripe.c (+1 -1)
  94. drivers/md/dm.c (+21 -26)
  95. drivers/md/linear.c (+1 -1)
  96. drivers/md/md.c (+11 -5)
  97. drivers/md/md.h (+2 -2)
  98. drivers/md/multipath.c (+4 -4)
  99. drivers/md/raid0.c (+1 -1)
  100. drivers/md/raid1.c (+10 -12)

+ 0 - 2
arch/alpha/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #endif /* !(_ALPHA_SCATTERLIST_H) */

+ 0 - 2
arch/avr32/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0xffffffff)
-
 #endif /* __ASM_AVR32_SCATTERLIST_H */

+ 0 - 2
arch/blackfin/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD	(0xffffffff)
-
 #endif				/* !(_BLACKFIN_SCATTERLIST_H) */

+ 0 - 2
arch/cris/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x1fffffff)
-
 #endif /* !(__ASM_CRIS_SCATTERLIST_H) */

+ 0 - 2
arch/frv/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0xffffffffUL)
-
 #endif /* !_ASM_SCATTERLIST_H */

+ 0 - 2
arch/h8300/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD	(0xffffffff)
-
 #endif /* !(_H8300_SCATTERLIST_H) */

+ 0 - 9
arch/ia64/include/asm/scatterlist.h

@@ -2,15 +2,6 @@
 #define _ASM_IA64_SCATTERLIST_H
 
 #include <asm-generic/scatterlist.h>
-/*
- * It used to be that ISA_DMA_THRESHOLD had something to do with the
- * DMA-limits of ISA-devices.  Nowadays, its only remaining use (apart
- * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
- * tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical
- * address of a page is that is allocated with GFP_DMA.  On IA-64,
- * that's 4GB - 1.
- */
-#define ISA_DMA_THRESHOLD	0xffffffff
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_IA64_SCATTERLIST_H */

+ 0 - 2
arch/m32r/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x1fffffff)
-
 #endif /* _ASM_M32R_SCATTERLIST_H */

+ 0 - 3
arch/m68k/include/asm/scatterlist.h

@@ -3,7 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-/* This is bogus and should go away. */
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
 #endif /* !(_M68K_SCATTERLIST_H) */

+ 0 - 2
arch/microblaze/include/asm/scatterlist.h

@@ -1,3 +1 @@
 #include <asm-generic/scatterlist.h>
-
-#define ISA_DMA_THRESHOLD	(~0UL)

+ 0 - 2
arch/mips/include/asm/scatterlist.h

@@ -3,6 +3,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x00ffffffUL)
-
 #endif /* __ASM_SCATTERLIST_H */

+ 0 - 2
arch/mn10300/include/asm/scatterlist.h

@@ -13,6 +13,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x00ffffff)
-
 #endif /* _ASM_SCATTERLIST_H */

+ 0 - 1
arch/parisc/include/asm/scatterlist.h

@@ -5,7 +5,6 @@
 #include <asm/types.h>
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (~0UL)
 #define sg_virt_addr(sg) ((unsigned long)sg_virt(sg))
 
 #endif /* _ASM_PARISC_SCATTERLIST_H */

+ 0 - 3
arch/powerpc/include/asm/scatterlist.h

@@ -12,9 +12,6 @@
 #include <asm/dma.h>
 #include <asm-generic/scatterlist.h>
 
-#ifdef __powerpc64__
-#define ISA_DMA_THRESHOLD	(~0UL)
-#endif
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_POWERPC_SCATTERLIST_H */

+ 0 - 2
arch/s390/include/asm/scatterlist.h

@@ -1,3 +1 @@
-#define ISA_DMA_THRESHOLD	(~0UL)
-
 #include <asm-generic/scatterlist.h>

+ 0 - 2
arch/score/include/asm/scatterlist.h

@@ -1,8 +1,6 @@
 #ifndef _ASM_SCORE_SCATTERLIST_H
 #define _ASM_SCORE_SCATTERLIST_H
 
-#define ISA_DMA_THRESHOLD	(~0UL)
-
 #include <asm-generic/scatterlist.h>
 
 #endif /* _ASM_SCORE_SCATTERLIST_H */

+ 0 - 2
arch/sh/include/asm/scatterlist.h

@@ -1,8 +1,6 @@
 #ifndef __ASM_SH_SCATTERLIST_H
 #define __ASM_SH_SCATTERLIST_H
 
-#define ISA_DMA_THRESHOLD	phys_addr_mask()
-
 #include <asm-generic/scatterlist.h>
 
 #endif /* __ASM_SH_SCATTERLIST_H */

+ 0 - 1
arch/sparc/include/asm/scatterlist.h

@@ -3,7 +3,6 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD	(~0UL)
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* !(_SPARC_SCATTERLIST_H) */

+ 6 - 1
arch/um/drivers/ubd_kern.c

@@ -33,6 +33,7 @@
 #include "linux/mm.h"
 #include "linux/slab.h"
 #include "linux/vmalloc.h"
+#include "linux/smp_lock.h"
 #include "linux/blkpg.h"
 #include "linux/genhd.h"
 #include "linux/spinlock.h"
@@ -1098,6 +1099,7 @@ static int ubd_open(struct block_device *bdev, fmode_t mode)
 	struct ubd *ubd_dev = disk->private_data;
 	int err = 0;
 
+	lock_kernel();
 	if(ubd_dev->count == 0){
 		err = ubd_open_dev(ubd_dev);
 		if(err){
@@ -1115,7 +1117,8 @@ static int ubd_open(struct block_device *bdev, fmode_t mode)
 	        if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev);
 	        err = -EROFS;
 	}*/
- out:
+out:
+	unlock_kernel();
 	return err;
 }
 
@@ -1123,8 +1126,10 @@ static int ubd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ubd *ubd_dev = disk->private_data;
 
+	lock_kernel();
 	if(--ubd_dev->count == 0)
 		ubd_close_dev(ubd_dev);
+	unlock_kernel();
 	return 0;
 }
 

+ 0 - 1
arch/x86/include/asm/scatterlist.h

@@ -3,7 +3,6 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (0x00ffffff)
 #define ARCH_HAS_SG_CHAIN
 
 #endif /* _ASM_X86_SCATTERLIST_H */

+ 0 - 2
arch/xtensa/include/asm/scatterlist.h

@@ -13,6 +13,4 @@
 
 #include <asm-generic/scatterlist.h>
 
-#define ISA_DMA_THRESHOLD (~0UL)
-
 #endif	/* _XTENSA_SCATTERLIST_H */

+ 18 - 17
block/blk-barrier.c

@@ -13,7 +13,6 @@
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -22,15 +21,8 @@
  *   feature should call this function and indicate so.
  *
  **/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
-		      prepare_flush_fn *prepare_flush_fn)
+int blk_queue_ordered(struct request_queue *q, unsigned ordered)
 {
-	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
-					     QUEUE_ORDERED_DO_POSTFLUSH))) {
-		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
-		return -EINVAL;
-	}
-
 	if (ordered != QUEUE_ORDERED_NONE &&
 	    ordered != QUEUE_ORDERED_DRAIN &&
 	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
@@ -44,7 +36,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
 	q->ordered = ordered;
 	q->next_ordered = ordered;
-	q->prepare_flush_fn = prepare_flush_fn;
 
 	return 0;
 }
@@ -79,7 +70,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	 *
 	 * http://thread.gmane.org/gmane.linux.kernel/537473
 	 */
-	if (!blk_fs_request(rq))
+	if (rq->cmd_type != REQ_TYPE_FS)
 		return QUEUE_ORDSEQ_DRAIN;
 
 	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -143,10 +134,10 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	}
 
 	blk_rq_init(q, rq);
-	rq->cmd_flags = REQ_HARDBARRIER;
-	rq->rq_disk = q->bar_rq.rq_disk;
+	rq->cmd_type = REQ_TYPE_FS;
+	rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
+	rq->rq_disk = q->orig_bar_rq->rq_disk;
 	rq->end_io = end_io;
-	q->prepare_flush_fn(q, rq);
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
@@ -203,7 +194,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 		/* initialize proxy request and queue it */
 		blk_rq_init(q, rq);
 		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-			rq->cmd_flags |= REQ_RW;
+			rq->cmd_flags |= REQ_WRITE;
 		if (q->ordered & QUEUE_ORDERED_DO_FUA)
 			rq->cmd_flags |= REQ_FUA;
 		init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -236,7 +227,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
-	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+				(rq->cmd_flags & REQ_HARDBARRIER);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
@@ -261,7 +253,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	 */
 
 	/* Special requests are not subject to ordering rules. */
-	if (!blk_fs_request(rq) &&
+	if (rq->cmd_type != REQ_TYPE_FS &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
 		return true;
 
@@ -319,6 +311,15 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	if (!q)
 		return -ENXIO;
 
+	/*
+	 * some block devices may not have their queue correctly set up here
+	 * (e.g. loop device without a backing file) and so issuing a flush
+	 * here will panic. Ensure there is a request function before issuing
+	 * the barrier.
+	 */
+	if (!q->make_request_fn)
+		return -ENXIO;
+
 	bio = bio_alloc(gfp_mask, 0);
 	bio->bi_end_io = bio_end_empty_barrier;
 	bio->bi_bdev = bdev;
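
With prepare_flush_fn gone, a driver that used to supply a flush-preparation callback now only declares its ordered mode and reacts to REQ_FLUSH on the proxy barrier request from its request handler. A rough, illustrative sketch of how a call site changes ("mydrv" and its helpers are made up, not part of this merge):

    /* Illustrative only -- "mydrv" is a hypothetical driver. */
    static void mydrv_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = blk_fetch_request(q)) != NULL) {
                    if (rq->cmd_flags & REQ_FLUSH) {
                            /* the flush proxy request is now flagged by the
                             * core instead of going through prepare_flush_fn() */
                            mydrv_issue_cache_flush(rq);
                            continue;
                    }
                    mydrv_do_rw(rq);
            }
    }

    /* and at queue setup time the callback argument simply disappears: */
    blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);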

+ 82 - 35
block/blk-core.c

@@ -184,7 +184,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
 	       rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		printk(KERN_INFO "  cdb: ");
 		for (bit = 0; bit < BLK_MAX_CDB; bit++)
 			printk("%02x ", rq->cmd[bit]);
@@ -608,6 +608,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
+	q->unprep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= QUEUE_FLAG_DEFAULT;
 	q->queue_lock		= lock;
@@ -1135,30 +1136,46 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
+/**
+ * blk_add_request_payload - add a payload to a request
+ * @rq: request to update
+ * @page: page backing the payload
+ * @len: length of the payload.
+ *
+ * This allows to later add a payload to an already submitted request by
+ * a block driver.  The driver needs to take care of freeing the payload
+ * itself.
+ *
+ * Note that this is a quite horrible hack and nothing but handling of
+ * discard requests should ever use it.
+ */
+void blk_add_request_payload(struct request *rq, struct page *page,
+		unsigned int len)
+{
+	struct bio *bio = rq->bio;
+
+	bio->bi_io_vec->bv_page = page;
+	bio->bi_io_vec->bv_offset = 0;
+	bio->bi_io_vec->bv_len = len;
+
+	bio->bi_size = len;
+	bio->bi_vcnt = 1;
+	bio->bi_phys_segments = 1;
+
+	rq->__data_len = rq->resid_len = len;
+	rq->nr_phys_segments = 1;
+	rq->buffer = bio_data(bio);
+}
+EXPORT_SYMBOL_GPL(blk_add_request_payload);
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
-	/*
-	 * Inherit FAILFAST from bio (for read-ahead, and explicit
-	 * FAILFAST).  FAILFAST flags are identical for req and bio.
-	 */
-	if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+	if (bio->bi_rw & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
-	else
-		req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-		req->cmd_flags |= REQ_DISCARD;
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-		req->cmd_flags |= REQ_HARDBARRIER;
-	if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_rw_flagged(bio, BIO_RW_META))
-		req->cmd_flags |= REQ_RW_META;
-	if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-		req->cmd_flags |= REQ_NOIDLE;
 
 	req->errors = 0;
 	req->__sector = bio->bi_sector;
@@ -1181,12 +1198,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int el_ret;
 	unsigned int bytes = bio->bi_size;
 	const unsigned short prio = bio_prio(bio);
-	const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-	const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+	const bool sync = (bio->bi_rw & REQ_SYNC);
+	const bool unplug = (bio->bi_rw & REQ_UNPLUG);
 	const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
 	int rw_flags;
 
-	if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+	if ((bio->bi_rw & REQ_HARDBARRIER) &&
 	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
@@ -1200,7 +1217,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1292,7 @@ get_rq:
 	 */
 	rw_flags = bio_data_dir(bio);
 	if (sync)
-		rw_flags |= REQ_RW_SYNC;
+		rw_flags |= REQ_SYNC;
 
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
 			goto end_io;
 		}
 
-		if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+		if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
 			     nr_sectors > queue_max_hw_sectors(q))) {
 			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 			       bdevname(bio->bi_bdev, b),
@@ -1497,8 +1514,7 @@ static inline void __generic_make_request(struct bio *bio)
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
 
-		if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-		    !blk_queue_discard(q)) {
+		if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
@@ -1583,7 +1599,7 @@ void submit_bio(int rw, struct bio *bio)
 	 * If it's a regular read/write or a barrier with data attached,
 	 * go through the normal accounting stuff before submission.
 	 */
-	if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
+	if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
@@ -1628,6 +1644,9 @@ EXPORT_SYMBOL(submit_bio);
 */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
+	if (rq->cmd_flags & REQ_DISCARD)
+		return 0;
+
 	if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
 	    blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
 		printk(KERN_ERR "%s: over max size limit.\n", __func__);
@@ -1796,7 +1815,7 @@ struct request *blk_peek_request(struct request_queue *q)
 			 * sees this request (possibly after
 			 * requeueing).  Notify IO scheduler.
 			 */
-			if (blk_sorted_rq(rq))
+			if (rq->cmd_flags & REQ_SORTED)
 				elv_activate_rq(q, rq);
 
 			/*
@@ -1984,10 +2003,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * TODO: tj: This is too subtle.  It would be better to let
 	 * low level drivers do what they see fit.
 	 */
-	if (blk_fs_request(req))
+	if (req->cmd_type == REQ_TYPE_FS)
 		req->errors = 0;
 
-	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+	if (error && req->cmd_type == REQ_TYPE_FS &&
+	    !(req->cmd_flags & REQ_QUIET)) {
 		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)blk_rq_pos(req));
@@ -2074,7 +2094,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	req->buffer = bio_data(req->bio);
 
 	/* update sector only for requests with clear definition of sector */
-	if (blk_fs_request(req) || blk_discard_rq(req))
+	if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
 		req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */
@@ -2111,11 +2131,32 @@ static bool blk_update_bidi_request(struct request *rq, int error,
 	    blk_update_request(rq->next_rq, error, bidi_bytes))
 		return true;
 
-	add_disk_randomness(rq->rq_disk);
+	if (blk_queue_add_random(rq->q))
+		add_disk_randomness(rq->rq_disk);
 
 	return false;
 }
 
+/**
+ * blk_unprep_request - unprepare a request
+ * @req:	the request
+ *
+ * This function makes a request ready for complete resubmission (or
+ * completion).  It happens only after all error handling is complete,
+ * so represents the appropriate moment to deallocate any resources
+ * that were allocated to the request in the prep_rq_fn.  The queue
+ * lock is held when calling this.
+ */
+void blk_unprep_request(struct request *req)
+{
+	struct request_queue *q = req->q;
+
+	req->cmd_flags &= ~REQ_DONTPREP;
+	if (q->unprep_rq_fn)
+		q->unprep_rq_fn(q, req);
+}
+EXPORT_SYMBOL_GPL(blk_unprep_request);
+
 /*
 * queue lock must be held
 */
@@ -2126,11 +2167,15 @@ static void blk_finish_request(struct request *req, int error)
 
 	BUG_ON(blk_queued_rq(req));
 
-	if (unlikely(laptop_mode) && blk_fs_request(req))
+	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
 		laptop_io_completion(&req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 
+	if (req->cmd_flags & REQ_DONTPREP)
+		blk_unprep_request(req);
+
+
 	blk_account_io_done(req);
 
 	if (req->end_io)
@@ -2363,7 +2408,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
 {
 	/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-	rq->cmd_flags |= bio->bi_rw & REQ_RW;
+	rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
 	if (bio_has_data(bio)) {
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
@@ -2450,6 +2495,8 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
 	dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE);
+	if (src->cmd_flags & REQ_DISCARD)
+		dst->cmd_flags |= REQ_DISCARD;
 	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
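
As the new kernel-doc above stresses, blk_add_request_payload() exists only for discard handling: a prep_rq_fn can attach a single zeroed page as the payload of an already-submitted discard request, and the driver must free that page itself once the request is done. A hypothetical sketch (driver name and helpers are invented for illustration):

    static int mydrv_prep_discard(struct request_queue *q, struct request *rq)
    {
            struct page *page;

            if (!(rq->cmd_flags & REQ_DISCARD))
                    return BLKPREP_OK;

            page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
            if (!page)
                    return BLKPREP_DEFER;

            /* attach the page as the request's single-segment payload;
             * the driver remains responsible for freeing it later */
            blk_add_request_payload(rq, page, q->limits.logical_block_size);
            rq->cmd_flags |= REQ_DONTPREP;
            return BLKPREP_OK;
    }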

+ 1 - 1
block/blk-exec.c

@@ -57,7 +57,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__elv_add_request(q, rq, where, 1);
 	__generic_unplug_device(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
-	if (blk_pm_resume_request(rq))
+	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
 	spin_unlock_irq(q->queue_lock);
 }

+ 24 - 32
block/blk-lib.c

@@ -19,7 +19,6 @@ static void blkdev_discard_end_io(struct bio *bio, int err)
 
 	if (bio->bi_private)
 		complete(bio->bi_private);
-	__free_page(bio_page(bio));
 
 	bio_put(bio);
 }
@@ -42,8 +41,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = flags & BLKDEV_IFL_BARRIER ?
 		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	unsigned int max_discard_sectors;
 	struct bio *bio;
-	struct page *page;
 	int ret = 0;
 
 	if (!q)
@@ -52,36 +51,30 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
 
-	while (nr_sects && !ret) {
-		unsigned int sector_size = q->limits.logical_block_size;
-		unsigned int max_discard_sectors =
-			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	/*
+	 * Ensure that max_discard_sectors is of the proper
+	 * granularity
+	 */
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	if (q->limits.discard_granularity) {
+		unsigned int disc_sects = q->limits.discard_granularity >> 9;
 
+		max_discard_sectors &= ~(disc_sects - 1);
+	}
+
+	while (nr_sects && !ret) {
 		bio = bio_alloc(gfp_mask, 1);
-		if (!bio)
-			goto out;
+		if (!bio) {
+			ret = -ENOMEM;
+			break;
+		}
+
 		bio->bi_sector = sector;
 		bio->bi_end_io = blkdev_discard_end_io;
 		bio->bi_bdev = bdev;
 		if (flags & BLKDEV_IFL_WAIT)
 			bio->bi_private = &wait;
 
-		/*
-		 * Add a zeroed one-sector payload as that's what
-		 * our current implementations need.  If we'll ever need
-		 * more the interface will need revisiting.
-		 */
-		page = alloc_page(gfp_mask | __GFP_ZERO);
-		if (!page)
-			goto out_free_bio;
-		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
-			goto out_free_page;
-
-		/*
-		 * And override the bio size - the way discard works we
-		 * touch many more blocks on disk than the actual payload
-		 * length.
-		 */
 		if (nr_sects > max_discard_sectors) {
 			bio->bi_size = max_discard_sectors << 9;
 			nr_sects -= max_discard_sectors;
@@ -103,13 +96,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			ret = -EIO;
 		bio_put(bio);
 	}
+
 	return ret;
-out_free_page:
-	__free_page(page);
-out_free_bio:
-	bio_put(bio);
-out:
-	return -ENOMEM;
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
@@ -157,7 +145,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	int ret = 0;
+	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
 	unsigned int sz, issued = 0;
@@ -175,11 +163,14 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			return ret;
 	}
 submit:
+	ret = 0;
 	while (nr_sects != 0) {
 		bio = bio_alloc(gfp_mask,
 				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio)
+		if (!bio) {
+			ret = -ENOMEM;
 			break;
+		}
 
 		bio->bi_sector = sector;
 		bio->bi_bdev   = bdev;
@@ -198,6 +189,7 @@ submit:
 			if (ret < (sz << 9))
 				break;
 		}
+		ret = 0;
 		issued++;
 		submit_bio(WRITE, bio);
 	}
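
The new granularity clamp rounds max_discard_sectors down to a whole number of discard granules (the mask trick assumes disc_sects is a power of two, which is the typical case). A worked example with assumed values:

    /* assuming discard_granularity = 64 KiB and a large device limit:
     *   disc_sects          = 65536 >> 9                 = 128
     *   max_discard_sectors = min(limit, UINT_MAX >> 9)  = 8388607
     *   8388607 & ~(128 - 1)                             = 8388480 = 128 * 65535
     * so every bio built in the loop covers whole 64 KiB granules.
     */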

+ 1 - 1
block/blk-map.c

@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
+		bio->bi_rw |= (1 << REQ_WRITE);
 
 	if (do_copy)
 		rq->cmd_flags |= REQ_COPY_USER;

+ 4 - 5
block/blk-merge.c

@@ -12,7 +12,6 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 					     struct bio *bio)
 {
-	unsigned int phys_size;
 	struct bio_vec *bv, *bvprv = NULL;
 	int cluster, i, high, highprv = 1;
 	unsigned int seg_size, nr_phys_segs;
@@ -24,7 +23,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	fbio = bio;
 	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	seg_size = 0;
-	phys_size = nr_phys_segs = 0;
+	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_segment(bv, bio, i) {
 			/*
@@ -180,7 +179,7 @@ new_segment:
 	}
 
 	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-		if (rq->cmd_flags & REQ_RW)
+		if (rq->cmd_flags & REQ_WRITE)
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
 		sg->page_link &= ~0x02;
@@ -226,7 +225,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 {
 	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 		max_sectors = queue_max_hw_sectors(q);
 	else
 		max_sectors = queue_max_sectors(q);
@@ -250,7 +249,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 {
 	unsigned short max_sectors;
 
-	if (unlikely(blk_pc_request(req)))
+	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
 		max_sectors = queue_max_hw_sectors(q);
 	else
 		max_sectors = queue_max_sectors(q);

+ 17 - 0
block/blk-settings.c

@@ -36,6 +36,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 }
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
+/**
+ * blk_queue_unprep_rq - set an unprepare_request function for queue
+ * @q:		queue
+ * @ufn:	unprepare_request function
+ *
+ * It's possible for a queue to register an unprepare_request callback
+ * which is invoked before the request is finally completed. The goal
+ * of the function is to deallocate any data that was allocated in the
+ * prepare_request callback.
+ *
+ */
+void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
+{
+	q->unprep_rq_fn = ufn;
+}
+EXPORT_SYMBOL(blk_queue_unprep_rq);
+
 /**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:		queue
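
A driver pairing this with a prep_rq_fn that allocates something (for example the discard payload page shown in the blk-core hunks above) would register the callback at queue setup time; a minimal sketch, with made-up names:

    static void mydrv_unprep(struct request_queue *q, struct request *rq)
    {
            /* free whatever the prepare_request callback attached */
            if ((rq->cmd_flags & REQ_DISCARD) && rq->bio)
                    __free_page(rq->bio->bi_io_vec->bv_page);
    }

    /* at queue setup time: */
    blk_queue_prep_rq(q, mydrv_prep_discard);
    blk_queue_unprep_rq(q, mydrv_unprep);   /* invoked via blk_unprep_request() on completion */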

+ 39 - 43
block/blk-sysfs.c

@@ -180,26 +180,36 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(!blk_queue_nonrot(q), page);
+#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
+static ssize_t								\
+queue_show_##name(struct request_queue *q, char *page)			\
+{									\
+	int bit;							\
+	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
+	return queue_var_show(neg ? !bit : bit, page);			\
+}									\
+static ssize_t								\
+queue_store_##name(struct request_queue *q, const char *page, size_t count) \
+{									\
+	unsigned long val;						\
+	ssize_t ret;							\
+	ret = queue_var_store(&val, page, count);			\
+	if (neg)							\
+		val = !val;						\
+									\
+	spin_lock_irq(q->queue_lock);					\
+	if (val)							\
+		queue_flag_set(QUEUE_FLAG_##flag, q);			\
+	else								\
+		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
+	spin_unlock_irq(q->queue_lock);					\
+	return ret;							\
 }
 
-static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
-				  size_t count)
-{
-	unsigned long nm;
-	ssize_t ret = queue_var_store(&nm, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (nm)
-		queue_flag_clear(QUEUE_FLAG_NONROT, q);
-	else
-		queue_flag_set(QUEUE_FLAG_NONROT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
+QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
+QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
+QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
+#undef QUEUE_SYSFS_BIT_FNS
 
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
@@ -250,27 +260,6 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	return ret;
 }
 
-static ssize_t queue_iostats_show(struct request_queue *q, char *page)
-{
-	return queue_var_show(blk_queue_io_stat(q), page);
-}
-
-static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
-				   size_t count)
-{
-	unsigned long stats;
-	ssize_t ret = queue_var_store(&stats, page, count);
-
-	spin_lock_irq(q->queue_lock);
-	if (stats)
-		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
-	spin_unlock_irq(q->queue_lock);
-
-	return ret;
-}
-
 static struct queue_sysfs_entry queue_requests_entry = {
 	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
 	.show = queue_requests_show,
@@ -352,8 +341,8 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
 
 static struct queue_sysfs_entry queue_nonrot_entry = {
 	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_nonrot_show,
-	.store = queue_nonrot_store,
+	.show = queue_show_nonrot,
+	.store = queue_store_nonrot,
 };
 
 static struct queue_sysfs_entry queue_nomerges_entry = {
@@ -370,8 +359,14 @@ static struct queue_sysfs_entry queue_rq_affinity_entry = {
 
 static struct queue_sysfs_entry queue_iostats_entry = {
 	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
-	.show = queue_iostats_show,
-	.store = queue_iostats_store,
+	.show = queue_show_iostats,
+	.store = queue_store_iostats,
+};
+
+static struct queue_sysfs_entry queue_random_entry = {
+	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_show_random,
+	.store = queue_store_random,
 };
 
 static struct attribute *default_attrs[] = {
@@ -394,6 +389,7 @@ static struct attribute *default_attrs[] = {
 	&queue_nomerges_entry.attr,
 	&queue_rq_affinity_entry.attr,
 	&queue_iostats_entry.attr,
+	&queue_random_entry.attr,
 	NULL,
 };
 
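For reference, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to roughly the following pair of handlers (hand-expanded here for illustration; neg == 1 inverts the bit so the "rotational" file keeps its existing polarity):

    static ssize_t queue_show_nonrot(struct request_queue *q, char *page)
    {
            int bit = test_bit(QUEUE_FLAG_NONROT, &q->queue_flags);
            return queue_var_show(!bit, page);
    }

    static ssize_t queue_store_nonrot(struct request_queue *q,
                                      const char *page, size_t count)
    {
            unsigned long val;
            ssize_t ret = queue_var_store(&val, page, count);

            val = !val;
            spin_lock_irq(q->queue_lock);
            if (val)
                    queue_flag_set(QUEUE_FLAG_NONROT, q);
            else
                    queue_flag_clear(QUEUE_FLAG_NONROT, q);
            spin_unlock_irq(q->queue_lock);
            return ret;
    }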

+ 4 - 2
block/blk.h

@@ -161,8 +161,10 @@ static inline int blk_cpu_to_group(int cpu)
 */
 static inline int blk_do_io_stat(struct request *rq)
 {
-	return rq->rq_disk && blk_rq_io_stat(rq) &&
-	       (blk_fs_request(rq) || blk_discard_rq(rq));
+	return rq->rq_disk &&
+	       (rq->cmd_flags & REQ_IO_STAT) &&
+	       (rq->cmd_type == REQ_TYPE_FS ||
+	        (rq->cmd_flags & REQ_DISCARD));
 }
 
 #endif

+ 12 - 9
block/cfq-iosched.c

@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
 */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 }
 
 /*
@@ -646,9 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
 		return rq1;
 	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
 		return rq2;
-	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
 		return rq1;
-	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+	else if ((rq2->cmd_flags & REQ_META) &&
+		 !(rq1->cmd_flags & REQ_META))
 		return rq2;
 
 	s1 = blk_rq_pos(rq1);
@@ -1484,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
 	cfqq->cfqd->rq_queued--;
 	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
 					rq_data_dir(rq), rq_is_sync(rq));
-	if (rq_is_meta(rq)) {
+	if (rq->cmd_flags & REQ_META) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
 	}
@@ -3176,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * So both queues are sync. Let the new request get disk time if
 	 * it's a metadata request and the current queue is doing regular IO.
 	 */
-	if (rq_is_meta(rq) && !cfqq->meta_pending)
+	if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
 		return true;
 
 	/*
@@ -3230,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	struct cfq_io_context *cic = RQ_CIC(rq);
 
 	cfqd->rq_queued++;
-	if (rq_is_meta(rq))
+	if (rq->cmd_flags & REQ_META)
 		cfqq->meta_pending++;
 
 	cfq_update_io_thinktime(cfqd, cic);
@@ -3365,7 +3366,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	unsigned long now;
 
 	now = jiffies;
-	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
+	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
+		     !!(rq->cmd_flags & REQ_NOIDLE));
 
 	cfq_update_hw_tag(cfqd);
 
@@ -3419,11 +3421,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 			cfq_slice_expired(cfqd, 1);
 		else if (sync && cfqq_empty &&
 			 !cfq_close_cooperator(cfqd, cfqq)) {
-			cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
+			cfqd->noidle_tree_requires_idle |=
+				!(rq->cmd_flags & REQ_NOIDLE);
 			/*
 			 * Idling is enabled for SYNC_WORKLOAD.
 			 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
-			 * only if we processed at least one !rq_noidle request
+			 * only if we processed at least one !REQ_NOIDLE request
 			 */
 			if (cfqd->serving_type == SYNC_WORKLOAD
 			    || cfqd->noidle_tree_requires_idle

+ 0 - 56
block/compat_ioctl.c

@@ -535,56 +535,6 @@ out:
 	return err;
 }
 
-struct compat_blk_user_trace_setup {
-	char name[32];
-	u16 act_mask;
-	u32 buf_size;
-	u32 buf_nr;
-	compat_u64 start_lba;
-	compat_u64 end_lba;
-	u32 pid;
-};
-#define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup)
-
-static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg)
-{
-	struct blk_user_trace_setup buts;
-	struct compat_blk_user_trace_setup cbuts;
-	struct request_queue *q;
-	char b[BDEVNAME_SIZE];
-	int ret;
-
-	q = bdev_get_queue(bdev);
-	if (!q)
-		return -ENXIO;
-
-	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
-		return -EFAULT;
-
-	bdevname(bdev, b);
-
-	buts = (struct blk_user_trace_setup) {
-		.act_mask = cbuts.act_mask,
-		.buf_size = cbuts.buf_size,
-		.buf_nr = cbuts.buf_nr,
-		.start_lba = cbuts.start_lba,
-		.end_lba = cbuts.end_lba,
-		.pid = cbuts.pid,
-	};
-	memcpy(&buts.name, &cbuts.name, 32);
-
-	mutex_lock(&bdev->bd_mutex);
-	ret = do_blk_trace_setup(q, b, bdev->bd_dev, bdev, &buts);
-	mutex_unlock(&bdev->bd_mutex);
-	if (ret)
-		return ret;
-
-	if (copy_to_user(arg, &buts.name, 32))
-		return -EFAULT;
-
-	return 0;
-}
-
 static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned cmd, unsigned long arg)
 {
@@ -802,16 +752,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		return compat_put_u64(arg, bdev->bd_inode->i_size);
 
 	case BLKTRACESETUP32:
-		lock_kernel();
-		ret = compat_blk_trace_setup(bdev, compat_ptr(arg));
-		unlock_kernel();
-		return ret;
 	case BLKTRACESTART: /* compatible */
 	case BLKTRACESTOP:  /* compatible */
 	case BLKTRACETEARDOWN: /* compatible */
-		lock_kernel();
 		ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
-		unlock_kernel();
 		return ret;
 	default:
 		if (disk->fops->compat_ioctl)

+ 11 - 8
block/elevator.c

@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	/*
 	 * Don't merge file system requests and discard requests
 	 */
-	if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
-	    bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
 		return 0;
 
 	/*
@@ -428,7 +427,8 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if (blk_discard_rq(rq) != blk_discard_rq(pos))
+		if ((rq->cmd_flags & REQ_DISCARD) !=
+		    (pos->cmd_flags & REQ_DISCARD))
 			break;
 		if (rq_data_dir(rq) != rq_data_dir(pos))
 			break;
@@ -558,7 +558,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq))
+		if (rq->cmd_flags & REQ_SORTED)
 			elv_deactivate_rq(q, rq);
 	}
 
@@ -644,7 +644,8 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		break;
 
 	case ELEVATOR_INSERT_SORT:
-		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
+		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
+		       !(rq->cmd_flags & REQ_DISCARD));
 		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
@@ -716,7 +717,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		/*
 		 * toggle ordered color
 		 */
-		if (blk_barrier_rq(rq))
+		if (rq->cmd_flags & REQ_HARDBARRIER)
 			q->ordcolor ^= 1;
 
 		/*
@@ -729,7 +730,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		 * this request is scheduling boundary, update
 		 * end_sector
 		 */
-		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+		if (rq->cmd_type == REQ_TYPE_FS ||
+		    (rq->cmd_flags & REQ_DISCARD)) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
@@ -843,7 +845,8 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 */
 	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]--;
-		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+		if ((rq->cmd_flags & REQ_SORTED) &&
+		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
 	}
 

+ 1 - 20
block/ioctl.c

@@ -163,18 +163,10 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned cmd, unsigned long arg)
 {
 	struct gendisk *disk = bdev->bd_disk;
-	int ret;
 
 	if (disk->fops->ioctl)
 		return disk->fops->ioctl(bdev, mode, cmd, arg);
 
-	if (disk->fops->locked_ioctl) {
-		lock_kernel();
-		ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg);
-		unlock_kernel();
-		return ret;
-	}
-
 	return -ENOTTY;
 }
 /*
@@ -185,8 +177,7 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
 
 /*
- * always keep this in sync with compat_blkdev_ioctl() and
- * compat_blkdev_locked_ioctl()
+ * always keep this in sync with compat_blkdev_ioctl()
 */
 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			unsigned long arg)
@@ -206,10 +197,8 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		if (ret != -EINVAL && ret != -ENOTTY)
 			return ret;
 
-		lock_kernel();
 		fsync_bdev(bdev);
 		invalidate_bdev(bdev);
-		unlock_kernel();
 		return 0;
 
 	case BLKROSET:
@@ -221,9 +210,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			return -EACCES;
 		if (get_user(n, (int __user *)(arg)))
 			return -EFAULT;
-		lock_kernel();
 		set_device_ro(bdev, n);
-		unlock_kernel();
 		return 0;
 
 	case BLKDISCARD: {
@@ -309,14 +296,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			bd_release(bdev);
 		return ret;
 	case BLKPG:
-		lock_kernel();
 		ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
-		unlock_kernel();
 		break;
 	case BLKRRPART:
-		lock_kernel();
 		ret = blkdev_reread_part(bdev);
-		unlock_kernel();
 		break;
 	case BLKGETSIZE:
 		size = bdev->bd_inode->i_size;
@@ -329,9 +312,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKTRACESTOP:
 	case BLKTRACESETUP:
 	case BLKTRACETEARDOWN:
-		lock_kernel();
 		ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
-		unlock_kernel();
 		break;
 	default:
 		ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);

+ 2 - 2
drivers/ata/libata-scsi.c

@@ -1111,10 +1111,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
 */
 static int atapi_drain_needed(struct request *rq)
 {
-	if (likely(!blk_pc_request(rq)))
+	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
 		return 0;
 
-	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+	if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
 		return 0;
 
 	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;

+ 9 - 4
drivers/block/DAC960.c

@@ -79,23 +79,28 @@ static int DAC960_open(struct block_device *bdev, fmode_t mode)
 	struct gendisk *disk = bdev->bd_disk;
 	DAC960_Controller_T *p = disk->queue->queuedata;
 	int drive_nr = (long)disk->private_data;
+	int ret = -ENXIO;
 
+	lock_kernel();
 	if (p->FirmwareType == DAC960_V1_Controller) {
 		if (p->V1.LogicalDriveInformation[drive_nr].
 		    LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
-			return -ENXIO;
+			goto out;
 	} else {
 		DAC960_V2_LogicalDeviceInfo_T *i =
 			p->V2.LogicalDeviceInformation[drive_nr];
 		if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
-			return -ENXIO;
+			goto out;
 	}
 
 	check_disk_change(bdev);
 
 	if (!get_capacity(p->disks[drive_nr]))
-		return -ENXIO;
-	return 0;
+		goto out;
+	ret = 0;
+out:
+	unlock_kernel();
+	return ret;
 }
 
 static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)

+ 25 - 4
drivers/block/amiflop.c

@@ -60,6 +60,7 @@
 #include <linux/hdreg.h>
 #include <linux/hdreg.h>
 #include <linux/delay.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/init.h>
+#include <linux/smp_lock.h>
 #include <linux/amifdreg.h>
 #include <linux/amifdreg.h>
 #include <linux/amifd.h>
 #include <linux/amifd.h>
 #include <linux/buffer_head.h>
 #include <linux/buffer_head.h>
@@ -1423,7 +1424,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 	return 0;
 }
 }
 
 
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		    unsigned int cmd, unsigned long param)
 		    unsigned int cmd, unsigned long param)
 {
 {
 	struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
 	struct amiga_floppy_struct *p = bdev->bd_disk->private_data;
@@ -1500,6 +1501,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 	return 0;
 }
 }
 
 
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = fd_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 static void fd_probe(int dev)
 static void fd_probe(int dev)
 {
 {
 	unsigned long code;
 	unsigned long code;
@@ -1542,10 +1555,13 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	int old_dev;
 	int old_dev;
 	unsigned long flags;
 	unsigned long flags;
 
 
+	lock_kernel();
 	old_dev = fd_device[drive];
 	old_dev = fd_device[drive];
 
 
-	if (fd_ref[drive] && old_dev != system)
+	if (fd_ref[drive] && old_dev != system) {
+		unlock_kernel();
 		return -EBUSY;
 		return -EBUSY;
+	}
 
 
 	if (mode & (FMODE_READ|FMODE_WRITE)) {
 	if (mode & (FMODE_READ|FMODE_WRITE)) {
 		check_disk_change(bdev);
 		check_disk_change(bdev);
@@ -1558,8 +1574,10 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 			fd_deselect (drive);
 			fd_deselect (drive);
 			rel_fdc();
 			rel_fdc();
 
 
-			if (wrprot)
+			if (wrprot) {
+				unlock_kernel();
 				return -EROFS;
 				return -EROFS;
+			}
 		}
 		}
 	}
 	}
 
 
@@ -1576,6 +1594,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
 	printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
 	       unit[drive].type->name, data_types[system].name);
 	       unit[drive].type->name, data_types[system].name);
 
 
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
@@ -1584,6 +1603,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 	struct amiga_floppy_struct *p = disk->private_data;
 	struct amiga_floppy_struct *p = disk->private_data;
 	int drive = p - unit;
 	int drive = p - unit;
 
 
+	lock_kernel();
 	if (unit[drive].dirty == 1) {
 	if (unit[drive].dirty == 1) {
 		del_timer (flush_track_timer + drive);
 		del_timer (flush_track_timer + drive);
 		non_int_flush_track (drive);
 		non_int_flush_track (drive);
@@ -1597,6 +1617,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 /* the mod_use counter is handled this way */
 /* the mod_use counter is handled this way */
 	floppy_off (drive | 0x40000000);
 	floppy_off (drive | 0x40000000);
 #endif
 #endif
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
@@ -1638,7 +1659,7 @@ static const struct block_device_operations floppy_fops = {
 	.owner		= THIS_MODULE,
 	.owner		= THIS_MODULE,
 	.open		= floppy_open,
 	.open		= floppy_open,
 	.release	= floppy_release,
 	.release	= floppy_release,
-	.locked_ioctl	= fd_ioctl,
+	.ioctl		= fd_ioctl,
 	.getgeo		= fd_getgeo,
 	.getgeo		= fd_getgeo,
 	.media_changed	= amiga_floppy_change,
 	.media_changed	= amiga_floppy_change,
 };
 };

+ 5 - 1
drivers/block/aoe/aoeblk.c

@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/genhd.h>
 #include <linux/genhd.h>
 #include <linux/netdevice.h>
 #include <linux/netdevice.h>
+#include <linux/smp_lock.h>
 #include "aoe.h"
 #include "aoe.h"
 
 
 static struct kmem_cache *buf_pool_cache;
 static struct kmem_cache *buf_pool_cache;
@@ -124,13 +125,16 @@ aoeblk_open(struct block_device *bdev, fmode_t mode)
 	struct aoedev *d = bdev->bd_disk->private_data;
 	struct aoedev *d = bdev->bd_disk->private_data;
 	ulong flags;
 	ulong flags;
 
 
+	lock_kernel();
 	spin_lock_irqsave(&d->lock, flags);
 	spin_lock_irqsave(&d->lock, flags);
 	if (d->flags & DEVFL_UP) {
 	if (d->flags & DEVFL_UP) {
 		d->nopen++;
 		d->nopen++;
 		spin_unlock_irqrestore(&d->lock, flags);
 		spin_unlock_irqrestore(&d->lock, flags);
+		unlock_kernel();
 		return 0;
 		return 0;
 	}
 	}
 	spin_unlock_irqrestore(&d->lock, flags);
 	spin_unlock_irqrestore(&d->lock, flags);
+	unlock_kernel();
 	return -ENODEV;
 	return -ENODEV;
 }
 }
 
 
@@ -173,7 +177,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 		BUG();
 		BUG();
 		bio_endio(bio, -ENXIO);
 		bio_endio(bio, -ENXIO);
 		return 0;
 		return 0;
-	} else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+	} else if (bio->bi_rw & REQ_HARDBARRIER) {
 		bio_endio(bio, -EOPNOTSUPP);
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 		return 0;
 	} else if (bio->bi_io_vec == NULL) {
 	} else if (bio->bi_io_vec == NULL) {

+ 28 - 4
drivers/block/ataflop.c

@@ -67,6 +67,7 @@
 #include <linux/delay.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 
 
 #include <asm/atafd.h>
 #include <asm/atafd.h>
 #include <asm/atafdreg.h>
 #include <asm/atafdreg.h>
@@ -359,7 +360,7 @@ static void finish_fdc( void );
 static void finish_fdc_done( int dummy );
 static void finish_fdc_done( int dummy );
 static void setup_req_params( int drive );
 static void setup_req_params( int drive );
 static void redo_fd_request( void);
 static void redo_fd_request( void);
-static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
                      cmd, unsigned long param);
                      cmd, unsigned long param);
 static void fd_probe( int drive );
 static void fd_probe( int drive );
 static int fd_test_drive_present( int drive );
 static int fd_test_drive_present( int drive );
@@ -1480,7 +1481,7 @@ void do_fd_request(struct request_queue * q)
 	atari_enable_irq( IRQ_MFP_FDC );
 	atari_enable_irq( IRQ_MFP_FDC );
 }
 }
 
 
-static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		    unsigned int cmd, unsigned long param)
 		    unsigned int cmd, unsigned long param)
 {
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct gendisk *disk = bdev->bd_disk;
@@ -1665,6 +1666,17 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode,
 	}
 	}
 }
 }
 
 
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = fd_locked_ioctl(bdev, mode, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
 
 
 /* Initialize the 'unit' variable for drive 'drive' */
 /* Initialize the 'unit' variable for drive 'drive' */
 
 
@@ -1838,24 +1850,36 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	return 0;
 	return 0;
 }
 }
 
 
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = floppy_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
 
 
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
 {
 	struct atari_floppy_struct *p = disk->private_data;
 	struct atari_floppy_struct *p = disk->private_data;
+	lock_kernel();
 	if (p->ref < 0)
 	if (p->ref < 0)
 		p->ref = 0;
 		p->ref = 0;
 	else if (!p->ref--) {
 	else if (!p->ref--) {
 		printk(KERN_ERR "floppy_release with fd_ref == 0");
 		printk(KERN_ERR "floppy_release with fd_ref == 0");
 		p->ref = 0;
 		p->ref = 0;
 	}
 	}
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
 static const struct block_device_operations floppy_fops = {
 static const struct block_device_operations floppy_fops = {
 	.owner		= THIS_MODULE,
 	.owner		= THIS_MODULE,
-	.open		= floppy_open,
+	.open		= floppy_unlocked_open,
 	.release	= floppy_release,
 	.release	= floppy_release,
-	.locked_ioctl	= fd_ioctl,
+	.ioctl		= fd_ioctl,
 	.media_changed	= check_floppy_change,
 	.media_changed	= check_floppy_change,
 	.revalidate_disk= floppy_revalidate,
 	.revalidate_disk= floppy_revalidate,
 };
 };

+ 6 - 3
drivers/block/brd.c

@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
 #include <linux/bio.h>
 #include <linux/bio.h>
 #include <linux/highmem.h>
 #include <linux/highmem.h>
+#include <linux/smp_lock.h>
 #include <linux/radix-tree.h>
 #include <linux/radix-tree.h>
 #include <linux/buffer_head.h> /* invalidate_bh_lrus() */
 #include <linux/buffer_head.h> /* invalidate_bh_lrus() */
 #include <linux/slab.h>
 #include <linux/slab.h>
@@ -340,7 +341,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 						get_capacity(bdev->bd_disk))
 						get_capacity(bdev->bd_disk))
 		goto out;
 		goto out;
 
 
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
 		err = 0;
 		discard_from_brd(brd, sector, bio->bi_size);
 		discard_from_brd(brd, sector, bio->bi_size);
 		goto out;
 		goto out;
@@ -401,6 +402,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
 	 * ram device BLKFLSBUF has special semantics, we want to actually
 	 * ram device BLKFLSBUF has special semantics, we want to actually
 	 * release and destroy the ramdisk data.
 	 * release and destroy the ramdisk data.
 	 */
 	 */
+	lock_kernel();
 	mutex_lock(&bdev->bd_mutex);
 	mutex_lock(&bdev->bd_mutex);
 	error = -EBUSY;
 	error = -EBUSY;
 	if (bdev->bd_openers <= 1) {
 	if (bdev->bd_openers <= 1) {
@@ -417,13 +419,14 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
 		error = 0;
 		error = 0;
 	}
 	}
 	mutex_unlock(&bdev->bd_mutex);
 	mutex_unlock(&bdev->bd_mutex);
+	unlock_kernel();
 
 
 	return error;
 	return error;
 }
 }
 
 
 static const struct block_device_operations brd_fops = {
 static const struct block_device_operations brd_fops = {
 	.owner =		THIS_MODULE,
 	.owner =		THIS_MODULE,
-	.locked_ioctl =		brd_ioctl,
+	.ioctl =		brd_ioctl,
 #ifdef CONFIG_BLK_DEV_XIP
 #ifdef CONFIG_BLK_DEV_XIP
 	.direct_access =	brd_direct_access,
 	.direct_access =	brd_direct_access,
 #endif
 #endif
@@ -479,7 +482,7 @@ static struct brd_device *brd_alloc(int i)
 	if (!brd->brd_queue)
 	if (!brd->brd_queue)
 		goto out_free_dev;
 		goto out_free_dev;
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
-	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
+	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
 

+ 1281 - 884
drivers/block/cciss.c

@@ -56,16 +56,14 @@
 #include <linux/kthread.h>
 #include <linux/kthread.h>
 
 
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20)
+#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
 
 
 /* Embedded module documentation macros - see modules.h */
 /* Embedded module documentation macros - see modules.h */
 MODULE_AUTHOR("Hewlett-Packard Company");
 MODULE_AUTHOR("Hewlett-Packard Company");
 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
-MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
-			" SA6i P600 P800 P400 P400i E200 E200i E500 P700m"
-			" Smart Array G2 Series SAS/SATA Controllers");
-MODULE_VERSION("3.6.20");
+MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
+MODULE_VERSION("3.6.26");
 MODULE_LICENSE("GPL");
 MODULE_LICENSE("GPL");
 
 
 static int cciss_allow_hpsa;
 static int cciss_allow_hpsa;
@@ -107,6 +105,11 @@ static const struct pci_device_id cciss_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
 	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3250},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3251},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3252},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3253},
+	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3254},
 	{0,}
 	{0,}
 };
 };
 
 
@@ -146,6 +149,11 @@ static struct board_type products[] = {
 	{0x3249103C, "Smart Array P812", &SA5_access},
 	{0x3249103C, "Smart Array P812", &SA5_access},
 	{0x324A103C, "Smart Array P712m", &SA5_access},
 	{0x324A103C, "Smart Array P712m", &SA5_access},
 	{0x324B103C, "Smart Array P711m", &SA5_access},
 	{0x324B103C, "Smart Array P711m", &SA5_access},
+	{0x3250103C, "Smart Array", &SA5_access},
+	{0x3251103C, "Smart Array", &SA5_access},
+	{0x3252103C, "Smart Array", &SA5_access},
+	{0x3253103C, "Smart Array", &SA5_access},
+	{0x3254103C, "Smart Array", &SA5_access},
 };
 };
 
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -167,9 +175,13 @@ static DEFINE_MUTEX(scan_mutex);
 static LIST_HEAD(scan_q);
 static LIST_HEAD(scan_q);
 
 
 static void do_cciss_request(struct request_queue *q);
 static void do_cciss_request(struct request_queue *q);
-static irqreturn_t do_cciss_intr(int irq, void *dev_id);
+static irqreturn_t do_cciss_intx(int irq, void *dev_id);
+static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
 static int cciss_open(struct block_device *bdev, fmode_t mode);
 static int cciss_open(struct block_device *bdev, fmode_t mode);
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
 static int cciss_release(struct gendisk *disk, fmode_t mode);
 static int cciss_release(struct gendisk *disk, fmode_t mode);
+static int do_ioctl(struct block_device *bdev, fmode_t mode,
+		    unsigned int cmd, unsigned long arg);
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 		       unsigned int cmd, unsigned long arg);
 		       unsigned int cmd, unsigned long arg);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -179,25 +191,23 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
 static int deregister_disk(ctlr_info_t *h, int drv_index,
 static int deregister_disk(ctlr_info_t *h, int drv_index,
 			   int clear_all, int via_ioctl);
 			   int clear_all, int via_ioctl);
 
 
-static void cciss_read_capacity(int ctlr, int logvol,
+static void cciss_read_capacity(ctlr_info_t *h, int logvol,
 			sector_t *total_size, unsigned int *block_size);
 			sector_t *total_size, unsigned int *block_size);
-static void cciss_read_capacity_16(int ctlr, int logvol,
+static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
 			sector_t *total_size, unsigned int *block_size);
 			sector_t *total_size, unsigned int *block_size);
-static void cciss_geometry_inquiry(int ctlr, int logvol,
+static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
 			sector_t total_size,
 			sector_t total_size,
 			unsigned int block_size, InquiryData_struct *inq_buff,
 			unsigned int block_size, InquiryData_struct *inq_buff,
 				   drive_info_struct *drv);
 				   drive_info_struct *drv);
-static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *,
-					   __u32);
+static void __devinit cciss_interrupt_mode(ctlr_info_t *);
 static void start_io(ctlr_info_t *h);
 static void start_io(ctlr_info_t *h);
-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
 			__u8 page_code, unsigned char scsi3addr[],
 			__u8 page_code, unsigned char scsi3addr[],
 			int cmd_type);
 			int cmd_type);
 static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
 static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
 	int attempt_retry);
 	int attempt_retry);
 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
 
 
-static void fail_all_cmds(unsigned long ctlr);
 static int add_to_scan_list(struct ctlr_info *h);
 static int add_to_scan_list(struct ctlr_info *h);
 static int scan_thread(void *data);
 static int scan_thread(void *data);
 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
@@ -205,11 +215,23 @@ static void cciss_hba_release(struct device *dev);
 static void cciss_device_release(struct device *dev);
 static void cciss_device_release(struct device *dev);
 static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
 static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
 static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
 static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
+static inline u32 next_command(ctlr_info_t *h);
+static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
+	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+	u64 *cfg_offset);
+static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+	unsigned long *memory_bar);
+
+
+/* performant mode helper functions */
+static void  calc_bucket_map(int *bucket, int num_buckets, int nsgs,
+				int *bucket_map);
+static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
 
 
 #ifdef CONFIG_PROC_FS
 #ifdef CONFIG_PROC_FS
-static void cciss_procinit(int i);
+static void cciss_procinit(ctlr_info_t *h);
 #else
 #else
-static void cciss_procinit(int i)
+static void cciss_procinit(ctlr_info_t *h)
 {
 {
 }
 }
 #endif				/* CONFIG_PROC_FS */
 #endif				/* CONFIG_PROC_FS */
@@ -221,9 +243,9 @@ static int cciss_compat_ioctl(struct block_device *, fmode_t,
 
 
 static const struct block_device_operations cciss_fops = {
 static const struct block_device_operations cciss_fops = {
 	.owner = THIS_MODULE,
 	.owner = THIS_MODULE,
-	.open = cciss_open,
+	.open = cciss_unlocked_open,
 	.release = cciss_release,
 	.release = cciss_release,
-	.locked_ioctl = cciss_ioctl,
+	.ioctl = do_ioctl,
 	.getgeo = cciss_getgeo,
 	.getgeo = cciss_getgeo,
 #ifdef CONFIG_COMPAT
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = cciss_compat_ioctl,
 	.compat_ioctl = cciss_compat_ioctl,
@@ -231,6 +253,16 @@ static const struct block_device_operations cciss_fops = {
 	.revalidate_disk = cciss_revalidate,
 	.revalidate_disk = cciss_revalidate,
 };
 };
 
 
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
+{
+	if (likely(h->transMethod == CFGTBL_Trans_Performant))
+		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
 /*
 /*
  * Enqueuing and dequeuing functions for cmdlists.
  * Enqueuing and dequeuing functions for cmdlists.
  */
  */
@@ -257,6 +289,18 @@ static inline void removeQ(CommandList_struct *c)
 	hlist_del_init(&c->list);
 	hlist_del_init(&c->list);
 }
 }
 
 
+static void enqueue_cmd_and_start_io(ctlr_info_t *h,
+	CommandList_struct *c)
+{
+	unsigned long flags;
+	set_performant_mode(h, c);
+	spin_lock_irqsave(&h->lock, flags);
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	start_io(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
 static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
 static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
 	int nr_cmds)
 	int nr_cmds)
 {
 {
@@ -366,32 +410,31 @@ static void cciss_seq_show_header(struct seq_file *seq)
 		h->product_name,
 		h->product_name,
 		(unsigned long)h->board_id,
 		(unsigned long)h->board_id,
 		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
 		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
-		h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
+		h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT],
 		h->num_luns,
 		h->num_luns,
 		h->Qdepth, h->commands_outstanding,
 		h->Qdepth, h->commands_outstanding,
 		h->maxQsinceinit, h->max_outstanding, h->maxSG);
 		h->maxQsinceinit, h->max_outstanding, h->maxSG);
 
 
 #ifdef CONFIG_CISS_SCSI_TAPE
 #ifdef CONFIG_CISS_SCSI_TAPE
-	cciss_seq_tape_report(seq, h->ctlr);
+	cciss_seq_tape_report(seq, h);
 #endif /* CONFIG_CISS_SCSI_TAPE */
 #endif /* CONFIG_CISS_SCSI_TAPE */
 }
 }
 
 
 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
 {
 {
 	ctlr_info_t *h = seq->private;
 	ctlr_info_t *h = seq->private;
-	unsigned ctlr = h->ctlr;
 	unsigned long flags;
 	unsigned long flags;
 
 
 	/* prevent displaying bogus info during configuration
 	/* prevent displaying bogus info during configuration
 	 * or deconfiguration of a logical volume
 	 * or deconfiguration of a logical volume
 	 */
 	 */
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring) {
 	if (h->busy_configuring) {
-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 		return ERR_PTR(-EBUSY);
 		return ERR_PTR(-EBUSY);
 	}
 	}
 	h->busy_configuring = 1;
 	h->busy_configuring = 1;
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 
 
 	if (*pos == 0)
 	if (*pos == 0)
 		cciss_seq_show_header(seq);
 		cciss_seq_show_header(seq);
@@ -499,7 +542,7 @@ cciss_proc_write(struct file *file, const char __user *buf,
 		struct seq_file *seq = file->private_data;
 		struct seq_file *seq = file->private_data;
 		ctlr_info_t *h = seq->private;
 		ctlr_info_t *h = seq->private;
 
 
-		err = cciss_engage_scsi(h->ctlr);
+		err = cciss_engage_scsi(h);
 		if (err == 0)
 		if (err == 0)
 			err = length;
 			err = length;
 	} else
 	} else
@@ -522,7 +565,7 @@ static const struct file_operations cciss_proc_fops = {
 	.write	 = cciss_proc_write,
 	.write	 = cciss_proc_write,
 };
 };
 
 
-static void __devinit cciss_procinit(int i)
+static void __devinit cciss_procinit(ctlr_info_t *h)
 {
 {
 	struct proc_dir_entry *pde;
 	struct proc_dir_entry *pde;
 
 
@@ -530,9 +573,9 @@ static void __devinit cciss_procinit(int i)
 		proc_cciss = proc_mkdir("driver/cciss", NULL);
 		proc_cciss = proc_mkdir("driver/cciss", NULL);
 	if (!proc_cciss)
 	if (!proc_cciss)
 		return;
 		return;
-	pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+	pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
 					S_IROTH, proc_cciss,
 					S_IROTH, proc_cciss,
-					&cciss_proc_fops, hba[i]);
+					&cciss_proc_fops, h);
 }
 }
 #endif				/* CONFIG_PROC_FS */
 #endif				/* CONFIG_PROC_FS */
 
 
@@ -565,12 +608,12 @@ static ssize_t dev_show_unique_id(struct device *dev,
 	unsigned long flags;
 	unsigned long flags;
 	int ret = 0;
 	int ret = 0;
 
 
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring)
 	if (h->busy_configuring)
 		ret = -EBUSY;
 		ret = -EBUSY;
 	else
 	else
 		memcpy(sn, drv->serial_no, sizeof(sn));
 		memcpy(sn, drv->serial_no, sizeof(sn));
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 
 
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
@@ -595,12 +638,12 @@ static ssize_t dev_show_vendor(struct device *dev,
 	unsigned long flags;
 	unsigned long flags;
 	int ret = 0;
 	int ret = 0;
 
 
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring)
 	if (h->busy_configuring)
 		ret = -EBUSY;
 		ret = -EBUSY;
 	else
 	else
 		memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
 		memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 
 
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
@@ -619,12 +662,12 @@ static ssize_t dev_show_model(struct device *dev,
 	unsigned long flags;
 	unsigned long flags;
 	int ret = 0;
 	int ret = 0;
 
 
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring)
 	if (h->busy_configuring)
 		ret = -EBUSY;
 		ret = -EBUSY;
 	else
 	else
 		memcpy(model, drv->model, MODEL_LEN + 1);
 		memcpy(model, drv->model, MODEL_LEN + 1);
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 
 
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
@@ -643,12 +686,12 @@ static ssize_t dev_show_rev(struct device *dev,
 	unsigned long flags;
 	unsigned long flags;
 	int ret = 0;
 	int ret = 0;
 
 
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring)
 	if (h->busy_configuring)
 		ret = -EBUSY;
 		ret = -EBUSY;
 	else
 	else
 		memcpy(rev, drv->rev, REV_LEN + 1);
 		memcpy(rev, drv->rev, REV_LEN + 1);
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 
 
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
@@ -665,17 +708,17 @@ static ssize_t cciss_show_lunid(struct device *dev,
 	unsigned long flags;
 	unsigned long flags;
 	unsigned char lunid[8];
 	unsigned char lunid[8];
 
 
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring) {
 	if (h->busy_configuring) {
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 		return -EBUSY;
 		return -EBUSY;
 	}
 	}
 	if (!drv->heads) {
 	if (!drv->heads) {
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 		return -ENOTTY;
 		return -ENOTTY;
 	}
 	}
 	memcpy(lunid, drv->LunID, sizeof(lunid));
 	memcpy(lunid, drv->LunID, sizeof(lunid));
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 		lunid[0], lunid[1], lunid[2], lunid[3],
 		lunid[0], lunid[1], lunid[2], lunid[3],
 		lunid[4], lunid[5], lunid[6], lunid[7]);
 		lunid[4], lunid[5], lunid[6], lunid[7]);
@@ -690,13 +733,13 @@ static ssize_t cciss_show_raid_level(struct device *dev,
 	int raid;
 	int raid;
 	unsigned long flags;
 	unsigned long flags;
 
 
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring) {
 	if (h->busy_configuring) {
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 		return -EBUSY;
 		return -EBUSY;
 	}
 	}
 	raid = drv->raid_level;
 	raid = drv->raid_level;
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 	if (raid < 0 || raid > RAID_UNKNOWN)
 	if (raid < 0 || raid > RAID_UNKNOWN)
 		raid = RAID_UNKNOWN;
 		raid = RAID_UNKNOWN;
 
 
@@ -713,13 +756,13 @@ static ssize_t cciss_show_usage_count(struct device *dev,
 	unsigned long flags;
 	unsigned long flags;
 	int count;
 	int count;
 
 
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring) {
 	if (h->busy_configuring) {
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 		return -EBUSY;
 		return -EBUSY;
 	}
 	}
 	count = drv->usage_count;
 	count = drv->usage_count;
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 	return snprintf(buf, 20, "%d\n", count);
 	return snprintf(buf, 20, "%d\n", count);
 }
 }
 static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
 static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
@@ -864,60 +907,70 @@ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
 /*
 /*
  * For operations that cannot sleep, a command block is allocated at init,
  * For operations that cannot sleep, a command block is allocated at init,
  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
- * which ones are free or in use.  For operations that can wait for kmalloc
- * to possible sleep, this routine can be called with get_from_pool set to 0.
- * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was.
+ * which ones are free or in use.
  */
  */
-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
+static CommandList_struct *cmd_alloc(ctlr_info_t *h)
 {
 {
 	CommandList_struct *c;
 	CommandList_struct *c;
 	int i;
 	int i;
 	u64bit temp64;
 	u64bit temp64;
 	dma_addr_t cmd_dma_handle, err_dma_handle;
 	dma_addr_t cmd_dma_handle, err_dma_handle;
 
 
-	if (!get_from_pool) {
-		c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
-			sizeof(CommandList_struct), &cmd_dma_handle);
-		if (c == NULL)
+	do {
+		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
+		if (i == h->nr_cmds)
 			return NULL;
 			return NULL;
-		memset(c, 0, sizeof(CommandList_struct));
+	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
+		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
+	c = h->cmd_pool + i;
+	memset(c, 0, sizeof(CommandList_struct));
+	cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
+	c->err_info = h->errinfo_pool + i;
+	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+	err_dma_handle = h->errinfo_pool_dhandle
+	    + i * sizeof(ErrorInfo_struct);
+	h->nr_allocs++;
 
 
-		c->cmdindex = -1;
+	c->cmdindex = i;
 
 
-		c->err_info = (ErrorInfo_struct *)
-		    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
-			    &err_dma_handle);
+	INIT_HLIST_NODE(&c->list);
+	c->busaddr = (__u32) cmd_dma_handle;
+	temp64.val = (__u64) err_dma_handle;
+	c->ErrDesc.Addr.lower = temp64.val32.lower;
+	c->ErrDesc.Addr.upper = temp64.val32.upper;
+	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
 
 
-		if (c->err_info == NULL) {
-			pci_free_consistent(h->pdev,
-				sizeof(CommandList_struct), c, cmd_dma_handle);
-			return NULL;
-		}
-		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-	} else {		/* get it out of the controllers pool */
-
-		do {
-			i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
-			if (i == h->nr_cmds)
-				return NULL;
-		} while (test_and_set_bit
-			 (i & (BITS_PER_LONG - 1),
-			  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
-#ifdef CCISS_DEBUG
-		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
-#endif
-		c = h->cmd_pool + i;
-		memset(c, 0, sizeof(CommandList_struct));
-		cmd_dma_handle = h->cmd_pool_dhandle
-		    + i * sizeof(CommandList_struct);
-		c->err_info = h->errinfo_pool + i;
-		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-		err_dma_handle = h->errinfo_pool_dhandle
-		    + i * sizeof(ErrorInfo_struct);
-		h->nr_allocs++;
+	c->ctlr = h->ctlr;
+	return c;
+}
 
 
-		c->cmdindex = i;
+/* allocate a command using pci_alloc_consistent, used for ioctls,
+ * etc., not for the main i/o path.
+ */
+static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
+{
+	CommandList_struct *c;
+	u64bit temp64;
+	dma_addr_t cmd_dma_handle, err_dma_handle;
+
+	c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
+		sizeof(CommandList_struct), &cmd_dma_handle);
+	if (c == NULL)
+		return NULL;
+	memset(c, 0, sizeof(CommandList_struct));
+
+	c->cmdindex = -1;
+
+	c->err_info = (ErrorInfo_struct *)
+	    pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
+		    &err_dma_handle);
+
+	if (c->err_info == NULL) {
+		pci_free_consistent(h->pdev,
+			sizeof(CommandList_struct), c, cmd_dma_handle);
+		return NULL;
 	}
 	}
+	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
 
 
 	INIT_HLIST_NODE(&c->list);
 	INIT_HLIST_NODE(&c->list);
 	c->busaddr = (__u32) cmd_dma_handle;
 	c->busaddr = (__u32) cmd_dma_handle;
@@ -930,27 +983,26 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
 	return c;
 	return c;
 }
 }
 
 
-/*
- * Frees a command block that was previously allocated with cmd_alloc().
- */
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
 {
 {
 	int i;
 	int i;
+
+	i = c - h->cmd_pool;
+	clear_bit(i & (BITS_PER_LONG - 1),
+		  h->cmd_pool_bits + (i / BITS_PER_LONG));
+	h->nr_frees++;
+}
+
+static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
+{
 	u64bit temp64;
 	u64bit temp64;
 
 
-	if (!got_from_pool) {
-		temp64.val32.lower = c->ErrDesc.Addr.lower;
-		temp64.val32.upper = c->ErrDesc.Addr.upper;
-		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
-				    c->err_info, (dma_addr_t) temp64.val);
-		pci_free_consistent(h->pdev, sizeof(CommandList_struct),
-				    c, (dma_addr_t) c->busaddr);
-	} else {
-		i = c - h->cmd_pool;
-		clear_bit(i & (BITS_PER_LONG - 1),
-			  h->cmd_pool_bits + (i / BITS_PER_LONG));
-		h->nr_frees++;
-	}
+	temp64.val32.lower = c->ErrDesc.Addr.lower;
+	temp64.val32.upper = c->ErrDesc.Addr.upper;
+	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
+			    c->err_info, (dma_addr_t) temp64.val);
+	pci_free_consistent(h->pdev, sizeof(CommandList_struct),
+			    c, (dma_addr_t) c->busaddr);
 }
 }
 
 
 static inline ctlr_info_t *get_host(struct gendisk *disk)
 static inline ctlr_info_t *get_host(struct gendisk *disk)
@@ -968,13 +1020,10 @@ static inline drive_info_struct *get_drv(struct gendisk *disk)
  */
  */
 static int cciss_open(struct block_device *bdev, fmode_t mode)
 static int cciss_open(struct block_device *bdev, fmode_t mode)
 {
 {
-	ctlr_info_t *host = get_host(bdev->bd_disk);
+	ctlr_info_t *h = get_host(bdev->bd_disk);
 	drive_info_struct *drv = get_drv(bdev->bd_disk);
 	drive_info_struct *drv = get_drv(bdev->bd_disk);
 
 
-#ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
-#endif				/* CCISS_DEBUG */
-
+	dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
 	if (drv->busy_configuring)
 	if (drv->busy_configuring)
 		return -EBUSY;
 		return -EBUSY;
 	/*
 	/*
@@ -1000,29 +1049,39 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
 			return -EPERM;
 			return -EPERM;
 	}
 	}
 	drv->usage_count++;
 	drv->usage_count++;
-	host->usage_count++;
+	h->usage_count++;
 	return 0;
 	return 0;
 }
 }
 
 
+static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = cciss_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
 /*
 /*
  * Close.  Sync first.
  * Close.  Sync first.
  */
  */
 static int cciss_release(struct gendisk *disk, fmode_t mode)
 static int cciss_release(struct gendisk *disk, fmode_t mode)
 {
 {
-	ctlr_info_t *host = get_host(disk);
-	drive_info_struct *drv = get_drv(disk);
-
-#ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name);
-#endif				/* CCISS_DEBUG */
+	ctlr_info_t *h;
+	drive_info_struct *drv;
 
 
+	lock_kernel();
+	h = get_host(disk);
+	drv = get_drv(disk);
+	dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
 	drv->usage_count--;
 	drv->usage_count--;
-	host->usage_count--;
+	h->usage_count--;
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
-#ifdef CONFIG_COMPAT
-
 static int do_ioctl(struct block_device *bdev, fmode_t mode,
 static int do_ioctl(struct block_device *bdev, fmode_t mode,
 		    unsigned cmd, unsigned long arg)
 		    unsigned cmd, unsigned long arg)
 {
 {
@@ -1033,6 +1092,8 @@ static int do_ioctl(struct block_device *bdev, fmode_t mode,
 	return ret;
 	return ret;
 }
 }
 
 
+#ifdef CONFIG_COMPAT
+
 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
 				  unsigned cmd, unsigned long arg);
 				  unsigned cmd, unsigned long arg);
 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
 static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
@@ -1163,11 +1224,11 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 	return 0;
 }
 }
 
 
-static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c)
+static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
 {
 {
 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
 	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
 			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
 			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
-		(void)check_for_unit_attention(host, c);
+		(void)check_for_unit_attention(h, c);
 }
 }
 /*
 /*
  * ioctl
  * ioctl
@@ -1176,15 +1237,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 		       unsigned int cmd, unsigned long arg)
 		       unsigned int cmd, unsigned long arg)
 {
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct gendisk *disk = bdev->bd_disk;
-	ctlr_info_t *host = get_host(disk);
+	ctlr_info_t *h = get_host(disk);
 	drive_info_struct *drv = get_drv(disk);
 	drive_info_struct *drv = get_drv(disk);
-	int ctlr = host->ctlr;
 	void __user *argp = (void __user *)arg;
 	void __user *argp = (void __user *)arg;
 
 
-#ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
-#endif				/* CCISS_DEBUG */
-
+	dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
+		cmd, arg);
 	switch (cmd) {
 	switch (cmd) {
 	case CCISS_GETPCIINFO:
 	case CCISS_GETPCIINFO:
 		{
 		{
@@ -1192,10 +1250,10 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 
 
 			if (!arg)
 			if (!arg)
 				return -EINVAL;
 				return -EINVAL;
-			pciinfo.domain = pci_domain_nr(host->pdev->bus);
-			pciinfo.bus = host->pdev->bus->number;
-			pciinfo.dev_fn = host->pdev->devfn;
-			pciinfo.board_id = host->board_id;
+			pciinfo.domain = pci_domain_nr(h->pdev->bus);
+			pciinfo.bus = h->pdev->bus->number;
+			pciinfo.dev_fn = h->pdev->devfn;
+			pciinfo.board_id = h->board_id;
 			if (copy_to_user
 			if (copy_to_user
 			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
 			    (argp, &pciinfo, sizeof(cciss_pci_info_struct)))
 				return -EFAULT;
 				return -EFAULT;
@@ -1207,9 +1265,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			if (!arg)
 			if (!arg)
 				return -EINVAL;
 				return -EINVAL;
 			intinfo.delay =
 			intinfo.delay =
-			    readl(&host->cfgtable->HostWrite.CoalIntDelay);
+			    readl(&h->cfgtable->HostWrite.CoalIntDelay);
 			intinfo.count =
 			intinfo.count =
-			    readl(&host->cfgtable->HostWrite.CoalIntCount);
+			    readl(&h->cfgtable->HostWrite.CoalIntCount);
 			if (copy_to_user
 			if (copy_to_user
 			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
 			    (argp, &intinfo, sizeof(cciss_coalint_struct)))
 				return -EFAULT;
 				return -EFAULT;
@@ -1229,26 +1287,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
 			    (&intinfo, argp, sizeof(cciss_coalint_struct)))
 				return -EFAULT;
 				return -EFAULT;
 			if ((intinfo.delay == 0) && (intinfo.count == 0))
 			if ((intinfo.delay == 0) && (intinfo.count == 0))
-			{
-//                      printk("cciss_ioctl: delay and count cannot be 0\n");
 				return -EINVAL;
 				return -EINVAL;
-			}
-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+			spin_lock_irqsave(&h->lock, flags);
 			/* Update the field, and then ring the doorbell */
 			/* Update the field, and then ring the doorbell */
 			writel(intinfo.delay,
 			writel(intinfo.delay,
-			       &(host->cfgtable->HostWrite.CoalIntDelay));
+			       &(h->cfgtable->HostWrite.CoalIntDelay));
 			writel(intinfo.count,
 			writel(intinfo.count,
-			       &(host->cfgtable->HostWrite.CoalIntCount));
-			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+			       &(h->cfgtable->HostWrite.CoalIntCount));
+			writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 
 
 			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
 			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
-				if (!(readl(host->vaddr + SA5_DOORBELL)
+				if (!(readl(h->vaddr + SA5_DOORBELL)
 				      & CFGTBL_ChangeReq))
 				      & CFGTBL_ChangeReq))
 					break;
 					break;
 				/* delay and try again */
 				/* delay and try again */
 				udelay(1000);
 				udelay(1000);
 			}
 			}
-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+			spin_unlock_irqrestore(&h->lock, flags);
 			if (i >= MAX_IOCTL_CONFIG_WAIT)
 			if (i >= MAX_IOCTL_CONFIG_WAIT)
 				return -EAGAIN;
 				return -EAGAIN;
 			return 0;
 			return 0;
@@ -1262,7 +1317,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 				return -EINVAL;
 				return -EINVAL;
 			for (i = 0; i < 16; i++)
 			for (i = 0; i < 16; i++)
 				NodeName[i] =
 				NodeName[i] =
-				    readb(&host->cfgtable->ServerName[i]);
+				    readb(&h->cfgtable->ServerName[i]);
 			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
 			if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
 				return -EFAULT;
 				return -EFAULT;
 			return 0;
 			return 0;
@@ -1282,23 +1337,23 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			    (NodeName, argp, sizeof(NodeName_type)))
 			    (NodeName, argp, sizeof(NodeName_type)))
 				return -EFAULT;
 				return -EFAULT;
 
 
-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+			spin_lock_irqsave(&h->lock, flags);
 
 
 			/* Update the field, and then ring the doorbell */
 			/* Update the field, and then ring the doorbell */
 			for (i = 0; i < 16; i++)
 			for (i = 0; i < 16; i++)
 				writeb(NodeName[i],
 				writeb(NodeName[i],
-				       &host->cfgtable->ServerName[i]);
+				       &h->cfgtable->ServerName[i]);
 
 
-			writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+			writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 
 
 			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
 			for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
-				if (!(readl(host->vaddr + SA5_DOORBELL)
+				if (!(readl(h->vaddr + SA5_DOORBELL)
 				      & CFGTBL_ChangeReq))
 				      & CFGTBL_ChangeReq))
 					break;
 					break;
 				/* delay and try again */
 				/* delay and try again */
 				udelay(1000);
 				udelay(1000);
 			}
 			}
-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+			spin_unlock_irqrestore(&h->lock, flags);
 			if (i >= MAX_IOCTL_CONFIG_WAIT)
 			if (i >= MAX_IOCTL_CONFIG_WAIT)
 				return -EAGAIN;
 				return -EAGAIN;
 			return 0;
 			return 0;
@@ -1310,7 +1365,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 
 
 			if (!arg)
 			if (!arg)
 				return -EINVAL;
 				return -EINVAL;
-			heartbeat = readl(&host->cfgtable->HeartBeat);
+			heartbeat = readl(&h->cfgtable->HeartBeat);
 			if (copy_to_user
 			if (copy_to_user
 			    (argp, &heartbeat, sizeof(Heartbeat_type)))
 			    (argp, &heartbeat, sizeof(Heartbeat_type)))
 				return -EFAULT;
 				return -EFAULT;
@@ -1322,7 +1377,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 
 
 			if (!arg)
 			if (!arg)
 				return -EINVAL;
 				return -EINVAL;
-			BusTypes = readl(&host->cfgtable->BusTypes);
+			BusTypes = readl(&h->cfgtable->BusTypes);
 			if (copy_to_user
 			if (copy_to_user
 			    (argp, &BusTypes, sizeof(BusTypes_type)))
 			    (argp, &BusTypes, sizeof(BusTypes_type)))
 				return -EFAULT;
 				return -EFAULT;
@@ -1334,7 +1389,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 
 
 			if (!arg)
 			if (!arg)
 				return -EINVAL;
 				return -EINVAL;
-			memcpy(firmware, host->firm_ver, 4);
+			memcpy(firmware, h->firm_ver, 4);
 
 
 			if (copy_to_user
 			if (copy_to_user
 			    (argp, firmware, sizeof(FirmwareVer_type)))
 			    (argp, firmware, sizeof(FirmwareVer_type)))
@@ -1357,7 +1412,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 	case CCISS_DEREGDISK:
 	case CCISS_DEREGDISK:
 	case CCISS_REGNEWD:
 	case CCISS_REGNEWD:
 	case CCISS_REVALIDVOLS:
 	case CCISS_REVALIDVOLS:
-		return rebuild_lun_table(host, 0, 1);
+		return rebuild_lun_table(h, 0, 1);
 
 
 	case CCISS_GETLUNINFO:{
 	case CCISS_GETLUNINFO:{
 			LogvolInfo_struct luninfo;
 			LogvolInfo_struct luninfo;
@@ -1377,7 +1432,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			CommandList_struct *c;
 			CommandList_struct *c;
 			char *buff = NULL;
 			char *buff = NULL;
 			u64bit temp64;
 			u64bit temp64;
-			unsigned long flags;
 			DECLARE_COMPLETION_ONSTACK(wait);
 			DECLARE_COMPLETION_ONSTACK(wait);
 
 
 			if (!arg)
 			if (!arg)
@@ -1413,7 +1467,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			} else {
 			} else {
 				memset(buff, 0, iocommand.buf_size);
 				memset(buff, 0, iocommand.buf_size);
 			}
 			}
-			if ((c = cmd_alloc(host, 0)) == NULL) {
+			c = cmd_special_alloc(h);
+			if (!c) {
 				kfree(buff);
 				kfree(buff);
 				return -ENOMEM;
 				return -ENOMEM;
 			}
 			}
@@ -1439,7 +1494,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 
 
 			/* Fill in the scatter gather information */
 			/* Fill in the scatter gather information */
 			if (iocommand.buf_size > 0) {
 			if (iocommand.buf_size > 0) {
-				temp64.val = pci_map_single(host->pdev, buff,
+				temp64.val = pci_map_single(h->pdev, buff,
 					iocommand.buf_size,
 					iocommand.buf_size,
 					PCI_DMA_BIDIRECTIONAL);
 					PCI_DMA_BIDIRECTIONAL);
 				c->SG[0].Addr.lower = temp64.val32.lower;
 				c->SG[0].Addr.lower = temp64.val32.lower;
@@ -1449,30 +1504,24 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			}
 			}
 			c->waiting = &wait;
 			c->waiting = &wait;
 
 
-			/* Put the request on the tail of the request queue */
-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-			addQ(&host->reqQ, c);
-			host->Qdepth++;
-			start_io(host);
-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
+			enqueue_cmd_and_start_io(h, c);
 			wait_for_completion(&wait);
 			wait_for_completion(&wait);
 
 
 			/* unlock the buffers from DMA */
 			/* unlock the buffers from DMA */
 			temp64.val32.lower = c->SG[0].Addr.lower;
 			temp64.val32.lower = c->SG[0].Addr.lower;
 			temp64.val32.upper = c->SG[0].Addr.upper;
 			temp64.val32.upper = c->SG[0].Addr.upper;
-			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
+			pci_unmap_single(h->pdev, (dma_addr_t) temp64.val,
 					 iocommand.buf_size,
 					 iocommand.buf_size,
 					 PCI_DMA_BIDIRECTIONAL);
 					 PCI_DMA_BIDIRECTIONAL);
 
 
-			check_ioctl_unit_attention(host, c);
+			check_ioctl_unit_attention(h, c);
 
 
 			/* Copy the error information out */
 			/* Copy the error information out */
 			iocommand.error_info = *(c->err_info);
 			iocommand.error_info = *(c->err_info);
 			if (copy_to_user
 			if (copy_to_user
 			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
 			    (argp, &iocommand, sizeof(IOCTL_Command_struct))) {
 				kfree(buff);
 				kfree(buff);
-				cmd_free(host, c, 0);
+				cmd_special_free(h, c);
 				return -EFAULT;
 				return -EFAULT;
 			}
 			}
 
 
@@ -1481,12 +1530,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 				if (copy_to_user
 				if (copy_to_user
 				    (iocommand.buf, buff, iocommand.buf_size)) {
 				    (iocommand.buf, buff, iocommand.buf_size)) {
 					kfree(buff);
 					kfree(buff);
-					cmd_free(host, c, 0);
+					cmd_special_free(h, c);
 					return -EFAULT;
 					return -EFAULT;
 				}
 				}
 			}
 			}
 			kfree(buff);
 			kfree(buff);
-			cmd_free(host, c, 0);
+			cmd_special_free(h, c);
 			return 0;
 			return 0;
 		}
 		}
 	case CCISS_BIG_PASSTHRU:{
 	case CCISS_BIG_PASSTHRU:{
@@ -1495,7 +1544,6 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned char **buff = NULL;
 			unsigned char **buff = NULL;
 			int *buff_size = NULL;
 			int *buff_size = NULL;
 			u64bit temp64;
 			u64bit temp64;
-			unsigned long flags;
 			BYTE sg_used = 0;
 			BYTE sg_used = 0;
 			int status = 0;
 			int status = 0;
 			int i;
 			int i;
@@ -1569,7 +1617,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 				data_ptr += sz;
 				data_ptr += sz;
 				sg_used++;
 				sg_used++;
 			}
 			}
-			if ((c = cmd_alloc(host, 0)) == NULL) {
+			c = cmd_special_alloc(h);
+			if (!c) {
 				status = -ENOMEM;
 				status = -ENOMEM;
 				goto cleanup1;
 				goto cleanup1;
 			}
 			}
@@ -1590,7 +1639,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 			if (ioc->buf_size > 0) {
 			if (ioc->buf_size > 0) {
 				for (i = 0; i < sg_used; i++) {
 				for (i = 0; i < sg_used; i++) {
 					temp64.val =
 					temp64.val =
-					    pci_map_single(host->pdev, buff[i],
+					    pci_map_single(h->pdev, buff[i],
 						    buff_size[i],
 						    buff_size[i],
 						    PCI_DMA_BIDIRECTIONAL);
 						    PCI_DMA_BIDIRECTIONAL);
 					c->SG[i].Addr.lower =
 					c->SG[i].Addr.lower =
@@ -1602,26 +1651,21 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 				}
 				}
 			}
 			}
 			c->waiting = &wait;
 			c->waiting = &wait;
-			/* Put the request on the tail of the request queue */
-			spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-			addQ(&host->reqQ, c);
-			host->Qdepth++;
-			start_io(host);
-			spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+			enqueue_cmd_and_start_io(h, c);
 			wait_for_completion(&wait);
 			wait_for_completion(&wait);
 			/* unlock the buffers from DMA */
 			/* unlock the buffers from DMA */
 			for (i = 0; i < sg_used; i++) {
 			for (i = 0; i < sg_used; i++) {
 				temp64.val32.lower = c->SG[i].Addr.lower;
 				temp64.val32.lower = c->SG[i].Addr.lower;
 				temp64.val32.upper = c->SG[i].Addr.upper;
 				temp64.val32.upper = c->SG[i].Addr.upper;
-				pci_unmap_single(host->pdev,
+				pci_unmap_single(h->pdev,
 					(dma_addr_t) temp64.val, buff_size[i],
 					(dma_addr_t) temp64.val, buff_size[i],
 					PCI_DMA_BIDIRECTIONAL);
 					PCI_DMA_BIDIRECTIONAL);
 			}
 			}
-			check_ioctl_unit_attention(host, c);
+			check_ioctl_unit_attention(h, c);
 			/* Copy the error information out */
 			/* Copy the error information out */
 			ioc->error_info = *(c->err_info);
 			ioc->error_info = *(c->err_info);
 			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
 			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
-				cmd_free(host, c, 0);
+				cmd_special_free(h, c);
 				status = -EFAULT;
 				status = -EFAULT;
 				goto cleanup1;
 				goto cleanup1;
 			}
 			}
@@ -1631,14 +1675,14 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 				for (i = 0; i < sg_used; i++) {
 				for (i = 0; i < sg_used; i++) {
 					if (copy_to_user
 					if (copy_to_user
 					    (ptr, buff[i], buff_size[i])) {
 					    (ptr, buff[i], buff_size[i])) {
-						cmd_free(host, c, 0);
+						cmd_special_free(h, c);
 						status = -EFAULT;
 						status = -EFAULT;
 						goto cleanup1;
 						goto cleanup1;
 					}
 					}
 					ptr += buff_size[i];
 					ptr += buff_size[i];
 				}
 				}
 			}
 			}
-			cmd_free(host, c, 0);
+			cmd_special_free(h, c);
 			status = 0;
 			status = 0;
 		      cleanup1:
 		      cleanup1:
 			if (buff) {
 			if (buff) {
@@ -1726,26 +1770,26 @@ static void cciss_check_queues(ctlr_info_t *h)
 
 
 static void cciss_softirq_done(struct request *rq)
 static void cciss_softirq_done(struct request *rq)
 {
 {
-	CommandList_struct *cmd = rq->completion_data;
-	ctlr_info_t *h = hba[cmd->ctlr];
-	SGDescriptor_struct *curr_sg = cmd->SG;
-	unsigned long flags;
+	CommandList_struct *c = rq->completion_data;
+	ctlr_info_t *h = hba[c->ctlr];
+	SGDescriptor_struct *curr_sg = c->SG;
 	u64bit temp64;
 	u64bit temp64;
+	unsigned long flags;
 	int i, ddir;
 	int i, ddir;
 	int sg_index = 0;
 	int sg_index = 0;
 
 
-	if (cmd->Request.Type.Direction == XFER_READ)
+	if (c->Request.Type.Direction == XFER_READ)
 		ddir = PCI_DMA_FROMDEVICE;
 		ddir = PCI_DMA_FROMDEVICE;
 	else
 	else
 		ddir = PCI_DMA_TODEVICE;
 		ddir = PCI_DMA_TODEVICE;
 
 
 	/* command did not need to be retried */
 	/* command did not need to be retried */
 	/* unmap the DMA mapping for all the scatter gather elements */
 	/* unmap the DMA mapping for all the scatter gather elements */
-	for (i = 0; i < cmd->Header.SGList; i++) {
+	for (i = 0; i < c->Header.SGList; i++) {
 		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
 		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
-			cciss_unmap_sg_chain_block(h, cmd);
+			cciss_unmap_sg_chain_block(h, c);
 			/* Point to the next block */
 			/* Point to the next block */
-			curr_sg = h->cmd_sg_list[cmd->cmdindex];
+			curr_sg = h->cmd_sg_list[c->cmdindex];
 			sg_index = 0;
 			sg_index = 0;
 		}
 		}
 		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
 		temp64.val32.lower = curr_sg[sg_index].Addr.lower;
@@ -1755,18 +1799,16 @@ static void cciss_softirq_done(struct request *rq)
 		++sg_index;
 		++sg_index;
 	}
 	}
 
 
-#ifdef CCISS_DEBUG
-	printk("Done with %p\n", rq);
-#endif				/* CCISS_DEBUG */
+	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
 
 
 	/* set the residual count for pc requests */
 	/* set the residual count for pc requests */
-	if (blk_pc_request(rq))
-		rq->resid_len = cmd->err_info->ResidualCnt;
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
+		rq->resid_len = c->err_info->ResidualCnt;
 
 
 	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
 	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);
 
 
 	spin_lock_irqsave(&h->lock, flags);
 	spin_lock_irqsave(&h->lock, flags);
-	cmd_free(h, cmd, 1);
+	cmd_free(h, c);
 	cciss_check_queues(h);
 	cciss_check_queues(h);
 	spin_unlock_irqrestore(&h->lock, flags);
 	spin_unlock_irqrestore(&h->lock, flags);
 }
 }
@@ -1782,7 +1824,7 @@ static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
  * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
  * via the inquiry page 0.  Model, vendor, and rev are set to empty strings if
  * they cannot be read.
  * they cannot be read.
  */
  */
-static void cciss_get_device_descr(int ctlr, int logvol,
+static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
 				   char *vendor, char *model, char *rev)
 				   char *vendor, char *model, char *rev)
 {
 {
 	int rc;
 	int rc;
@@ -1797,8 +1839,8 @@ static void cciss_get_device_descr(int ctlr, int logvol,
 	if (!inq_buf)
 	if (!inq_buf)
 		return;
 		return;
 
 
-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
-	rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(*inq_buf), 0,
+	log_unit_to_scsi3addr(h, scsi3addr, logvol);
+	rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
 			scsi3addr, TYPE_CMD);
 			scsi3addr, TYPE_CMD);
 	if (rc == IO_OK) {
 	if (rc == IO_OK) {
 		memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
 		memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
@@ -1818,7 +1860,7 @@ static void cciss_get_device_descr(int ctlr, int logvol,
  * number cannot be had, for whatever reason, 16 bytes of 0xff
  * are returned instead.
  */
-static void cciss_get_serial_no(int ctlr, int logvol,
+static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
 				unsigned char *serial_no, int buflen)
 {
 #define PAGE_83_INQ_BYTES 64
@@ -1833,8 +1875,8 @@ static void cciss_get_serial_no(int ctlr, int logvol,
 	if (!buf)
 		return;
 	memset(serial_no, 0, buflen);
-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
-	rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf,
+	log_unit_to_scsi3addr(h, scsi3addr, logvol);
+	rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
 		PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
 	if (rc == IO_OK)
 		memcpy(serial_no, &buf[8], buflen);
@@ -1900,10 +1942,9 @@ init_queue_failure:
  * is also the controller node.  Any changes to disk 0 will show up on
  * the next reboot.
  */
-static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
-	int via_ioctl)
+static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
+	int first_time, int via_ioctl)
 {
-	ctlr_info_t *h = hba[ctlr];
 	struct gendisk *disk;
 	InquiryData_struct *inq_buff = NULL;
 	unsigned int block_size;
@@ -1920,16 +1961,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
 
 	/* testing to see if 16-byte CDBs are already being used */
 	if (h->cciss_read == CCISS_READ_16) {
-		cciss_read_capacity_16(h->ctlr, drv_index,
+		cciss_read_capacity_16(h, drv_index,
 			&total_size, &block_size);
 
 	} else {
-		cciss_read_capacity(ctlr, drv_index, &total_size, &block_size);
+		cciss_read_capacity(h, drv_index, &total_size, &block_size);
 		/* if read_capacity returns all F's this volume is >2TB */
 		/* in size so we switch to 16-byte CDB's for all */
 		/* read/write ops */
 		if (total_size == 0xFFFFFFFFULL) {
-			cciss_read_capacity_16(ctlr, drv_index,
+			cciss_read_capacity_16(h, drv_index,
 			&total_size, &block_size);
 			h->cciss_read = CCISS_READ_16;
 			h->cciss_write = CCISS_WRITE_16;
@@ -1939,14 +1980,14 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
 		}
 	}
 
-	cciss_geometry_inquiry(ctlr, drv_index, total_size, block_size,
+	cciss_geometry_inquiry(h, drv_index, total_size, block_size,
 			       inq_buff, drvinfo);
 	drvinfo->block_size = block_size;
 	drvinfo->nr_blocks = total_size + 1;
 
-	cciss_get_device_descr(ctlr, drv_index, drvinfo->vendor,
+	cciss_get_device_descr(h, drv_index, drvinfo->vendor,
 				drvinfo->model, drvinfo->rev);
-	cciss_get_serial_no(ctlr, drv_index, drvinfo->serial_no,
+	cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
 			sizeof(drvinfo->serial_no));
 	/* Save the lunid in case we deregister the disk, below. */
 	memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
@@ -1971,10 +2012,10 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
 	 * (unless it's the first disk (for the controller node).
 	 */
 	if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
-		printk(KERN_WARNING "disk %d has changed.\n", drv_index);
-		spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+		dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
+		spin_lock_irqsave(&h->lock, flags);
 		h->drv[drv_index]->busy_configuring = 1;
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 
 		/* deregister_disk sets h->drv[drv_index]->queue = NULL
 		 * which keeps the interrupt handler from starting
@@ -2024,8 +2065,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
 		if (cciss_add_disk(h, disk, drv_index) != 0) {
 			cciss_free_gendisk(h, drv_index);
 			cciss_free_drive_info(h, drv_index);
-			printk(KERN_WARNING "cciss:%d could not update "
-				"disk %d\n", h->ctlr, drv_index);
+			dev_warn(&h->pdev->dev, "could not update disk %d\n",
+				drv_index);
 			--h->num_luns;
 		}
 	}
@@ -2035,7 +2076,7 @@ freeret:
 	kfree(drvinfo);
 	return;
 mem_msg:
-	printk(KERN_ERR "cciss: out of memory\n");
+	dev_err(&h->pdev->dev, "out of memory\n");
 	goto freeret;
 }
 
@@ -2127,9 +2168,9 @@ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
 		h->gendisk[drv_index] =
 			alloc_disk(1 << NWD_SHIFT);
 		if (!h->gendisk[drv_index]) {
-			printk(KERN_ERR "cciss%d: could not "
-				"allocate a new disk %d\n",
-				h->ctlr, drv_index);
+			dev_err(&h->pdev->dev,
+				"could not allocate a new disk %d\n",
+				drv_index);
 			goto err_free_drive_info;
 		}
 	}
@@ -2180,8 +2221,7 @@ static void cciss_add_controller_node(ctlr_info_t *h)
 	cciss_free_gendisk(h, drv_index);
 	cciss_free_drive_info(h, drv_index);
 error:
-	printk(KERN_WARNING "cciss%d: could not "
-		"add disk 0.\n", h->ctlr);
+	dev_warn(&h->pdev->dev, "could not add disk 0.\n");
 	return;
 }
 
@@ -2196,7 +2236,6 @@ error:
 static int rebuild_lun_table(ctlr_info_t *h, int first_time,
 	int via_ioctl)
 {
-	int ctlr = h->ctlr;
 	int num_luns;
 	ReportLunData_struct *ld_buff = NULL;
 	int return_code;
@@ -2211,27 +2250,27 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
 		return -EPERM;
 
 	/* Set busy_configuring flag for this operation */
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	if (h->busy_configuring) {
-		spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 		return -EBUSY;
 	}
 	h->busy_configuring = 1;
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	spin_unlock_irqrestore(&h->lock, flags);
 
 	ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
 	if (ld_buff == NULL)
 		goto mem_msg;
 
-	return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff,
+	return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
 				      sizeof(ReportLunData_struct),
 				      0, CTLR_LUNID, TYPE_CMD);
 
 	if (return_code == IO_OK)
 		listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
 	else {	/* reading number of logical volumes failed */
-		printk(KERN_WARNING "cciss: report logical volume"
-		       " command failed\n");
+		dev_warn(&h->pdev->dev,
+			"report logical volume command failed\n");
 		listlength = 0;
 		goto freeret;
 	}
@@ -2239,7 +2278,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
 	num_luns = listlength / 8;	/* 8 bytes per entry */
 	if (num_luns > CISS_MAX_LUN) {
 		num_luns = CISS_MAX_LUN;
-		printk(KERN_WARNING "cciss: more luns configured"
+		dev_warn(&h->pdev->dev, "more luns configured"
 		       " on controller than can be handled by"
 		       " this driver.\n");
 	}
@@ -2270,9 +2309,9 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
 		}
 		if (!drv_found) {
 			/* Deregister it from the OS, it's gone. */
-			spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+			spin_lock_irqsave(&h->lock, flags);
 			h->drv[i]->busy_configuring = 1;
-			spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+			spin_unlock_irqrestore(&h->lock, flags);
 			return_code = deregister_disk(h, i, 1, via_ioctl);
 			if (h->drv[i] != NULL)
 				h->drv[i]->busy_configuring = 0;
@@ -2311,8 +2350,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time,
 			if (drv_index == -1)
 				goto freeret;
 		}
-		cciss_update_drive_info(ctlr, drv_index, first_time,
-			via_ioctl);
+		cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
 	}		/* end for */
 
 freeret:
@@ -2324,7 +2362,7 @@ freeret:
 	 */
 	return -1;
 mem_msg:
-	printk(KERN_ERR "cciss: out of memory\n");
+	dev_err(&h->pdev->dev, "out of memory\n");
 	h->busy_configuring = 0;
 	goto freeret;
 }
@@ -2444,11 +2482,10 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
 	return 0;
 }
 
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
 		size_t size, __u8 page_code, unsigned char *scsi3addr,
 		int cmd_type)
 {
-	ctlr_info_t *h = hba[ctlr];
 	u64bit buff_dma_handle;
 	int status = IO_OK;
 
@@ -2532,8 +2569,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
 			c->Request.Timeout = 0;
 			break;
 		default:
-			printk(KERN_WARNING
-			       "cciss%d:  Unknown Command 0x%c\n", ctlr, cmd);
+			dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd);
 			return IO_ERROR;
 		}
 	} else if (cmd_type == TYPE_MSG) {
@@ -2565,13 +2601,12 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
 			c->Request.CDB[0] = cmd;
 			break;
 		default:
-			printk(KERN_WARNING
-			       "cciss%d: unknown message type %d\n", ctlr, cmd);
+			dev_warn(&h->pdev->dev,
+				"unknown message type %d\n", cmd);
 			return IO_ERROR;
 		}
 	} else {
-		printk(KERN_WARNING
-		       "cciss%d: unknown command type %d\n", ctlr, cmd_type);
+		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
 		return IO_ERROR;
 	}
 	/* Fill in the scatter gather information */
@@ -2599,15 +2634,14 @@ static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
 		default:
 			if (check_for_unit_attention(h, c))
 				return IO_NEEDS_RETRY;
-			printk(KERN_WARNING "cciss%d: cmd 0x%02x "
+			dev_warn(&h->pdev->dev, "cmd 0x%02x "
 				"check condition, sense key = 0x%02x\n",
-				h->ctlr, c->Request.CDB[0],
-				c->err_info->SenseInfo[2]);
+				c->Request.CDB[0], c->err_info->SenseInfo[2]);
 		}
 		break;
 	default:
-		printk(KERN_WARNING "cciss%d: cmd 0x%02x"
-			"scsi status = 0x%02x\n", h->ctlr,
+		dev_warn(&h->pdev->dev, "cmd 0x%02x"
+			"scsi status = 0x%02x\n",
 			c->Request.CDB[0], c->err_info->ScsiStatus);
 		break;
 	}
@@ -2630,43 +2664,42 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
 		/* expected for inquiry and report lun commands */
 		break;
 	case CMD_INVALID:
-		printk(KERN_WARNING "cciss: cmd 0x%02x is "
+		dev_warn(&h->pdev->dev, "cmd 0x%02x is "
 		       "reported invalid\n", c->Request.CDB[0]);
 		return_status = IO_ERROR;
 		break;
 	case CMD_PROTOCOL_ERR:
-		printk(KERN_WARNING "cciss: cmd 0x%02x has "
-		       "protocol error \n", c->Request.CDB[0]);
+		dev_warn(&h->pdev->dev, "cmd 0x%02x has "
+		       "protocol error\n", c->Request.CDB[0]);
 		return_status = IO_ERROR;
 		break;
 	case CMD_HARDWARE_ERR:
-		printk(KERN_WARNING "cciss: cmd 0x%02x had "
+		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
 		       " hardware error\n", c->Request.CDB[0]);
 		return_status = IO_ERROR;
 		break;
 	case CMD_CONNECTION_LOST:
-		printk(KERN_WARNING "cciss: cmd 0x%02x had "
+		dev_warn(&h->pdev->dev, "cmd 0x%02x had "
 		       "connection lost\n", c->Request.CDB[0]);
 		return_status = IO_ERROR;
 		break;
 	case CMD_ABORTED:
-		printk(KERN_WARNING "cciss: cmd 0x%02x was "
+		dev_warn(&h->pdev->dev, "cmd 0x%02x was "
 		       "aborted\n", c->Request.CDB[0]);
 		return_status = IO_ERROR;
 		break;
 	case CMD_ABORT_FAILED:
-		printk(KERN_WARNING "cciss: cmd 0x%02x reports "
+		dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
 		       "abort failed\n", c->Request.CDB[0]);
 		return_status = IO_ERROR;
 		break;
 	case CMD_UNSOLICITED_ABORT:
-		printk(KERN_WARNING
-		       "cciss%d: unsolicited abort 0x%02x\n", h->ctlr,
+		dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
 			c->Request.CDB[0]);
 		return_status = IO_NEEDS_RETRY;
 		break;
 	default:
-		printk(KERN_WARNING "cciss: cmd 0x%02x returned "
+		dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
 		       "unknown status %x\n", c->Request.CDB[0],
 		       c->err_info->CommandStatus);
 		return_status = IO_ERROR;
@@ -2679,17 +2712,11 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
 	u64bit buff_dma_handle;
-	unsigned long flags;
 	int return_status = IO_OK;
 
 resend_cmd2:
 	c->waiting = &wait;
-	/* Put the request on the tail of the queue and send it */
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
-	addQ(&h->reqQ, c);
-	h->Qdepth++;
-	start_io(h);
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	enqueue_cmd_and_start_io(h, c);
 
 	wait_for_completion(&wait);
 
@@ -2700,7 +2727,7 @@ resend_cmd2:
 
 	if (return_status == IO_NEEDS_RETRY &&
 		c->retry_count < MAX_CMD_RETRIES) {
-		printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr,
+		dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
 			c->Request.CDB[0]);
 		c->retry_count++;
 		/* erase the old error information */
@@ -2719,27 +2746,26 @@ command_done:
 	return return_status;
 }
 
-static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
+static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
 			   __u8 page_code, unsigned char scsi3addr[],
 			int cmd_type)
 {
-	ctlr_info_t *h = hba[ctlr];
 	CommandList_struct *c;
 	int return_status;
 
-	c = cmd_alloc(h, 0);
+	c = cmd_special_alloc(h);
 	if (!c)
 		return -ENOMEM;
-	return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code,
+	return_status = fill_cmd(h, c, cmd, buff, size, page_code,
 		scsi3addr, cmd_type);
 	if (return_status == IO_OK)
 		return_status = sendcmd_withirq_core(h, c, 1);
 
-	cmd_free(h, c, 0);
+	cmd_special_free(h, c);
 	return return_status;
 }
 
-static void cciss_geometry_inquiry(int ctlr, int logvol,
+static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
 				   sector_t total_size,
 				   unsigned int block_size,
 				   InquiryData_struct *inq_buff,
@@ -2750,13 +2776,13 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
 	unsigned char scsi3addr[8];
 
 	memset(inq_buff, 0, sizeof(InquiryData_struct));
-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
-	return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff,
+	log_unit_to_scsi3addr(h, scsi3addr, logvol);
+	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
 			sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
 	if (return_code == IO_OK) {
 		if (inq_buff->data_byte[8] == 0xFF) {
-			printk(KERN_WARNING
-			       "cciss: reading geometry failed, volume "
+			dev_warn(&h->pdev->dev,
+			       "reading geometry failed, volume "
 			       "does not support reading geometry\n");
 			drv->heads = 255;
 			drv->sectors = 32;	/* Sectors per track */
@@ -2780,12 +2806,12 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
 			drv->cylinders = real_size;
 		}
 	} else {		/* Get geometry failed */
-		printk(KERN_WARNING "cciss: reading geometry failed\n");
+		dev_warn(&h->pdev->dev, "reading geometry failed\n");
 	}
 }
 
 static void
-cciss_read_capacity(int ctlr, int logvol, sector_t *total_size,
+cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
 		    unsigned int *block_size)
 {
 	ReadCapdata_struct *buf;
@@ -2794,25 +2820,25 @@ cciss_read_capacity(int ctlr, int logvol, sector_t *total_size,
 
 	buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
 	if (!buf) {
-		printk(KERN_WARNING "cciss: out of memory\n");
+		dev_warn(&h->pdev->dev, "out of memory\n");
 		return;
 	}
 
-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
-	return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf,
+	log_unit_to_scsi3addr(h, scsi3addr, logvol);
+	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
 		sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
 	if (return_code == IO_OK) {
 		*total_size = be32_to_cpu(*(__be32 *) buf->total_size);
 		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
 	} else {		/* read capacity command failed */
-		printk(KERN_WARNING "cciss: read capacity failed\n");
+		dev_warn(&h->pdev->dev, "read capacity failed\n");
 		*total_size = 0;
 		*block_size = BLOCK_SIZE;
 	}
 	kfree(buf);
 }
 
-static void cciss_read_capacity_16(int ctlr, int logvol,
+static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
 	sector_t *total_size, unsigned int *block_size)
 {
 	ReadCapdata_struct_16 *buf;
@@ -2821,23 +2847,23 @@ static void cciss_read_capacity_16(int ctlr, int logvol,
 
 	buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
 	if (!buf) {
-		printk(KERN_WARNING "cciss: out of memory\n");
+		dev_warn(&h->pdev->dev, "out of memory\n");
 		return;
 	}
 
-	log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol);
-	return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16,
-		ctlr, buf, sizeof(ReadCapdata_struct_16),
+	log_unit_to_scsi3addr(h, scsi3addr, logvol);
+	return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
+		buf, sizeof(ReadCapdata_struct_16),
 			0, scsi3addr, TYPE_CMD);
 	if (return_code == IO_OK) {
 		*total_size = be64_to_cpu(*(__be64 *) buf->total_size);
 		*block_size = be32_to_cpu(*(__be32 *) buf->block_size);
 	} else {		/* read capacity command failed */
-		printk(KERN_WARNING "cciss: read capacity failed\n");
+		dev_warn(&h->pdev->dev, "read capacity failed\n");
 		*total_size = 0;
 		*block_size = BLOCK_SIZE;
 	}
-	printk(KERN_INFO "      blocks= %llu block_size= %d\n",
+	dev_info(&h->pdev->dev, "      blocks= %llu block_size= %d\n",
 	       (unsigned long long)*total_size+1, *block_size);
 	kfree(buf);
 }
@@ -2865,17 +2891,17 @@ static int cciss_revalidate(struct gendisk *disk)
 
 	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
 	if (inq_buff == NULL) {
-		printk(KERN_WARNING "cciss: out of memory\n");
+		dev_warn(&h->pdev->dev, "out of memory\n");
 		return 1;
 	}
 	if (h->cciss_read == CCISS_READ_10) {
-		cciss_read_capacity(h->ctlr, logvol,
+		cciss_read_capacity(h, logvol,
 					&total_size, &block_size);
 	} else {
-		cciss_read_capacity_16(h->ctlr, logvol,
+		cciss_read_capacity_16(h, logvol,
 					&total_size, &block_size);
 	}
-	cciss_geometry_inquiry(h->ctlr, logvol, total_size, block_size,
+	cciss_geometry_inquiry(h, logvol, total_size, block_size,
 			       inq_buff, drv);
 
 	blk_queue_logical_block_size(drv->queue, drv->block_size);
@@ -2909,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
 		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
 		/* can't do anything if fifo is full */
 		if ((h->access.fifo_full(h))) {
-			printk(KERN_WARNING "cciss: fifo full\n");
+			dev_warn(&h->pdev->dev, "fifo full\n");
 			break;
 		}
 
@@ -2925,7 +2951,7 @@ static void start_io(ctlr_info_t *h)
 	}
 }
 
-/* Assumes that CCISS_LOCK(h->ctlr) is held. */
+/* Assumes that h->lock is held. */
 /* Zeros out the error record and then resends the command back */
 /* to the controller */
 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
@@ -2966,7 +2992,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 	driver_byte = DRIVER_OK;
 	msg_byte = cmd->err_info->CommandStatus; /* correct?  seems too device specific */
 
-	if (blk_pc_request(cmd->rq))
+	if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		host_byte = DID_PASSTHROUGH;
 	else
 		host_byte = DID_OK;
@@ -2975,8 +3001,8 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 		host_byte, driver_byte);
 
 	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
-		if (!blk_pc_request(cmd->rq))
-			printk(KERN_WARNING "cciss: cmd %p "
+		if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
+			dev_warn(&h->pdev->dev, "cmd %p "
 			       "has SCSI Status 0x%x\n",
 			       cmd, cmd->err_info->ScsiStatus);
 		return error_value;
@@ -2985,17 +3011,19 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 	/* check the sense key */
 	sense_key = 0xf & cmd->err_info->SenseInfo[2];
 	/* no status or recovered error */
-	if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
+	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
+	    (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
 		error_value = 0;
 
 	if (check_for_unit_attention(h, cmd)) {
-		*retry_cmd = !blk_pc_request(cmd->rq);
+		*retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
 		return 0;
 	}
 
-	if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
+	/* Not SG_IO or similar? */
+	if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 		if (error_value != 0)
-			printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
+			dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
 			       " sense key = 0x%x\n", cmd, sense_key);
 		return error_value;
 	}
@@ -3035,90 +3063,97 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
 		break;
 	case CMD_DATA_UNDERRUN:
-		if (blk_fs_request(cmd->rq)) {
-			printk(KERN_WARNING "cciss: cmd %p has"
+		if (cmd->rq->cmd_type == REQ_TYPE_FS) {
+			dev_warn(&h->pdev->dev, "cmd %p has"
 			       " completed with data underrun "
 			       "reported\n", cmd);
 			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
 		}
 		break;
 	case CMD_DATA_OVERRUN:
-		if (blk_fs_request(cmd->rq))
-			printk(KERN_WARNING "cciss: cmd %p has"
+		if (cmd->rq->cmd_type == REQ_TYPE_FS)
+			dev_warn(&h->pdev->dev, "cciss: cmd %p has"
 			       " completed with data overrun "
 			       "reported\n", cmd);
 		break;
 	case CMD_INVALID:
-		printk(KERN_WARNING "cciss: cmd %p is "
+		dev_warn(&h->pdev->dev, "cciss: cmd %p is "
 		       "reported invalid\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_PROTOCOL_ERR:
-		printk(KERN_WARNING "cciss: cmd %p has "
-		       "protocol error \n", cmd);
+		dev_warn(&h->pdev->dev, "cciss: cmd %p has "
+		       "protocol error\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_HARDWARE_ERR:
-		printk(KERN_WARNING "cciss: cmd %p had "
+		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
 		       " hardware error\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_CONNECTION_LOST:
-		printk(KERN_WARNING "cciss: cmd %p had "
+		dev_warn(&h->pdev->dev, "cciss: cmd %p had "
 		       "connection lost\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_ABORTED:
-		printk(KERN_WARNING "cciss: cmd %p was "
+		dev_warn(&h->pdev->dev, "cciss: cmd %p was "
 		       "aborted\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ABORT);
 		break;
 	case CMD_ABORT_FAILED:
-		printk(KERN_WARNING "cciss: cmd %p reports "
+		dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
 		       "abort failed\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_UNSOLICITED_ABORT:
-		printk(KERN_WARNING "cciss%d: unsolicited "
+		dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
 		       "abort %p\n", h->ctlr, cmd);
 		if (cmd->retry_count < MAX_CMD_RETRIES) {
 			retry_cmd = 1;
-			printk(KERN_WARNING
-			       "cciss%d: retrying %p\n", h->ctlr, cmd);
+			dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
 			cmd->retry_count++;
 		} else
-			printk(KERN_WARNING
-			       "cciss%d: %p retried too "
-			       "many times\n", h->ctlr, cmd);
+			dev_warn(&h->pdev->dev,
+				"%p retried too many times\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ABORT);
 		break;
 	case CMD_TIMEOUT:
-		printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd);
+		dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	default:
-		printk(KERN_WARNING "cciss: cmd %p returned "
+		dev_warn(&h->pdev->dev, "cmd %p returned "
 		       "unknown status %x\n", cmd,
 		       cmd->err_info->CommandStatus);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR);
+			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+				DID_PASSTHROUGH : DID_ERROR);
 	}
 
 after_error_processing:
@@ -3132,6 +3167,34 @@ after_error_processing:
 	blk_complete_request(cmd->rq);
 }
 
+static inline u32 cciss_tag_contains_index(u32 tag)
+{
+#define DIRECT_LOOKUP_BIT 0x10
+	return tag & DIRECT_LOOKUP_BIT;
+}
+
+static inline u32 cciss_tag_to_index(u32 tag)
+{
+#define DIRECT_LOOKUP_SHIFT 5
+	return tag >> DIRECT_LOOKUP_SHIFT;
+}
+
+static inline u32 cciss_tag_discard_error_bits(u32 tag)
+{
+#define CCISS_ERROR_BITS 0x03
+	return tag & ~CCISS_ERROR_BITS;
+}
+
+static inline void cciss_mark_tag_indexed(u32 *tag)
+{
+	*tag |= DIRECT_LOOKUP_BIT;
+}
+
+static inline void cciss_set_tag_index(u32 *tag, u32 index)
+{
+	*tag |= (index << DIRECT_LOOKUP_SHIFT);
+}
+
 /*
  * Get a request and submit it to the controller.
  */
@@ -3163,7 +3226,8 @@ static void do_cciss_request(struct request_queue *q)
 
 	BUG_ON(creq->nr_phys_segments > h->maxsgentries);
 
-	if ((c = cmd_alloc(h, 1)) == NULL)
+	c = cmd_alloc(h);
+	if (!c)
 		goto full;
 
 	blk_start_request(creq);
@@ -3180,8 +3244,8 @@ static void do_cciss_request(struct request_queue *q)
 	/* got command from pool, so use the command block index instead */
 	/* for direct lookups. */
 	/* The first 2 bits are reserved for controller error reporting. */
-	c->Header.Tag.lower = (c->cmdindex << 3);
-	c->Header.Tag.lower |= 0x04;	/* flag for direct lookup. */
+	cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
+	cciss_mark_tag_indexed(&c->Header.Tag.lower);
 	memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
 	c->Request.CDBLen = 10;	/* 12 byte commands not in FW yet; */
 	c->Request.Type.Type = TYPE_CMD;	/* It is a command. */
@@ -3192,11 +3256,8 @@ static void do_cciss_request(struct request_queue *q)
 	c->Request.CDB[0] =
 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
 	start_blk = blk_rq_pos(creq);
-#ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+	dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
 	       (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
-#endif				/* CCISS_DEBUG */
-
 	sg_init_table(tmp_sg, h->maxsgentries);
 	seg = blk_rq_map_sg(q, creq, tmp_sg);
 
@@ -3236,17 +3297,18 @@ static void do_cciss_request(struct request_queue *q)
 	if (seg > h->maxSG)
 		h->maxSG = seg;
 
-#ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "cciss: Submitting %ld sectors in %d segments "
+	dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
 			"chained[%d]\n",
 			blk_rq_sectors(creq), seg, chained);
-#endif				/* CCISS_DEBUG */
 
-	c->Header.SGList = c->Header.SGTotal = seg + chained;
-	if (seg > h->max_cmd_sgentries)
+	c->Header.SGTotal = seg + chained;
+	if (seg <= h->max_cmd_sgentries)
+		c->Header.SGList = c->Header.SGTotal;
+	else
 		c->Header.SGList = h->max_cmd_sgentries;
+	set_performant_mode(h, c);
 
-	if (likely(blk_fs_request(creq))) {
+	if (likely(creq->cmd_type == REQ_TYPE_FS)) {
 		if(h->cciss_read == CCISS_READ_10) {
 			c->Request.CDB[1] = 0;
 			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
@@ -3276,11 +3338,12 @@ static void do_cciss_request(struct request_queue *q)
 			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[14] = c->Request.CDB[15] = 0;
 		}
-	} else if (blk_pc_request(creq)) {
+	} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		c->Request.CDBLen = creq->cmd_len;
 		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
 	} else {
-		printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type);
+		dev_warn(&h->pdev->dev, "bad request type %d\n",
+			creq->cmd_type);
 		BUG();
 	}
 
@@ -3313,72 +3376,131 @@ static inline int interrupt_pending(ctlr_info_t *h)
 
 static inline long interrupt_not_for_us(ctlr_info_t *h)
 {
-	return (((h->access.intr_pending(h) == 0) ||
-		 (h->interrupts_enabled == 0)));
+	return ((h->access.intr_pending(h) == 0) ||
+		(h->interrupts_enabled == 0));
 }
 
-static irqreturn_t do_cciss_intr(int irq, void *dev_id)
+static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
+			u32 raw_tag)
 {
-	ctlr_info_t *h = dev_id;
+	if (unlikely(tag_index >= h->nr_cmds)) {
+		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
+		return 1;
+	}
+	return 0;
+}
+
+static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
+				u32 raw_tag)
+{
+	removeQ(c);
+	if (likely(c->cmd_type == CMD_RWREQ))
+		complete_command(h, c, 0);
+	else if (c->cmd_type == CMD_IOCTL_PEND)
+		complete(c->waiting);
+#ifdef CONFIG_CISS_SCSI_TAPE
+	else if (c->cmd_type == CMD_SCSI)
+		complete_scsi_command(c, 0, raw_tag);
+#endif
+}
+
+static inline u32 next_command(ctlr_info_t *h)
+{
+	u32 a;
+
+	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+		return h->access.command_completed(h);
+
+	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+		(h->reply_pool_head)++;
+		h->commands_outstanding--;
+	} else {
+		a = FIFO_EMPTY;
+	}
+	/* Check for wraparound */
+	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+		h->reply_pool_head = h->reply_pool;
+		h->reply_pool_wraparound ^= 1;
+	}
+	return a;
+}
+
+/* process completion of an indexed ("direct lookup") command */
+static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
+{
+	u32 tag_index;
 	CommandList_struct *c;
+
+	tag_index = cciss_tag_to_index(raw_tag);
+	if (bad_tag(h, tag_index, raw_tag))
+		return next_command(h);
+	c = h->cmd_pool + tag_index;
+	finish_cmd(h, c, raw_tag);
+	return next_command(h);
+}
+
+/* process completion of a non-indexed command */
+static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
+{
+	u32 tag;
+	CommandList_struct *c = NULL;
+	struct hlist_node *tmp;
+	__u32 busaddr_masked, tag_masked;
+
+	tag = cciss_tag_discard_error_bits(raw_tag);
+	hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+		busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
+		tag_masked = cciss_tag_discard_error_bits(tag);
+		if (busaddr_masked == tag_masked) {
+			finish_cmd(h, c, raw_tag);
+			return next_command(h);
+		}
+	}
+	bad_tag(h, h->nr_cmds + 1, raw_tag);
+	return next_command(h);
+}
+
+static irqreturn_t do_cciss_intx(int irq, void *dev_id)
+{
+	ctlr_info_t *h = dev_id;
 	unsigned long flags;
-	__u32 a, a1, a2;
+	u32 raw_tag;
 
 	if (interrupt_not_for_us(h))
 		return IRQ_NONE;
-	/*
-	 * If there are completed commands in the completion queue,
-	 * we had better do something about it.
-	 */
-	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
 	while (interrupt_pending(h)) {
-		while ((a = get_next_completion(h)) != FIFO_EMPTY) {
-			a1 = a;
-			if ((a & 0x04)) {
-				a2 = (a >> 3);
-				if (a2 >= h->nr_cmds) {
-					printk(KERN_WARNING
-					       "cciss: controller cciss%d failed, stopping.\n",
-					       h->ctlr);
-					spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
-					fail_all_cmds(h->ctlr);
-					return IRQ_HANDLED;
-				}
-
-				c = h->cmd_pool + a2;
-				a = c->busaddr;
-
-			} else {
-				struct hlist_node *tmp;
-
-				a &= ~3;
-				c = NULL;
-				hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
-					if (c->busaddr == a)
-						break;
-				}
-			}
-			/*
-			 * If we've found the command, take it off the
-			 * completion Q and free it
-			 */
-			if (c && c->busaddr == a) {
-				removeQ(c);
-				if (c->cmd_type == CMD_RWREQ) {
-					complete_command(h, c, 0);
-				} else if (c->cmd_type == CMD_IOCTL_PEND) {
-					complete(c->waiting);
-				}
-#				ifdef CONFIG_CISS_SCSI_TAPE
-				else if (c->cmd_type == CMD_SCSI)
-					complete_scsi_command(c, 0, a1);
-#				endif
-				continue;
-			}
+		raw_tag = get_next_completion(h);
+		while (raw_tag != FIFO_EMPTY) {
+			if (cciss_tag_contains_index(raw_tag))
+				raw_tag = process_indexed_cmd(h, raw_tag);
+			else
+				raw_tag = process_nonindexed_cmd(h, raw_tag);
 		}
 	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return IRQ_HANDLED;
+}
 
-	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
+ * check the interrupt pending register because it is not set.
+ */
+static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
+{
+	ctlr_info_t *h = dev_id;
+	unsigned long flags;
+	u32 raw_tag;
+
+	spin_lock_irqsave(&h->lock, flags);
+	raw_tag = get_next_completion(h);
+	while (raw_tag != FIFO_EMPTY) {
+		if (cciss_tag_contains_index(raw_tag))
+			raw_tag = process_indexed_cmd(h, raw_tag);
+		else
+			raw_tag = process_nonindexed_cmd(h, raw_tag);
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -3510,18 +3632,17 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
 
 	switch (c->err_info->SenseInfo[12]) {
 	case STATE_CHANGED:
-		printk(KERN_WARNING "cciss%d: a state change "
-			"detected, command retried\n", h->ctlr);
+		dev_warn(&h->pdev->dev, "a state change "
+			"detected, command retried\n");
 		return 1;
 	break;
 	case LUN_FAILED:
-		printk(KERN_WARNING "cciss%d: LUN failure "
-			"detected, action required\n", h->ctlr);
+		dev_warn(&h->pdev->dev, "LUN failure "
+			"detected, action required\n");
 		return 1;
 	break;
 	case REPORT_LUNS_CHANGED:
-		printk(KERN_WARNING "cciss%d: report LUN data "
-			"changed\n", h->ctlr);
+		dev_warn(&h->pdev->dev, "report LUN data changed\n");
 	/*
 	 * Here, we could call add_to_scan_list and wake up the scan thread,
 	 * except that it's quite likely that we will get more than one
@@ -3541,19 +3662,18 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
 		return 1;
 	break;
 	case POWER_OR_RESET:
-		printk(KERN_WARNING "cciss%d: a power on "
-			"or device reset detected\n", h->ctlr);
+		dev_warn(&h->pdev->dev,
+			"a power on or device reset detected\n");
 		return 1;
 	break;
 	case UNIT_ATTENTION_CLEARED:
-		printk(KERN_WARNING "cciss%d: unit attention "
-		    "cleared by another initiator\n", h->ctlr);
+		dev_warn(&h->pdev->dev,
+			"unit attention cleared by another initiator\n");
 		return 1;
 	break;
 	default:
-		printk(KERN_WARNING "cciss%d: unknown "
-			"unit attention detected\n", h->ctlr);
-				return 1;
+		dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
+		return 1;
 	}
 }
 
@@ -3562,39 +3682,41 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
  *   the io functions.
  *   This is for debug only.
  */
-#ifdef CCISS_DEBUG
-static void print_cfg_table(CfgTable_struct *tb)
+static void print_cfg_table(ctlr_info_t *h)
 {
 	int i;
 	char temp_name[17];
+	CfgTable_struct *tb = h->cfgtable;
 
-	printk("Controller Configuration information\n");
-	printk("------------------------------------\n");
+	dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
+	dev_dbg(&h->pdev->dev, "------------------------------------\n");
 	for (i = 0; i < 4; i++)
 		temp_name[i] = readb(&(tb->Signature[i]));
 	temp_name[4] = '\0';
-	printk("   Signature = %s\n", temp_name);
-	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
-	printk("   Transport methods supported = 0x%x\n",
+	dev_dbg(&h->pdev->dev, "   Signature = %s\n", temp_name);
+	dev_dbg(&h->pdev->dev, "   Spec Number = %d\n",
+		readl(&(tb->SpecValence)));
+	dev_dbg(&h->pdev->dev, "   Transport methods supported = 0x%x\n",
 	       readl(&(tb->TransportSupport)));
-	printk("   Transport methods active = 0x%x\n",
+	dev_dbg(&h->pdev->dev, "   Transport methods active = 0x%x\n",
 	       readl(&(tb->TransportActive)));
-	printk("   Requested transport Method = 0x%x\n",
+	dev_dbg(&h->pdev->dev, "   Requested transport Method = 0x%x\n",
 	       readl(&(tb->HostWrite.TransportRequest)));
-	printk("   Coalesce Interrupt Delay = 0x%x\n",
+	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Delay = 0x%x\n",
 	       readl(&(tb->HostWrite.CoalIntDelay)));
-	printk("   Coalesce Interrupt Count = 0x%x\n",
+	dev_dbg(&h->pdev->dev, "   Coalesce Interrupt Count = 0x%x\n",
 	       readl(&(tb->HostWrite.CoalIntCount)));
-	printk("   Max outstanding commands = 0x%d\n",
+	dev_dbg(&h->pdev->dev, "   Max outstanding commands = 0x%d\n",
 	       readl(&(tb->CmdsOutMax)));
-	printk("   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
+	dev_dbg(&h->pdev->dev, "   Bus Types = 0x%x\n",
+		readl(&(tb->BusTypes)));
 	for (i = 0; i < 16; i++)
 		temp_name[i] = readb(&(tb->ServerName[i]));
 	temp_name[16] = '\0';
-	printk("   Server Name = %s\n", temp_name);
-	printk("   Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat)));
+	dev_dbg(&h->pdev->dev, "   Server Name = %s\n", temp_name);
+	dev_dbg(&h->pdev->dev, "   Heartbeat Counter = 0x%x\n\n\n",
+		readl(&(tb->HeartBeat)));
 }
-#endif				/* CCISS_DEBUG */
 
 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
 {
@@ -3618,7 +3740,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
 				offset += 8;
 				break;
 			default:	/* reserved in PCI 2.2 */
-				printk(KERN_WARNING
+				dev_warn(&pdev->dev,
 				       "Base address is invalid\n");
 				return -1;
 				break;
@@ -3630,12 +3752,182 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
 	return -1;
 }
 
+/* Fill in bucket_map[], given nsgs (the max number of
+ * scatter gather elements supported) and bucket[],
+ * which is an array of 8 integers.  The bucket[] array
+ * contains 8 different DMA transfer sizes (in 16
+ * byte increments) which the controller uses to fetch
+ * commands.  This function fills in bucket_map[], which
+ * maps a given number of scatter gather elements to one of
+ * the 8 DMA transfer sizes.  The point of it is to allow the
+ * controller to only do as much DMA as needed to fetch the
+ * command, with the DMA transfer size encoded in the lower
+ * bits of the command address.
+ */
+static void  calc_bucket_map(int bucket[], int num_buckets,
+	int nsgs, int *bucket_map)
+{
+	int i, j, b, size;
+
+	/* even a command with 0 SGs requires 4 blocks */
+#define MINIMUM_TRANSFER_BLOCKS 4
+#define NUM_BUCKETS 8
+	/* Note, bucket_map must have nsgs+1 entries. */
+	for (i = 0; i <= nsgs; i++) {
+		/* Compute size of a command with i SG entries */
+		size = i + MINIMUM_TRANSFER_BLOCKS;
+		b = num_buckets; /* Assume the biggest bucket */
+		/* Find the bucket that is just big enough */
+		for (j = 0; j < 8; j++) {
+			if (bucket[j] >= size) {
+				b = j;
+				break;
+			}
+		}
+		/* for a command with i SG entries, use bucket b. */
+		bucket_map[i] = b;
+	}
+}
+
+static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
+{
+	int i;
+
+	/* under certain very rare conditions, this can take awhile.
+	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+	 * as we enter this code.) */
+	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+			break;
+		msleep(10);
+	}
+}
+
+static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
+{
+	/* This is a bit complicated.  There are 8 registers on
+	 * the controller which we write to to tell it 8 different
+	 * sizes of commands which there may be.  It's a way of
+	 * reducing the DMA done to fetch each command.  Encoded into
+	 * each command's tag are 3 bits which communicate to the controller
+	 * which of the eight sizes that command fits within.  The size of
+	 * each command depends on how many scatter gather entries there are.
+	 * Each SG entry requires 16 bytes.  The eight registers are programmed
+	 * with the number of 16-byte blocks a command of that size requires.
+	 * The smallest command possible requires 5 such 16 byte blocks.
+	 * the largest command possible requires MAXSGENTRIES + 4 16-byte
+	 * blocks.  Note, this only extends to the SG entries contained
+	 * within the command block, and does not extend to chained blocks
+	 * of SG elements.   bft[] contains the eight values we write to
+	 * the registers.  They are not evenly distributed, but have more
+	 * sizes for small commands, and fewer sizes for larger commands.
+	 */
+	__u32 trans_offset;
+	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
+			/*
+			 *  5 = 1 s/g entry or 4k
+			 *  6 = 2 s/g entry or 8k
+			 *  8 = 4 s/g entry or 16k
+			 * 10 = 6 s/g entry or 24k
+			 */
+	unsigned long register_value;
+	BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
+
+	h->reply_pool_wraparound = 1; /* spec: init to 1 */
+
+	/* Controller spec: zero out this buffer. */
+	memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
+	h->reply_pool_head = h->reply_pool;
+
+	trans_offset = readl(&(h->cfgtable->TransMethodOffset));
+	calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
+				h->blockFetchTable);
+	writel(bft[0], &h->transtable->BlockFetch0);
+	writel(bft[1], &h->transtable->BlockFetch1);
+	writel(bft[2], &h->transtable->BlockFetch2);
+	writel(bft[3], &h->transtable->BlockFetch3);
+	writel(bft[4], &h->transtable->BlockFetch4);
+	writel(bft[5], &h->transtable->BlockFetch5);
+	writel(bft[6], &h->transtable->BlockFetch6);
+	writel(bft[7], &h->transtable->BlockFetch7);
+
+	/* size of controller ring buffer */
+	writel(h->max_commands, &h->transtable->RepQSize);
+	writel(1, &h->transtable->RepQCount);
+	writel(0, &h->transtable->RepQCtrAddrLow32);
+	writel(0, &h->transtable->RepQCtrAddrHigh32);
+	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
+	writel(0, &h->transtable->RepQAddr0High32);
+	writel(CFGTBL_Trans_Performant,
+			&(h->cfgtable->HostWrite.TransportRequest));
+
+	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
+	cciss_wait_for_mode_change_ack(h);
+	register_value = readl(&(h->cfgtable->TransportActive));
+	if (!(register_value & CFGTBL_Trans_Performant))
+		dev_warn(&h->pdev->dev, "cciss: unable to get board into"
+					" performant mode\n");
+}
+
+static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
+{
+	__u32 trans_support;
+
+	dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
+	/* Attempt to put controller into performant mode if supported */
+	/* Does board support performant mode? */
+	trans_support = readl(&(h->cfgtable->TransportSupport));
+	if (!(trans_support & PERFORMANT_MODE))
+		return;
+
+	dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
+	/* Performant mode demands commands on a 32 byte boundary
+	 * pci_alloc_consistent aligns on page boundarys already.
+	 * Just need to check if divisible by 32
+	 */
+	if ((sizeof(CommandList_struct) % 32) != 0) {
+		dev_warn(&h->pdev->dev, "%s %d %s\n",
+			"cciss info: command size[",
+			(int)sizeof(CommandList_struct),
+			"] not divisible by 32, no performant mode..\n");
+		return;
+	}
+
+	/* Performant mode ring buffer and supporting data structures */
+	h->reply_pool = (__u64 *)pci_alloc_consistent(
+		h->pdev, h->max_commands * sizeof(__u64),
+		&(h->reply_pool_dhandle));
+
+	/* Need a block fetch table for performant mode */
+	h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
+		sizeof(__u32)), GFP_KERNEL);
+
+	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
+		goto clean_up;
+
+	cciss_enter_performant_mode(h);
+
+	/* Change the access methods to the performant access methods */
+	h->access = SA5_performant_access;
+	h->transMethod = CFGTBL_Trans_Performant;
+
+	return;
+clean_up:
+	kfree(h->blockFetchTable);
+	if (h->reply_pool)
+		pci_free_consistent(h->pdev,
+				h->max_commands * sizeof(__u64),
+				h->reply_pool,
+				h->reply_pool_dhandle);
+	return;
+
+} /* cciss_put_controller_into_performant_mode */
+
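As an aside, the divisibility check above is easy to reproduce outside the kernel. The sketch below uses a made-up command structure (not the real CommandList_struct) and a C11 static assertion to show why explicit padding, together with the COMMANDLIST_ALIGNMENT bump to 32 in the cciss_cmd.h hunk further down, keeps every element of a command array on a 32-byte boundary:

	#include <assert.h>
	#include <stdio.h>

	/* hypothetical command block, sizes chosen only for the example */
	struct fake_cmd {
		unsigned char header[20];
		unsigned char sg[40];
		unsigned char pad[4];           /* 20 + 40 + 4 = 64 bytes */
	};

	static_assert(sizeof(struct fake_cmd) % 32 == 0,
		      "command blocks must be a multiple of 32 bytes");

	int main(void)
	{
		printf("sizeof(struct fake_cmd) = %zu\n", sizeof(struct fake_cmd));
		return 0;
	}
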
 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
  * controllers that are capable. If not, we use IO-APIC mode.
  * controllers that are capable. If not, we use IO-APIC mode.
  */
  */
 
 
-static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
-					   struct pci_dev *pdev, __u32 board_id)
+static void __devinit cciss_interrupt_mode(ctlr_info_t *h)
 {
 {
 #ifdef CONFIG_PCI_MSI
 #ifdef CONFIG_PCI_MSI
 	int err;
 	int err;
@@ -3644,268 +3936,283 @@ static void __devinit cciss_interrupt_mode(ctlr_info_t *c,
 	};
 	};
 
 
 	/* Some boards advertise MSI but don't really support it */
 	/* Some boards advertise MSI but don't really support it */
-	if ((board_id == 0x40700E11) ||
-	    (board_id == 0x40800E11) ||
-	    (board_id == 0x40820E11) || (board_id == 0x40830E11))
+	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
+	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
 		goto default_int_mode;
 		goto default_int_mode;
 
 
-	if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) {
-		err = pci_enable_msix(pdev, cciss_msix_entries, 4);
+	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
+		err = pci_enable_msix(h->pdev, cciss_msix_entries, 4);
 		if (!err) {
 		if (!err) {
-			c->intr[0] = cciss_msix_entries[0].vector;
-			c->intr[1] = cciss_msix_entries[1].vector;
-			c->intr[2] = cciss_msix_entries[2].vector;
-			c->intr[3] = cciss_msix_entries[3].vector;
-			c->msix_vector = 1;
+			h->intr[0] = cciss_msix_entries[0].vector;
+			h->intr[1] = cciss_msix_entries[1].vector;
+			h->intr[2] = cciss_msix_entries[2].vector;
+			h->intr[3] = cciss_msix_entries[3].vector;
+			h->msix_vector = 1;
 			return;
 			return;
 		}
 		}
 		if (err > 0) {
 		if (err > 0) {
-			printk(KERN_WARNING "cciss: only %d MSI-X vectors "
-			       "available\n", err);
+			dev_warn(&h->pdev->dev,
+				"only %d MSI-X vectors available\n", err);
 			goto default_int_mode;
 			goto default_int_mode;
 		} else {
 		} else {
-			printk(KERN_WARNING "cciss: MSI-X init failed %d\n",
-			       err);
+			dev_warn(&h->pdev->dev,
+				"MSI-X init failed %d\n", err);
 			goto default_int_mode;
 			goto default_int_mode;
 		}
 		}
 	}
 	}
-	if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) {
-		if (!pci_enable_msi(pdev)) {
-			c->msi_vector = 1;
-		} else {
-			printk(KERN_WARNING "cciss: MSI init failed\n");
-		}
+	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
+		if (!pci_enable_msi(h->pdev))
+			h->msi_vector = 1;
+		else
+			dev_warn(&h->pdev->dev, "MSI init failed\n");
 	}
 	}
 default_int_mode:
 default_int_mode:
 #endif				/* CONFIG_PCI_MSI */
 #endif				/* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
 	/* if we get here we're going to use the default interrupt mode */
-	c->intr[SIMPLE_MODE_INT] = pdev->irq;
+	h->intr[PERF_MODE_INT] = h->pdev->irq;
 	return;
 	return;
 }
 }
 
 
-static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
 {
 {
-	ushort subsystem_vendor_id, subsystem_device_id, command;
-	__u32 board_id, scratchpad = 0;
-	__u64 cfg_offset;
-	__u32 cfg_base_addr;
-	__u64 cfg_base_addr_index;
-	int i, prod_index, err;
+	int i;
+	u32 subsystem_vendor_id, subsystem_device_id;
 
 
 	subsystem_vendor_id = pdev->subsystem_vendor;
 	subsystem_vendor_id = pdev->subsystem_vendor;
 	subsystem_device_id = pdev->subsystem_device;
 	subsystem_device_id = pdev->subsystem_device;
-	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
-		    subsystem_vendor_id);
+	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
+			subsystem_vendor_id;
 
 
 	for (i = 0; i < ARRAY_SIZE(products); i++) {
 	for (i = 0; i < ARRAY_SIZE(products); i++) {
 		/* Stand aside for hpsa driver on request */
 		/* Stand aside for hpsa driver on request */
 		if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
 		if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
 			return -ENODEV;
 			return -ENODEV;
-		if (board_id == products[i].board_id)
-			break;
-	}
-	prod_index = i;
-	if (prod_index == ARRAY_SIZE(products)) {
-		dev_warn(&pdev->dev,
-			"unrecognized board ID: 0x%08lx, ignoring.\n",
-			(unsigned long) board_id);
-		return -ENODEV;
+		if (*board_id == products[i].board_id)
+			return i;
 	}
 	}
+	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
+		*board_id);
+	return -ENODEV;
+}
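For reference, the board-id packing used here can be checked in isolation. The values below are only an example: 0x0E11 is the old Compaq subsystem vendor id and 0x4070 one of the subsystem device ids blacklisted for MSI earlier in this file.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t subsystem_vendor_id = 0x0E11;  /* example: Compaq */
		uint32_t subsystem_device_id = 0x4070;  /* example device id */
		uint32_t board_id;

		/* device id in the high 16 bits, vendor id in the low 16 bits */
		board_id = ((subsystem_device_id << 16) & 0xffff0000) |
				subsystem_vendor_id;
		printf("board_id = 0x%08X\n", board_id); /* prints 0x40700E11 */
		return 0;
	}
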
 
 
-	/* check to see if controller has been disabled */
-	/* BEFORE trying to enable it */
-	(void)pci_read_config_word(pdev, PCI_COMMAND, &command);
-	if (!(command & 0x02)) {
-		printk(KERN_WARNING
-		       "cciss: controller appears to be disabled\n");
-		return -ENODEV;
-	}
+static inline bool cciss_board_disabled(ctlr_info_t *h)
+{
+	u16 command;
 
 
-	err = pci_enable_device(pdev);
-	if (err) {
-		printk(KERN_ERR "cciss: Unable to Enable PCI device\n");
-		return err;
-	}
+	(void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
+	return ((command & PCI_COMMAND_MEMORY) == 0);
+}
 
 
-	err = pci_request_regions(pdev, "cciss");
-	if (err) {
-		printk(KERN_ERR "cciss: Cannot obtain PCI resources, "
-		       "aborting\n");
-		return err;
-	}
+static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
+	unsigned long *memory_bar)
+{
+	int i;
 
 
-#ifdef CCISS_DEBUG
-	printk("command = %x\n", command);
-	printk("irq = %x\n", pdev->irq);
-	printk("board_id = %x\n", board_id);
-#endif				/* CCISS_DEBUG */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+			/* addressing mode bits already removed */
+			*memory_bar = pci_resource_start(pdev, i);
+			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
+				*memory_bar);
+			return 0;
+		}
+	dev_warn(&pdev->dev, "no memory BAR found\n");
+	return -ENODEV;
+}
 
 
-/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
- * else we use the IO-APIC interrupt assigned to us by system ROM.
- */
-	cciss_interrupt_mode(c, pdev, board_id);
+static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+{
+	int i;
+	u32 scratchpad;
 
 
-	/* find the memory BAR */
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
-			break;
-	}
-	if (i == DEVICE_COUNT_RESOURCE) {
-		printk(KERN_WARNING "cciss: No memory BAR found\n");
-		err = -ENODEV;
-		goto err_out_free_res;
+	for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
+		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (scratchpad == CCISS_FIRMWARE_READY)
+			return 0;
+		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
 	}
 	}
+	dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+	return -ENODEV;
+}
 
 
-	c->paddr = pci_resource_start(pdev, i); /* addressing mode bits
-						 * already removed
-						 */
+static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
+	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+	u64 *cfg_offset)
+{
+	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
+	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
+	*cfg_base_addr &= (u32) 0x0000ffff;
+	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
+	if (*cfg_base_addr_index == -1) {
+		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
+			"*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
+		return -ENODEV;
+	}
+	return 0;
+}
 
 
-#ifdef CCISS_DEBUG
-	printk("address 0 = %lx\n", c->paddr);
-#endif				/* CCISS_DEBUG */
-	c->vaddr = remap_pci_mem(c->paddr, 0x250);
+static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
+{
+	u64 cfg_offset;
+	u32 cfg_base_addr;
+	u64 cfg_base_addr_index;
+	u32 trans_offset;
+	int rc;
 
 
-	/* Wait for the board to become ready.  (PCI hotplug needs this.)
-	 * We poll for up to 120 secs, once per 100ms. */
-	for (i = 0; i < 1200; i++) {
-		scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
-		if (scratchpad == CCISS_FIRMWARE_READY)
-			break;
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(msecs_to_jiffies(100));	/* wait 100ms */
-	}
-	if (scratchpad != CCISS_FIRMWARE_READY) {
-		printk(KERN_WARNING "cciss: Board not ready.  Timed out.\n");
-		err = -ENODEV;
-		goto err_out_free_res;
-	}
+	rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
+		&cfg_base_addr_index, &cfg_offset);
+	if (rc)
+		return rc;
+	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
+		cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
+	if (!h->cfgtable)
+		return -ENOMEM;
+	/* Find performant mode table. */
+	trans_offset = readl(&h->cfgtable->TransMethodOffset);
+	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
+				cfg_base_addr_index)+cfg_offset+trans_offset,
+				sizeof(*h->transtable));
+	if (!h->transtable)
+		return -ENOMEM;
+	return 0;
+}
 
 
-	/* get the address index number */
-	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
-	cfg_base_addr &= (__u32) 0x0000ffff;
-#ifdef CCISS_DEBUG
-	printk("cfg base address = %x\n", cfg_base_addr);
-#endif				/* CCISS_DEBUG */
-	cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
-#ifdef CCISS_DEBUG
-	printk("cfg base address index = %llx\n",
-		(unsigned long long)cfg_base_addr_index);
-#endif				/* CCISS_DEBUG */
-	if (cfg_base_addr_index == -1) {
-		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
-		err = -ENODEV;
-		goto err_out_free_res;
+static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
+{
+	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+	if (h->max_commands < 16) {
+		dev_warn(&h->pdev->dev, "Controller reports "
+			"max supported commands of %d, an obvious lie. "
+			"Using 16.  Ensure that firmware is up to date.\n",
+			h->max_commands);
+		h->max_commands = 16;
 	}
 	}
+}
 
 
-	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
-#ifdef CCISS_DEBUG
-	printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
-#endif				/* CCISS_DEBUG */
-	c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
-						       cfg_base_addr_index) +
-				    cfg_offset, sizeof(CfgTable_struct));
-	c->board_id = board_id;
-
-#ifdef CCISS_DEBUG
-	print_cfg_table(c->cfgtable);
-#endif				/* CCISS_DEBUG */
-
-	/* Some controllers support Zero Memory Raid (ZMR).
-	 * When configured in ZMR mode the number of supported
-	 * commands drops to 64. So instead of just setting an
-	 * arbitrary value we make the driver a little smarter.
-	 * We read the config table to tell us how many commands
-	 * are supported on the controller then subtract 4 to
-	 * leave a little room for ioctl calls.
-	 */
-	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
-	c->maxsgentries = readl(&(c->cfgtable->MaxSGElements));
-
+/* Interrogate the hardware for some limits:
+ * max commands, max SG elements without chaining, and with chaining,
+ * SG chain block size, etc.
+ */
+static void __devinit cciss_find_board_params(ctlr_info_t *h)
+{
+	cciss_get_max_perf_mode_cmds(h);
+	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
+	h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
 	/*
 	/*
-	 * Limit native command to 32 s/g elements to save dma'able memory.
+	 * Limit in-command s/g elements to 32 save dma'able memory.
 	 * Howvever spec says if 0, use 31
 	 * Howvever spec says if 0, use 31
 	 */
 	 */
-
-	c->max_cmd_sgentries = 31;
-	if (c->maxsgentries > 512) {
-		c->max_cmd_sgentries = 32;
-		c->chainsize = c->maxsgentries - c->max_cmd_sgentries + 1;
-		c->maxsgentries -= 1;   /* account for chain pointer */
+	h->max_cmd_sgentries = 31;
+	if (h->maxsgentries > 512) {
+		h->max_cmd_sgentries = 32;
+		h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
+		h->maxsgentries--; /* save one for chain pointer */
 	} else {
 	} else {
-		c->maxsgentries = 31;   /* Default to traditional value */
-		c->chainsize = 0;       /* traditional */
+		h->maxsgentries = 31; /* default to traditional values */
+		h->chainsize = 0;
 	}
 	}
+}
 
 
-	c->product_name = products[prod_index].product_name;
-	c->access = *(products[prod_index].access);
-	c->nr_cmds = c->max_commands - 4;
-	if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
-	    (readb(&c->cfgtable->Signature[1]) != 'I') ||
-	    (readb(&c->cfgtable->Signature[2]) != 'S') ||
-	    (readb(&c->cfgtable->Signature[3]) != 'S')) {
-		printk("Does not appear to be a valid CISS config table\n");
-		err = -ENODEV;
-		goto err_out_free_res;
+static inline bool CISS_signature_present(ctlr_info_t *h)
+{
+	if ((readb(&h->cfgtable->Signature[0]) != 'C') ||
+	    (readb(&h->cfgtable->Signature[1]) != 'I') ||
+	    (readb(&h->cfgtable->Signature[2]) != 'S') ||
+	    (readb(&h->cfgtable->Signature[3]) != 'S')) {
+		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
+		return false;
 	}
 	}
+	return true;
+}
+
+/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
+{
 #ifdef CONFIG_X86
 #ifdef CONFIG_X86
-	{
-		/* Need to enable prefetch in the SCSI core for 6400 in x86 */
-		__u32 prefetch;
-		prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
-		prefetch |= 0x100;
-		writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
-	}
+	u32 prefetch;
+
+	prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
+	prefetch |= 0x100;
+	writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
 #endif
 #endif
+}
 
 
-	/* Disabling DMA prefetch and refetch for the P600.
-	 * An ASIC bug may result in accesses to invalid memory addresses.
-	 * We've disabled prefetch for some time now. Testing with XEN
-	 * kernels revealed a bug in the refetch if dom0 resides on a P600.
-	 */
-	if(board_id == 0x3225103C) {
-		__u32 dma_prefetch;
-		__u32 dma_refetch;
-		dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG);
-		dma_prefetch |= 0x8000;
-		writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG);
-		pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch);
-		dma_refetch |= 0x1;
-		pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch);
+/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
+ * in a prefetch beyond physical memory.
+ */
+static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
+{
+	u32 dma_prefetch;
+	__u32 dma_refetch;
+
+	if (h->board_id != 0x3225103C)
+		return;
+	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
+	dma_prefetch |= 0x8000;
+	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
+	pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
+	dma_refetch |= 0x1;
+	pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
+}
+
+static int __devinit cciss_pci_init(ctlr_info_t *h)
+{
+	int prod_index, err;
+
+	prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
+	if (prod_index < 0)
+		return -ENODEV;
+	h->product_name = products[prod_index].product_name;
+	h->access = *(products[prod_index].access);
+
+	if (cciss_board_disabled(h)) {
+		dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
+		return -ENODEV;
+	}
+	err = pci_enable_device(h->pdev);
+	if (err) {
+		dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
+		return err;
 	}
 	}
 
 
-#ifdef CCISS_DEBUG
-	printk("Trying to put board into Simple mode\n");
-#endif				/* CCISS_DEBUG */
-	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
-	/* Update the field, and then ring the doorbell */
-	writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest));
-	writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+	err = pci_request_regions(h->pdev, "cciss");
+	if (err) {
+		dev_warn(&h->pdev->dev,
+			"Cannot obtain PCI resources, aborting\n");
+		return err;
+	}
 
 
-	/* under certain very rare conditions, this can take awhile.
-	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
-	 * as we enter this code.) */
-	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
-		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
-			break;
-		/* delay and try again */
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(msecs_to_jiffies(1));
+	dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
+	dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
+
+/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
+ * else we use the IO-APIC interrupt assigned to us by system ROM.
+ */
+	cciss_interrupt_mode(h);
+	err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
+	if (err)
+		goto err_out_free_res;
+	h->vaddr = remap_pci_mem(h->paddr, 0x250);
+	if (!h->vaddr) {
+		err = -ENOMEM;
+		goto err_out_free_res;
 	}
 	}
+	err = cciss_wait_for_board_ready(h);
+	if (err)
+		goto err_out_free_res;
+	err = cciss_find_cfgtables(h);
+	if (err)
+		goto err_out_free_res;
+	print_cfg_table(h);
+	cciss_find_board_params(h);
 
 
-#ifdef CCISS_DEBUG
-	printk(KERN_DEBUG "I counter got to %d %x\n", i,
-	       readl(c->vaddr + SA5_DOORBELL));
-#endif				/* CCISS_DEBUG */
-#ifdef CCISS_DEBUG
-	print_cfg_table(c->cfgtable);
-#endif				/* CCISS_DEBUG */
-
-	if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
-		printk(KERN_WARNING "cciss: unable to get board into"
-		       " simple mode\n");
+	if (!CISS_signature_present(h)) {
 		err = -ENODEV;
 		err = -ENODEV;
 		goto err_out_free_res;
 		goto err_out_free_res;
 	}
 	}
+	cciss_enable_scsi_prefetch(h);
+	cciss_p600_dma_prefetch_quirk(h);
+	cciss_put_controller_into_performant_mode(h);
 	return 0;
 	return 0;
 
 
 err_out_free_res:
 err_out_free_res:
@@ -3913,42 +4220,47 @@ err_out_free_res:
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
 	 * Smart Array controllers that pci_enable_device does not undo
 	 */
 	 */
-	pci_release_regions(pdev);
+	if (h->transtable)
+		iounmap(h->transtable);
+	if (h->cfgtable)
+		iounmap(h->cfgtable);
+	if (h->vaddr)
+		iounmap(h->vaddr);
+	pci_release_regions(h->pdev);
 	return err;
 	return err;
 }
 }
 
 
 /* Function to find the first free pointer into our hba[] array
 /* Function to find the first free pointer into our hba[] array
  * Returns -1 if no free entries are left.
  * Returns -1 if no free entries are left.
  */
  */
-static int alloc_cciss_hba(void)
+static int alloc_cciss_hba(struct pci_dev *pdev)
 {
 {
 	int i;
 	int i;
 
 
 	for (i = 0; i < MAX_CTLR; i++) {
 	for (i = 0; i < MAX_CTLR; i++) {
 		if (!hba[i]) {
 		if (!hba[i]) {
-			ctlr_info_t *p;
+			ctlr_info_t *h;
 
 
-			p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
-			if (!p)
+			h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+			if (!h)
 				goto Enomem;
 				goto Enomem;
-			hba[i] = p;
+			hba[i] = h;
 			return i;
 			return i;
 		}
 		}
 	}
 	}
-	printk(KERN_WARNING "cciss: This driver supports a maximum"
+	dev_warn(&pdev->dev, "This driver supports a maximum"
 	       " of %d controllers.\n", MAX_CTLR);
 	       " of %d controllers.\n", MAX_CTLR);
 	return -1;
 	return -1;
 Enomem:
 Enomem:
-	printk(KERN_ERR "cciss: out of memory.\n");
+	dev_warn(&pdev->dev, "out of memory.\n");
 	return -1;
 	return -1;
 }
 }
 
 
-static void free_hba(int n)
+static void free_hba(ctlr_info_t *h)
 {
 {
-	ctlr_info_t *h = hba[n];
 	int i;
 	int i;
 
 
-	hba[n] = NULL;
+	hba[h->ctlr] = NULL;
 	for (i = 0; i < h->highest_lun + 1; i++)
 	for (i = 0; i < h->highest_lun + 1; i++)
 		if (h->gendisk[i] != NULL)
 		if (h->gendisk[i] != NULL)
 			put_disk(h->gendisk[i]);
 			put_disk(h->gendisk[i]);
@@ -4028,7 +4340,8 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
 	/* we leak the DMA buffer here ... no choice since the controller could
 	/* we leak the DMA buffer here ... no choice since the controller could
 	   still complete the command. */
 	   still complete the command. */
 	if (i == 10) {
 	if (i == 10) {
-		printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
+		dev_err(&pdev->dev,
+			"controller message %02x:%02x timed out\n",
 			opcode, type);
 			opcode, type);
 		return -ETIMEDOUT;
 		return -ETIMEDOUT;
 	}
 	}
@@ -4036,12 +4349,12 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
 	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
 	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
 
 
 	if (tag & 2) {
 	if (tag & 2) {
-		printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
+		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
 			opcode, type);
 			opcode, type);
 		return -EIO;
 		return -EIO;
 	}
 	}
 
 
-	printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
+	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
 		opcode, type);
 		opcode, type);
 	return 0;
 	return 0;
 }
 }
@@ -4062,7 +4375,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
 	if (pos) {
 	if (pos) {
 		pci_read_config_word(pdev, msi_control_reg(pos), &control);
 		pci_read_config_word(pdev, msi_control_reg(pos), &control);
 		if (control & PCI_MSI_FLAGS_ENABLE) {
 		if (control & PCI_MSI_FLAGS_ENABLE) {
-			printk(KERN_INFO "cciss: resetting MSI\n");
+			dev_info(&pdev->dev, "resetting MSI\n");
 			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
 			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
 		}
 		}
 	}
 	}
@@ -4071,7 +4384,7 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
 	if (pos) {
 	if (pos) {
 		pci_read_config_word(pdev, msi_control_reg(pos), &control);
 		pci_read_config_word(pdev, msi_control_reg(pos), &control);
 		if (control & PCI_MSIX_FLAGS_ENABLE) {
 		if (control & PCI_MSIX_FLAGS_ENABLE) {
-			printk(KERN_INFO "cciss: resetting MSI-X\n");
+			dev_info(&pdev->dev, "resetting MSI-X\n");
 			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
 			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
 		}
 		}
 	}
 	}
@@ -4079,68 +4392,144 @@ static __devinit int cciss_reset_msi(struct pci_dev *pdev)
 	return 0;
 	return 0;
 }
 }
 
 
-/* This does a hard reset of the controller using PCI power management
- * states. */
-static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
+static int cciss_controller_hard_reset(struct pci_dev *pdev,
+	void * __iomem vaddr, bool use_doorbell)
 {
 {
-	u16 pmcsr, saved_config_space[32];
-	int i, pos;
+	u16 pmcsr;
+	int pos;
 
 
-	printk(KERN_INFO "cciss: using PCI PM to reset controller\n");
+	if (use_doorbell) {
+		/* For everything after the P600, the PCI power state method
+		 * of resetting the controller doesn't work, so we have this
+		 * other way using the doorbell register.
+		 */
+		dev_info(&pdev->dev, "using doorbell to reset controller\n");
+		writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
+		msleep(1000);
+	} else { /* Try to do it the PCI power state way */
+
+		/* Quoting from the Open CISS Specification: "The Power
+		 * Management Control/Status Register (CSR) controls the power
+		 * state of the device.  The normal operating state is D0,
+		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
+		 * the controller, place the interface device in D3 then to D0,
+		 * this causes a secondary PCI reset which will reset the
+		 * controller." */
+
+		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+		if (pos == 0) {
+			dev_err(&pdev->dev,
+				"cciss_controller_hard_reset: "
+				"PCI PM not supported\n");
+			return -ENODEV;
+		}
+		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
+		/* enter the D3hot power management state */
+		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		pmcsr |= PCI_D3hot;
+		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
 
 
-	/* This is very nearly the same thing as
+		msleep(500);
 
 
-	   pci_save_state(pci_dev);
-	   pci_set_power_state(pci_dev, PCI_D3hot);
-	   pci_set_power_state(pci_dev, PCI_D0);
-	   pci_restore_state(pci_dev);
+		/* enter the D0 power management state */
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		pmcsr |= PCI_D0;
+		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
 
 
-	   but we can't use these nice canned kernel routines on
-	   kexec, because they also check the MSI/MSI-X state in PCI
-	   configuration space and do the wrong thing when it is
-	   set/cleared.  Also, the pci_save/restore_state functions
-	   violate the ordering requirements for restoring the
-	   configuration space from the CCISS document (see the
-	   comment below).  So we roll our own .... */
+		msleep(500);
+	}
+	return 0;
+}
 
 
-	for (i = 0; i < 32; i++)
-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+/* This does a hard reset of the controller using PCI power management
+ * states or using the doorbell register. */
+static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
+{
+	u16 saved_config_space[32];
+	u64 cfg_offset;
+	u32 cfg_base_addr;
+	u64 cfg_base_addr_index;
+	void __iomem *vaddr;
+	unsigned long paddr;
+	u32 misc_fw_support, active_transport;
+	int rc, i;
+	CfgTable_struct __iomem *cfgtable;
+	bool use_doorbell;
+	u32 board_id;
+
+	/* For controllers as old as the P600, this is very nearly
+	 * the same thing as
+	 *
+	 * pci_save_state(pci_dev);
+	 * pci_set_power_state(pci_dev, PCI_D3hot);
+	 * pci_set_power_state(pci_dev, PCI_D0);
+	 * pci_restore_state(pci_dev);
+	 *
+	 * but we can't use these nice canned kernel routines on
+	 * kexec, because they also check the MSI/MSI-X state in PCI
+	 * configuration space and do the wrong thing when it is
+	 * set/cleared.  Also, the pci_save/restore_state functions
+	 * violate the ordering requirements for restoring the
+	 * configuration space from the CCISS document (see the
+	 * comment below).  So we roll our own ....
+	 *
+	 * For controllers newer than the P600, the pci power state
+	 * method of resetting doesn't work so we have another way
+	 * using the doorbell register.
+	 */
 
 
-	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (pos == 0) {
-		printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
+	/* Exclude 640x boards.  These are two pci devices in one slot
+	 * which share a battery backed cache module.  One controls the
+	 * cache, the other accesses the cache through the one that controls
+	 * it.  If we reset the one controlling the cache, the other will
+	 * likely not be happy.  Just forbid resetting this conjoined mess.
+	 */
+	cciss_lookup_board_id(pdev, &board_id);
+	if (board_id == 0x409C0E11 || board_id == 0x409D0E11) {
+		dev_warn(&pdev->dev, "Cannot reset Smart Array 640x "
+				"due to shared cache module.");
 		return -ENODEV;
 		return -ENODEV;
 	}
 	}
 
 
-	/* Quoting from the Open CISS Specification: "The Power
-	 * Management Control/Status Register (CSR) controls the power
-	 * state of the device.  The normal operating state is D0,
-	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
-	 * the controller, place the interface device in D3 then to
-	 * D0, this causes a secondary PCI reset which will reset the
-	 * controller." */
+	for (i = 0; i < 32; i++)
+		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
 
 
-	/* enter the D3hot power management state */
-	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-	pmcsr |= PCI_D3hot;
-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+	/* find the first memory BAR, so we can find the cfg table */
+	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
+	if (rc)
+		return rc;
+	vaddr = remap_pci_mem(paddr, 0x250);
+	if (!vaddr)
+		return -ENOMEM;
 
 
-	schedule_timeout_uninterruptible(HZ >> 1);
+	/* find cfgtable in order to check if reset via doorbell is supported */
+	rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
+					&cfg_base_addr_index, &cfg_offset);
+	if (rc)
+		goto unmap_vaddr;
+	cfgtable = remap_pci_mem(pci_resource_start(pdev,
+		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
+	if (!cfgtable) {
+		rc = -ENOMEM;
+		goto unmap_vaddr;
+	}
 
 
-	/* enter the D0 power management state */
-	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
-	pmcsr |= PCI_D0;
-	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+	/* If reset via doorbell register is supported, use that. */
+	misc_fw_support = readl(&cfgtable->misc_fw_support);
+	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 
 
-	schedule_timeout_uninterruptible(HZ >> 1);
+	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
+	if (rc)
+		goto unmap_cfgtable;
 
 
 	/* Restore the PCI configuration space.  The Open CISS
 	/* Restore the PCI configuration space.  The Open CISS
 	 * Specification says, "Restore the PCI Configuration
 	 * Specification says, "Restore the PCI Configuration
 	 * Registers, offsets 00h through 60h. It is important to
 	 * Registers, offsets 00h through 60h. It is important to
 	 * restore the command register, 16-bits at offset 04h,
 	 * restore the command register, 16-bits at offset 04h,
 	 * last. Do not restore the configuration status register,
 	 * last. Do not restore the configuration status register,
-	 * 16-bits at offset 06h."  Note that the offset is 2*i. */
+	 * 16-bits at offset 06h."  Note that the offset is 2*i.
+	 */
 	for (i = 0; i < 32; i++) {
 	for (i = 0; i < 32; i++) {
 		if (i == 2 || i == 3)
 		if (i == 2 || i == 3)
 			continue;
 			continue;
@@ -4149,6 +4538,63 @@ static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
 	wmb();
 	wmb();
 	pci_write_config_word(pdev, 4, saved_config_space[2]);
 	pci_write_config_word(pdev, 4, saved_config_space[2]);
 
 
+	/* Some devices (notably the HP Smart Array 5i Controller)
+	   need a little pause here */
+	msleep(CCISS_POST_RESET_PAUSE_MSECS);
+
+	/* Controller should be in simple mode at this point.  If it's not,
+	 * it means we're on one of those controllers which doesn't support
+	 * the doorbell reset method and on which the PCI power management reset
+	 * method doesn't work (P800, for example.)
+	 * In those cases, don't try to proceed, as it generally doesn't work.
+	 */
+	active_transport = readl(&cfgtable->TransportActive);
+	if (active_transport & PERFORMANT_MODE) {
+		dev_warn(&pdev->dev, "Unable to successfully reset controller,"
+			" Ignoring controller.\n");
+		rc = -ENODEV;
+	}
+
+unmap_cfgtable:
+	iounmap(cfgtable);
+
+unmap_vaddr:
+	iounmap(vaddr);
+	return rc;
+}
+
+static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
+{
+	int rc, i;
+
+	if (!reset_devices)
+		return 0;
+
+	/* Reset the controller with a PCI power-cycle or via doorbell */
+	rc = cciss_kdump_hard_reset_controller(pdev);
+
+	/* -ENOTSUPP here means we cannot reset the controller
+	 * but it's already (and still) up and running in
+	 * "performant mode".  Or, it might be 640x, which can't reset
+	 * due to concerns about shared bbwc between the 6402/6404 pair.
+	 */
+	if (rc == -ENOTSUPP)
+		return 0; /* just try to do the kdump anyhow. */
+	if (rc)
+		return -ENODEV;
+	if (cciss_reset_msi(pdev))
+		return -ENODEV;
+
+	/* Now try to get the controller to respond to a no-op */
+	for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
+		if (cciss_noop(pdev) == 0)
+			break;
+		else
+			dev_warn(&pdev->dev, "no-op failed%s\n",
+				(i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
+					"; re-trying" : ""));
+		msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
+	}
 	return 0;
 	return 0;
 }
 }
 
 
@@ -4166,46 +4612,31 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	int rc;
 	int rc;
 	int dac, return_code;
 	int dac, return_code;
 	InquiryData_struct *inq_buff;
 	InquiryData_struct *inq_buff;
+	ctlr_info_t *h;
 
 
-	if (reset_devices) {
-		/* Reset the controller with a PCI power-cycle */
-		if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
-			return -ENODEV;
-
-		/* Now try to get the controller to respond to a no-op. Some
-		   devices (notably the HP Smart Array 5i Controller) need
-		   up to 30 seconds to respond. */
-		for (i=0; i<30; i++) {
-			if (cciss_noop(pdev) == 0)
-				break;
-
-			schedule_timeout_uninterruptible(HZ);
-		}
-		if (i == 30) {
-			printk(KERN_ERR "cciss: controller seems dead\n");
-			return -EBUSY;
-		}
-	}
-
-	i = alloc_cciss_hba();
+	rc = cciss_init_reset_devices(pdev);
+	if (rc)
+		return rc;
+	i = alloc_cciss_hba(pdev);
 	if (i < 0)
 	if (i < 0)
 		return -1;
 		return -1;
 
 
-	hba[i]->busy_initializing = 1;
-	INIT_HLIST_HEAD(&hba[i]->cmpQ);
-	INIT_HLIST_HEAD(&hba[i]->reqQ);
-	mutex_init(&hba[i]->busy_shutting_down);
+	h = hba[i];
+	h->pdev = pdev;
+	h->busy_initializing = 1;
+	INIT_HLIST_HEAD(&h->cmpQ);
+	INIT_HLIST_HEAD(&h->reqQ);
+	mutex_init(&h->busy_shutting_down);
 
 
-	if (cciss_pci_init(hba[i], pdev) != 0)
+	if (cciss_pci_init(h) != 0)
 		goto clean_no_release_regions;
 		goto clean_no_release_regions;
 
 
-	sprintf(hba[i]->devname, "cciss%d", i);
-	hba[i]->ctlr = i;
-	hba[i]->pdev = pdev;
+	sprintf(h->devname, "cciss%d", i);
+	h->ctlr = i;
 
 
-	init_completion(&hba[i]->scan_wait);
+	init_completion(&h->scan_wait);
 
 
-	if (cciss_create_hba_sysfs_entry(hba[i]))
+	if (cciss_create_hba_sysfs_entry(h))
 		goto clean0;
 		goto clean0;
 
 
 	/* configure PCI DMA stuff */
 	/* configure PCI DMA stuff */
@@ -4214,7 +4645,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
 	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
 		dac = 0;
 		dac = 0;
 	else {
 	else {
-		printk(KERN_ERR "cciss: no suitable DMA available\n");
+		dev_err(&h->pdev->dev, "no suitable DMA available\n");
 		goto clean1;
 		goto clean1;
 	}
 	}
 
 
@@ -4224,151 +4655,161 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	 * 8 controller support.
 	 * 8 controller support.
 	 */
 	 */
 	if (i < MAX_CTLR_ORIG)
 	if (i < MAX_CTLR_ORIG)
-		hba[i]->major = COMPAQ_CISS_MAJOR + i;
-	rc = register_blkdev(hba[i]->major, hba[i]->devname);
+		h->major = COMPAQ_CISS_MAJOR + i;
+	rc = register_blkdev(h->major, h->devname);
 	if (rc == -EBUSY || rc == -EINVAL) {
 	if (rc == -EBUSY || rc == -EINVAL) {
-		printk(KERN_ERR
-		       "cciss:  Unable to get major number %d for %s "
-		       "on hba %d\n", hba[i]->major, hba[i]->devname, i);
+		dev_err(&h->pdev->dev,
+		       "Unable to get major number %d for %s "
+		       "on hba %d\n", h->major, h->devname, i);
 		goto clean1;
 		goto clean1;
 	} else {
 	} else {
 		if (i >= MAX_CTLR_ORIG)
 		if (i >= MAX_CTLR_ORIG)
-			hba[i]->major = rc;
+			h->major = rc;
 	}
 	}
 
 
 	/* make sure the board interrupts are off */
 	/* make sure the board interrupts are off */
-	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
-	if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
-			IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
-		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
-		       hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname);
-		goto clean2;
+	h->access.set_intr_mask(h, CCISS_INTR_OFF);
+	if (h->msi_vector || h->msix_vector) {
+		if (request_irq(h->intr[PERF_MODE_INT],
+				do_cciss_msix_intr,
+				IRQF_DISABLED, h->devname, h)) {
+			dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
+			       h->intr[PERF_MODE_INT], h->devname);
+			goto clean2;
+		}
+	} else {
+		if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx,
+				IRQF_DISABLED, h->devname, h)) {
+			dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
+			       h->intr[PERF_MODE_INT], h->devname);
+			goto clean2;
+		}
 	}
 	}
 
 
-	printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
-	       hba[i]->devname, pdev->device, pci_name(pdev),
-	       hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not");
+	dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
+	       h->devname, pdev->device, pci_name(pdev),
+	       h->intr[PERF_MODE_INT], dac ? "" : " not");
 
 
-	hba[i]->cmd_pool_bits =
-	    kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+	h->cmd_pool_bits =
+	    kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
 			* sizeof(unsigned long), GFP_KERNEL);
 			* sizeof(unsigned long), GFP_KERNEL);
-	hba[i]->cmd_pool = (CommandList_struct *)
-	    pci_alloc_consistent(hba[i]->pdev,
-		    hba[i]->nr_cmds * sizeof(CommandList_struct),
-		    &(hba[i]->cmd_pool_dhandle));
-	hba[i]->errinfo_pool = (ErrorInfo_struct *)
-	    pci_alloc_consistent(hba[i]->pdev,
-		    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
-		    &(hba[i]->errinfo_pool_dhandle));
-	if ((hba[i]->cmd_pool_bits == NULL)
-	    || (hba[i]->cmd_pool == NULL)
-	    || (hba[i]->errinfo_pool == NULL)) {
-		printk(KERN_ERR "cciss: out of memory");
+	h->cmd_pool = (CommandList_struct *)
+	    pci_alloc_consistent(h->pdev,
+		    h->nr_cmds * sizeof(CommandList_struct),
+		    &(h->cmd_pool_dhandle));
+	h->errinfo_pool = (ErrorInfo_struct *)
+	    pci_alloc_consistent(h->pdev,
+		    h->nr_cmds * sizeof(ErrorInfo_struct),
+		    &(h->errinfo_pool_dhandle));
+	if ((h->cmd_pool_bits == NULL)
+	    || (h->cmd_pool == NULL)
+	    || (h->errinfo_pool == NULL)) {
+		dev_err(&h->pdev->dev, "out of memory");
 		goto clean4;
 		goto clean4;
 	}
 	}
 
 
 	/* Need space for temp scatter list */
 	/* Need space for temp scatter list */
-	hba[i]->scatter_list = kmalloc(hba[i]->max_commands *
+	h->scatter_list = kmalloc(h->max_commands *
 						sizeof(struct scatterlist *),
 						sizeof(struct scatterlist *),
 						GFP_KERNEL);
 						GFP_KERNEL);
-	for (k = 0; k < hba[i]->nr_cmds; k++) {
-		hba[i]->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
-							hba[i]->maxsgentries,
+	for (k = 0; k < h->nr_cmds; k++) {
+		h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) *
+							h->maxsgentries,
 							GFP_KERNEL);
 							GFP_KERNEL);
-		if (hba[i]->scatter_list[k] == NULL) {
-			printk(KERN_ERR "cciss%d: could not allocate "
-				"s/g lists\n", i);
+		if (h->scatter_list[k] == NULL) {
+			dev_err(&h->pdev->dev,
+				"could not allocate s/g lists\n");
 			goto clean4;
 			goto clean4;
 		}
 		}
 	}
 	}
-	hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i],
-		hba[i]->chainsize, hba[i]->nr_cmds);
-	if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0)
+	h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
+		h->chainsize, h->nr_cmds);
+	if (!h->cmd_sg_list && h->chainsize > 0)
 		goto clean4;
 		goto clean4;
 
 
-	spin_lock_init(&hba[i]->lock);
+	spin_lock_init(&h->lock);
 
 
 	/* Initialize the pdev driver private data.
 	/* Initialize the pdev driver private data.
-	   have it point to hba[i].  */
-	pci_set_drvdata(pdev, hba[i]);
+	   have it point to h.  */
+	pci_set_drvdata(pdev, h);
 	/* command and error info recs zeroed out before
 	/* command and error info recs zeroed out before
 	   they are used */
 	   they are used */
-	memset(hba[i]->cmd_pool_bits, 0,
-	       DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG)
+	memset(h->cmd_pool_bits, 0,
+	       DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
 			* sizeof(unsigned long));
 			* sizeof(unsigned long));
 
 
-	hba[i]->num_luns = 0;
-	hba[i]->highest_lun = -1;
+	h->num_luns = 0;
+	h->highest_lun = -1;
 	for (j = 0; j < CISS_MAX_LUN; j++) {
 	for (j = 0; j < CISS_MAX_LUN; j++) {
-		hba[i]->drv[j] = NULL;
-		hba[i]->gendisk[j] = NULL;
+		h->drv[j] = NULL;
+		h->gendisk[j] = NULL;
 	}
 	}
 
 
-	cciss_scsi_setup(i);
+	cciss_scsi_setup(h);
 
 
 	/* Turn the interrupts on so we can service requests */
 	/* Turn the interrupts on so we can service requests */
-	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
+	h->access.set_intr_mask(h, CCISS_INTR_ON);
 
 
 	/* Get the firmware version */
 	/* Get the firmware version */
 	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
 	inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
 	if (inq_buff == NULL) {
 	if (inq_buff == NULL) {
-		printk(KERN_ERR "cciss: out of memory\n");
+		dev_err(&h->pdev->dev, "out of memory\n");
 		goto clean4;
 		goto clean4;
 	}
 	}
 
 
-	return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
+	return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
 		sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
 		sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
 	if (return_code == IO_OK) {
 	if (return_code == IO_OK) {
-		hba[i]->firm_ver[0] = inq_buff->data_byte[32];
-		hba[i]->firm_ver[1] = inq_buff->data_byte[33];
-		hba[i]->firm_ver[2] = inq_buff->data_byte[34];
-		hba[i]->firm_ver[3] = inq_buff->data_byte[35];
+		h->firm_ver[0] = inq_buff->data_byte[32];
+		h->firm_ver[1] = inq_buff->data_byte[33];
+		h->firm_ver[2] = inq_buff->data_byte[34];
+		h->firm_ver[3] = inq_buff->data_byte[35];
 	} else {	 /* send command failed */
 	} else {	 /* send command failed */
-		printk(KERN_WARNING "cciss: unable to determine firmware"
+		dev_warn(&h->pdev->dev, "unable to determine firmware"
 			" version of controller\n");
 			" version of controller\n");
 	}
 	}
 	kfree(inq_buff);
 	kfree(inq_buff);
 
 
-	cciss_procinit(i);
+	cciss_procinit(h);
 
 
-	hba[i]->cciss_max_sectors = 8192;
+	h->cciss_max_sectors = 8192;
 
 
-	rebuild_lun_table(hba[i], 1, 0);
-	hba[i]->busy_initializing = 0;
+	rebuild_lun_table(h, 1, 0);
+	h->busy_initializing = 0;
 	return 1;
 	return 1;
 
 
 clean4:
 clean4:
-	kfree(hba[i]->cmd_pool_bits);
+	kfree(h->cmd_pool_bits);
 	/* Free up sg elements */
 	/* Free up sg elements */
-	for (k = 0; k < hba[i]->nr_cmds; k++)
-		kfree(hba[i]->scatter_list[k]);
-	kfree(hba[i]->scatter_list);
-	cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
-	if (hba[i]->cmd_pool)
-		pci_free_consistent(hba[i]->pdev,
-				    hba[i]->nr_cmds * sizeof(CommandList_struct),
-				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-	if (hba[i]->errinfo_pool)
-		pci_free_consistent(hba[i]->pdev,
-				    hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
-				    hba[i]->errinfo_pool,
-				    hba[i]->errinfo_pool_dhandle);
-	free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]);
+	for (k = 0; k < h->nr_cmds; k++)
+		kfree(h->scatter_list[k]);
+	kfree(h->scatter_list);
+	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
+	if (h->cmd_pool)
+		pci_free_consistent(h->pdev,
+				    h->nr_cmds * sizeof(CommandList_struct),
+				    h->cmd_pool, h->cmd_pool_dhandle);
+	if (h->errinfo_pool)
+		pci_free_consistent(h->pdev,
+				    h->nr_cmds * sizeof(ErrorInfo_struct),
+				    h->errinfo_pool,
+				    h->errinfo_pool_dhandle);
+	free_irq(h->intr[PERF_MODE_INT], h);
 clean2:
 clean2:
-	unregister_blkdev(hba[i]->major, hba[i]->devname);
+	unregister_blkdev(h->major, h->devname);
 clean1:
 clean1:
-	cciss_destroy_hba_sysfs_entry(hba[i]);
+	cciss_destroy_hba_sysfs_entry(h);
 clean0:
 clean0:
 	pci_release_regions(pdev);
 	pci_release_regions(pdev);
 clean_no_release_regions:
 clean_no_release_regions:
-	hba[i]->busy_initializing = 0;
+	h->busy_initializing = 0;
 
 
 	/*
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
 	 * Smart Array controllers that pci_enable_device does not undo
 	 */
 	 */
 	pci_set_drvdata(pdev, NULL);
 	pci_set_drvdata(pdev, NULL);
-	free_hba(i);
+	free_hba(h);
 	return -1;
 	return -1;
 }
 }
 
 
@@ -4381,55 +4822,51 @@ static void cciss_shutdown(struct pci_dev *pdev)
 	h = pci_get_drvdata(pdev);
 	h = pci_get_drvdata(pdev);
 	flush_buf = kzalloc(4, GFP_KERNEL);
 	flush_buf = kzalloc(4, GFP_KERNEL);
 	if (!flush_buf) {
 	if (!flush_buf) {
-		printk(KERN_WARNING
-			"cciss:%d cache not flushed, out of memory.\n",
-			h->ctlr);
+		dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
 		return;
 		return;
 	}
 	}
 	/* write all data in the battery backed cache to disk */
 	/* write all data in the battery backed cache to disk */
 	memset(flush_buf, 0, 4);
 	memset(flush_buf, 0, 4);
-	return_code = sendcmd_withirq(CCISS_CACHE_FLUSH, h->ctlr, flush_buf,
+	return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
 		4, 0, CTLR_LUNID, TYPE_CMD);
 		4, 0, CTLR_LUNID, TYPE_CMD);
 	kfree(flush_buf);
 	kfree(flush_buf);
 	if (return_code != IO_OK)
 	if (return_code != IO_OK)
-		printk(KERN_WARNING "cciss%d: Error flushing cache\n",
-			h->ctlr);
+		dev_warn(&h->pdev->dev, "Error flushing cache\n");
 	h->access.set_intr_mask(h, CCISS_INTR_OFF);
 	h->access.set_intr_mask(h, CCISS_INTR_OFF);
-	free_irq(h->intr[2], h);
+	free_irq(h->intr[PERF_MODE_INT], h);
 }
 }
 
 
 static void __devexit cciss_remove_one(struct pci_dev *pdev)
 static void __devexit cciss_remove_one(struct pci_dev *pdev)
 {
 {
-	ctlr_info_t *tmp_ptr;
+	ctlr_info_t *h;
 	int i, j;
 	int i, j;
 
 
 	if (pci_get_drvdata(pdev) == NULL) {
 	if (pci_get_drvdata(pdev) == NULL) {
-		printk(KERN_ERR "cciss: Unable to remove device \n");
+		dev_err(&pdev->dev, "Unable to remove device\n");
 		return;
 		return;
 	}
 	}
 
 
-	tmp_ptr = pci_get_drvdata(pdev);
-	i = tmp_ptr->ctlr;
+	h = pci_get_drvdata(pdev);
+	i = h->ctlr;
 	if (hba[i] == NULL) {
 	if (hba[i] == NULL) {
-		printk(KERN_ERR "cciss: device appears to "
-		       "already be removed \n");
+		dev_err(&pdev->dev, "device appears to already be removed\n");
 		return;
 		return;
 	}
 	}
 
 
-	mutex_lock(&hba[i]->busy_shutting_down);
+	mutex_lock(&h->busy_shutting_down);
 
 
-	remove_from_scan_list(hba[i]);
-	remove_proc_entry(hba[i]->devname, proc_cciss);
-	unregister_blkdev(hba[i]->major, hba[i]->devname);
+	remove_from_scan_list(h);
+	remove_proc_entry(h->devname, proc_cciss);
+	unregister_blkdev(h->major, h->devname);
 
 
 	/* remove it from the disk list */
 	/* remove it from the disk list */
 	for (j = 0; j < CISS_MAX_LUN; j++) {
 	for (j = 0; j < CISS_MAX_LUN; j++) {
-		struct gendisk *disk = hba[i]->gendisk[j];
+		struct gendisk *disk = h->gendisk[j];
 		if (disk) {
 		if (disk) {
 			struct request_queue *q = disk->queue;
 			struct request_queue *q = disk->queue;
 
 
 			if (disk->flags & GENHD_FL_UP) {
 			if (disk->flags & GENHD_FL_UP) {
-				cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
+				cciss_destroy_ld_sysfs_entry(h, j, 1);
 				del_gendisk(disk);
 				del_gendisk(disk);
 			}
 			}
 			if (q)
 			if (q)
@@ -4438,39 +4875,41 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
 	}
 	}
 
 
 #ifdef CONFIG_CISS_SCSI_TAPE
 #ifdef CONFIG_CISS_SCSI_TAPE
-	cciss_unregister_scsi(i);	/* unhook from SCSI subsystem */
+	cciss_unregister_scsi(h);	/* unhook from SCSI subsystem */
 #endif
 #endif
 
 
 	cciss_shutdown(pdev);
 	cciss_shutdown(pdev);
 
 
 #ifdef CONFIG_PCI_MSI
 #ifdef CONFIG_PCI_MSI
-	if (hba[i]->msix_vector)
-		pci_disable_msix(hba[i]->pdev);
-	else if (hba[i]->msi_vector)
-		pci_disable_msi(hba[i]->pdev);
+	if (h->msix_vector)
+		pci_disable_msix(h->pdev);
+	else if (h->msi_vector)
+		pci_disable_msi(h->pdev);
 #endif				/* CONFIG_PCI_MSI */
 #endif				/* CONFIG_PCI_MSI */
 
 
-	iounmap(hba[i]->vaddr);
+	iounmap(h->transtable);
+	iounmap(h->cfgtable);
+	iounmap(h->vaddr);
 
 
-	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct),
-			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
-	pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct),
-			    hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
-	kfree(hba[i]->cmd_pool_bits);
+	pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct),
+			    h->cmd_pool, h->cmd_pool_dhandle);
+	pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct),
+			    h->errinfo_pool, h->errinfo_pool_dhandle);
+	kfree(h->cmd_pool_bits);
 	/* Free up sg elements */
 	/* Free up sg elements */
-	for (j = 0; j < hba[i]->nr_cmds; j++)
-		kfree(hba[i]->scatter_list[j]);
-	kfree(hba[i]->scatter_list);
-	cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds);
+	for (j = 0; j < h->nr_cmds; j++)
+		kfree(h->scatter_list[j]);
+	kfree(h->scatter_list);
+	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
 	/*
 	/*
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Deliberately omit pci_disable_device(): it does something nasty to
 	 * Smart Array controllers that pci_enable_device does not undo
 	 * Smart Array controllers that pci_enable_device does not undo
 	 */
 	 */
 	pci_release_regions(pdev);
 	pci_release_regions(pdev);
 	pci_set_drvdata(pdev, NULL);
 	pci_set_drvdata(pdev, NULL);
-	cciss_destroy_hba_sysfs_entry(hba[i]);
-	mutex_unlock(&hba[i]->busy_shutting_down);
-	free_hba(i);
+	cciss_destroy_hba_sysfs_entry(h);
+	mutex_unlock(&h->busy_shutting_down);
+	free_hba(h);
 }
 }
 
 
 static struct pci_driver cciss_pci_driver = {
 static struct pci_driver cciss_pci_driver = {
@@ -4495,7 +4934,6 @@ static int __init cciss_init(void)
 	 * array of them, the size must be a multiple of 8 bytes.
 	 * array of them, the size must be a multiple of 8 bytes.
 	 */
 	 */
 	BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
 	BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
-
 	printk(KERN_INFO DRIVER_NAME "\n");
 	printk(KERN_INFO DRIVER_NAME "\n");
 
 
 	err = bus_register(&cciss_bus_type);
 	err = bus_register(&cciss_bus_type);
@@ -4532,8 +4970,8 @@ static void __exit cciss_cleanup(void)
 	/* double check that all controller entrys have been removed */
 	/* double check that all controller entrys have been removed */
 	for (i = 0; i < MAX_CTLR; i++) {
 	for (i = 0; i < MAX_CTLR; i++) {
 		if (hba[i] != NULL) {
 		if (hba[i] != NULL) {
-			printk(KERN_WARNING "cciss: had to remove"
-			       " controller %d\n", i);
+			dev_warn(&hba[i]->pdev->dev,
+				"had to remove controller\n");
 			cciss_remove_one(hba[i]->pdev);
 			cciss_remove_one(hba[i]->pdev);
 		}
 		}
 	}
 	}
@@ -4542,46 +4980,5 @@ static void __exit cciss_cleanup(void)
 	bus_unregister(&cciss_bus_type);
 	bus_unregister(&cciss_bus_type);
 }
 }
 
 
-static void fail_all_cmds(unsigned long ctlr)
-{
-	/* If we get here, the board is apparently dead. */
-	ctlr_info_t *h = hba[ctlr];
-	CommandList_struct *c;
-	unsigned long flags;
-
-	printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr);
-	h->alive = 0;		/* the controller apparently died... */
-
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-
-	pci_disable_device(h->pdev);	/* Make sure it is really dead. */
-
-	/* move everything off the request queue onto the completed queue */
-	while (!hlist_empty(&h->reqQ)) {
-		c = hlist_entry(h->reqQ.first, CommandList_struct, list);
-		removeQ(c);
-		h->Qdepth--;
-		addQ(&h->cmpQ, c);
-	}
-
-	/* Now, fail everything on the completed queue with a HW error */
-	while (!hlist_empty(&h->cmpQ)) {
-		c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
-		removeQ(c);
-		if (c->cmd_type != CMD_MSG_STALE)
-			c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-		if (c->cmd_type == CMD_RWREQ) {
-			complete_command(h, c, 0);
-		} else if (c->cmd_type == CMD_IOCTL_PEND)
-			complete(c->waiting);
-#ifdef CONFIG_CISS_SCSI_TAPE
-		else if (c->cmd_type == CMD_SCSI)
-			complete_scsi_command(c, 0, 0);
-#endif
-	}
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-	return;
-}
-
 module_init(cciss_init);
 module_init(cciss_init);
 module_exit(cciss_cleanup);
 module_exit(cciss_cleanup);

+ 124 - 11
drivers/block/cciss.h

@@ -25,7 +25,7 @@ struct access_method {
 	void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
 	void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
 	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
 	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
 	unsigned long (*fifo_full)(ctlr_info_t *h);
 	unsigned long (*fifo_full)(ctlr_info_t *h);
-	unsigned long (*intr_pending)(ctlr_info_t *h);
+	bool (*intr_pending)(ctlr_info_t *h);
 	unsigned long (*command_completed)(ctlr_info_t *h);
 	unsigned long (*command_completed)(ctlr_info_t *h);
 };
 };
 typedef struct _drive_info_struct
 typedef struct _drive_info_struct
@@ -85,8 +85,8 @@ struct ctlr_info
 	int	max_cmd_sgentries;
 	int	max_cmd_sgentries;
 	SGDescriptor_struct **cmd_sg_list;
 	SGDescriptor_struct **cmd_sg_list;
 
 
-#	define DOORBELL_INT	0
-#	define PERF_MODE_INT	1
+#	define PERF_MODE_INT	0
+#	define DOORBELL_INT	1
 #	define SIMPLE_MODE_INT	2
 #	define SIMPLE_MODE_INT	2
 #	define MEMQ_MODE_INT	3
 #	define MEMQ_MODE_INT	3
 	unsigned int intr[4];
 	unsigned int intr[4];
@@ -137,10 +137,27 @@ struct ctlr_info
 	struct list_head scan_list;
 	struct list_head scan_list;
 	struct completion scan_wait;
 	struct completion scan_wait;
 	struct device dev;
 	struct device dev;
+	/*
+	 * Performant mode tables.
+	 */
+	u32 trans_support;
+	u32 trans_offset;
+	struct TransTable_struct *transtable;
+	unsigned long transMethod;
+
+	/*
+	 * Performant mode completion buffer
+	 */
+	u64 *reply_pool;
+	dma_addr_t reply_pool_dhandle;
+	u64 *reply_pool_head;
+	size_t reply_pool_size;
+	unsigned char reply_pool_wraparound;
+	u32 *blockFetchTable;
 };
 };
 
 
-/*  Defining the diffent access_menthods */
-/*
+/*  Defining the different access_methods
+ *
  * Memory mapped FIFO interface (SMART 53xx cards)
  * Memory mapped FIFO interface (SMART 53xx cards)
  */
  */
 #define SA5_DOORBELL	0x20
 #define SA5_DOORBELL	0x20
@@ -159,19 +176,47 @@ struct ctlr_info
 #define SA5B_INTR_PENDING	0x04
 #define SA5B_INTR_PENDING	0x04
 #define FIFO_EMPTY		0xffffffff	
 #define FIFO_EMPTY		0xffffffff	
 #define CCISS_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
 #define CCISS_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
+/* Perf. mode flags */
+#define SA5_PERF_INTR_PENDING	0x04
+#define SA5_PERF_INTR_OFF	0x05
+#define SA5_OUTDB_STATUS_PERF_BIT	0x01
+#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
+#define SA5_OUTDB_CLEAR         0xA0
+#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
+#define SA5_OUTDB_STATUS        0x9C
+
 
 
 #define  CISS_ERROR_BIT		0x02
 #define  CISS_ERROR_BIT		0x02
 
 
 #define CCISS_INTR_ON 	1 
 #define CCISS_INTR_ON 	1 
 #define CCISS_INTR_OFF	0
 #define CCISS_INTR_OFF	0
+
+
+/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
+ * to become ready, in seconds, before giving up on it.
+ * CCISS_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
+ * between polling the board to see if it is ready, in
+ * milliseconds.  CCISS_BOARD_READY_ITERATIONS is derived
+ * from the above.
+ */
+#define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
+#define CCISS_BOARD_READY_ITERATIONS \
+	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
+		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_POST_RESET_PAUSE_MSECS (3000)
+#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
+#define CCISS_POST_RESET_NOOP_RETRIES (12)
+
 /* 
 /* 
 	Send the command to the hardware 
 	Send the command to the hardware 
 */
 */
 static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 
 static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 
 {
 {
 #ifdef CCISS_DEBUG
 #ifdef CCISS_DEBUG
-	 printk("Sending %x - down to controller\n", c->busaddr );
-#endif /* CCISS_DEBUG */ 
+	printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
+			h->ctlr, c->busaddr);
+#endif /* CCISS_DEBUG */
          writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
          writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
 	 h->commands_outstanding++;
 	 h->commands_outstanding++;
 	 if ( h->commands_outstanding > h->max_outstanding)
 	 if ( h->commands_outstanding > h->max_outstanding)
@@ -214,6 +259,20 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
                         h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
                         h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
         }
         }
 }
 }
+
+/* Performant mode intr_mask */
+static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	if (val) { /* turn on interrupts */
+		h->interrupts_enabled = 1;
+		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	} else {
+		h->interrupts_enabled = 0;
+		writel(SA5_PERF_INTR_OFF,
+				h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	}
+}
+
 /*
 /*
  *  Returns true if fifo is full.  
  *  Returns true if fifo is full.  
  * 
  * 
@@ -250,10 +309,44 @@ static unsigned long SA5_completed(ctlr_info_t *h)
 	return ( register_value); 
 	return ( register_value); 
 
 
 }
 }
+
+/* Performant mode command completed */
+static unsigned long SA5_performant_completed(ctlr_info_t *h)
+{
+	unsigned long register_value = FIFO_EMPTY;
+
+	/* flush the controller write of the reply queue by reading
+	 * outbound doorbell status register.
+	 */
+	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+	/* msi auto clears the interrupt pending bit. */
+	if (!(h->msi_vector || h->msix_vector)) {
+		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
+		/* Do a read in order to flush the write to the controller
+		 * (as per spec.)
+		 */
+		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+	}
+
+	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+		register_value = *(h->reply_pool_head);
+		(h->reply_pool_head)++;
+		h->commands_outstanding--;
+	} else {
+		register_value = FIFO_EMPTY;
+	}
+	/* Check for wraparound */
+	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+		h->reply_pool_head = h->reply_pool;
+		h->reply_pool_wraparound ^= 1;
+	}
+
+	return register_value;
+}
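The reply queue here is consumed ring-buffer style: an entry is valid only while its low bit matches reply_pool_wraparound, and the toggle flips each time the head wraps past max_commands. A hedged sketch of how a caller (for instance an interrupt handler) might drain completions through this accessor (illustrative only; process_completed_command() is a hypothetical stand-in for the driver's real completion handling):

static void drain_performant_completions(ctlr_info_t *h)
{
	unsigned long tag;

	while ((tag = SA5_performant_completed(h)) != FIFO_EMPTY)
		process_completed_command(h, tag);	/* hypothetical */
}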
 /*
  *	Returns true if an interrupt is pending.. 
  */
-static unsigned long SA5_intr_pending(ctlr_info_t *h)
+static bool SA5_intr_pending(ctlr_info_t *h)
 {
 	unsigned long register_value  = 
 		readl(h->vaddr + SA5_INTR_STATUS);
@@ -268,7 +361,7 @@ static unsigned long SA5_intr_pending(ctlr_info_t *h)
 /*
  *      Returns true if an interrupt is pending..
  */
-static unsigned long SA5B_intr_pending(ctlr_info_t *h)
+static bool SA5B_intr_pending(ctlr_info_t *h)
 {
         unsigned long register_value  =
                 readl(h->vaddr + SA5_INTR_STATUS);
@@ -280,6 +373,20 @@ static unsigned long SA5B_intr_pending(ctlr_info_t *h)
         return 0 ;
 }
 
+static bool SA5_performant_intr_pending(ctlr_info_t *h)
+{
+	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
+
+	if (!register_value)
+		return false;
+
+	if (h->msi_vector || h->msix_vector)
+		return true;
+
+	/* Read outbound doorbell to flush */
+	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
+	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
+}
 
 
 static struct access_method SA5_access = {
 	SA5_submit_command,
@@ -297,6 +404,14 @@ static struct access_method SA5B_access = {
         SA5_completed,
 };
 
+static struct access_method SA5_performant_access = {
+	SA5_submit_command,
+	SA5_performant_intr_mask,
+	SA5_fifo_full,
+	SA5_performant_intr_pending,
+	SA5_performant_completed,
+};
+
 struct board_type {
 	__u32	board_id;
 	char	*product_name;
@@ -304,6 +419,4 @@ struct board_type {
 	int nr_cmds; /* Max cmds this kind of ctlr can handle. */
 };
 
-#define CCISS_LOCK(i)	(&hba[i]->lock)
-
 #endif /* CCISS_H */

+ 33 - 3
drivers/block/cciss_cmd.h

@@ -52,8 +52,10 @@
 /* Configuration Table */
 /* Configuration Table */
 #define CFGTBL_ChangeReq        0x00000001l
 #define CFGTBL_ChangeReq        0x00000001l
 #define CFGTBL_AccCmds          0x00000001l
 #define CFGTBL_AccCmds          0x00000001l
+#define DOORBELL_CTLR_RESET     0x00000004l
 
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Simple     0x00000002l
+#define CFGTBL_Trans_Performant 0x00000004l
 
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l
 #define CFGTBL_BusType_Ultra3   0x00000002l
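DOORBELL_CTLR_RESET, added above, is a bit the host can set in the controller's doorbell register to request a firmware-level reset; whether the firmware honours it is advertised via MISC_FW_DOORBELL_RESET in the config table's misc_fw_support field further down. A hedged sketch of how such a reset might be requested (illustrative only; SA5_DOORBELL is assumed to be the driver's doorbell register offset, cfgtable an ioremapped CfgTable_struct pointer, and the pause constant is the one added to cciss.h above):

	if (readl(&cfgtable->misc_fw_support) & MISC_FW_DOORBELL_RESET) {
		writel(DOORBELL_CTLR_RESET, h->vaddr + SA5_DOORBELL);
		msleep(CCISS_POST_RESET_PAUSE_MSECS);	/* let the board settle */
	}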
@@ -173,12 +175,15 @@ typedef struct _SGDescriptor_struct {
  * PAD_64 can be adjusted independently as needed for 32-bit
  * PAD_64 can be adjusted independently as needed for 32-bit
  * and 64-bits systems.
  * and 64-bits systems.
  */
  */
-#define COMMANDLIST_ALIGNMENT (8)
+#define COMMANDLIST_ALIGNMENT (32)
 #define IS_64_BIT ((sizeof(long) - 4)/4)
 #define IS_64_BIT ((sizeof(long) - 4)/4)
 #define IS_32_BIT (!IS_64_BIT)
 #define IS_32_BIT (!IS_64_BIT)
 #define PAD_32 (0)
 #define PAD_32 (0)
 #define PAD_64 (4)
 #define PAD_64 (4)
 #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
 #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
+#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_SHIFT 5
+
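Worked out, the padding arithmetic gives PADSIZE == 0 on 32-bit builds (sizeof(long) is 4, so IS_64_BIT is 0) and PADSIZE == 4 on LP64 builds, so the structure size can be tuned per-architecture against the new 32-byte COMMANDLIST_ALIGNMENT. A tiny standalone check of that arithmetic (illustrative only, compiled outside the kernel):

#include <stdio.h>

#define IS_64_BIT ((sizeof(long) - 4)/4)
#define IS_32_BIT (!IS_64_BIT)
#define PAD_32 (0)
#define PAD_64 (4)
#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)

int main(void)
{
	/* Prints 4 on LP64 systems, 0 on ILP32 systems. */
	printf("PADSIZE = %zu\n", (size_t)PADSIZE);
	return 0;
}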
 typedef struct _CommandList_struct {
 typedef struct _CommandList_struct {
   CommandListHeader_struct Header;
   CommandListHeader_struct Header;
   RequestBlock_struct      Request;
   RequestBlock_struct      Request;
@@ -195,7 +200,7 @@ typedef struct _CommandList_struct {
   struct completion *waiting;
   struct completion *waiting;
   int	 retry_count;
   int	 retry_count;
   void * scsi_cmd;
   void * scsi_cmd;
-  char   pad[PADSIZE];
+  char pad[PADSIZE];
 } CommandList_struct;
 } CommandList_struct;
 
 
 /* Configuration Table Structure */
 /* Configuration Table Structure */
@@ -209,12 +214,15 @@ typedef struct _HostWrite_struct {
 typedef struct _CfgTable_struct {
 typedef struct _CfgTable_struct {
   BYTE             Signature[4];
   BYTE             Signature[4];
   DWORD            SpecValence;
   DWORD            SpecValence;
+#define SIMPLE_MODE	0x02
+#define PERFORMANT_MODE	0x04
+#define MEMQ_MODE	0x08
   DWORD            TransportSupport;
   DWORD            TransportSupport;
   DWORD            TransportActive;
   DWORD            TransportActive;
   HostWrite_struct HostWrite;
   HostWrite_struct HostWrite;
   DWORD            CmdsOutMax;
   DWORD            CmdsOutMax;
   DWORD            BusTypes;
   DWORD            BusTypes;
-  DWORD            Reserved; 
+  DWORD            TransMethodOffset;
   BYTE             ServerName[16];
   BYTE             ServerName[16];
   DWORD            HeartBeat;
   DWORD            HeartBeat;
   DWORD            SCSI_Prefetch;
   DWORD            SCSI_Prefetch;
@@ -222,6 +230,28 @@ typedef struct _CfgTable_struct {
   DWORD            MaxLogicalUnits;
   DWORD            MaxLogicalUnits;
   DWORD            MaxPhysicalDrives;
   DWORD            MaxPhysicalDrives;
   DWORD            MaxPhysicalDrivesPerLogicalUnit;
   DWORD            MaxPhysicalDrivesPerLogicalUnit;
+  DWORD            MaxPerformantModeCommands;
+  u8		   reserved[0x78 - 0x58];
+  u32		   misc_fw_support; /* offset 0x78 */
+#define MISC_FW_DOORBELL_RESET (0x02)
 } CfgTable_struct;
 } CfgTable_struct;
+
+struct TransTable_struct {
+  u32 BlockFetch0;
+  u32 BlockFetch1;
+  u32 BlockFetch2;
+  u32 BlockFetch3;
+  u32 BlockFetch4;
+  u32 BlockFetch5;
+  u32 BlockFetch6;
+  u32 BlockFetch7;
+  u32 RepQSize;
+  u32 RepQCount;
+  u32 RepQCtrAddrLow32;
+  u32 RepQCtrAddrHigh32;
+  u32 RepQAddr0Low32;
+  u32 RepQAddr0High32;
+};
+
 #pragma pack()	 
 #pragma pack()	 
 #endif /* CCISS_CMD_H */
 #endif /* CCISS_CMD_H */

+ 315 - 355
drivers/block/cciss_scsi.c

@@ -44,13 +44,15 @@
 #define CCISS_ABORT_MSG 0x00
 #define CCISS_ABORT_MSG 0x00
 #define CCISS_RESET_MSG 0x01
 #define CCISS_RESET_MSG 0x01
 
 
-static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
 	size_t size,
 	size_t size,
 	__u8 page_code, unsigned char *scsi3addr,
 	__u8 page_code, unsigned char *scsi3addr,
 	int cmd_type);
 	int cmd_type);
 
 
-static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
+static CommandList_struct *cmd_alloc(ctlr_info_t *h);
+static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
+static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
 
 
 static int cciss_scsi_proc_info(
 static int cciss_scsi_proc_info(
 		struct Scsi_Host *sh,
 		struct Scsi_Host *sh,
@@ -93,8 +95,8 @@ static struct scsi_host_template cciss_driver_template = {
 
 
 #pragma pack(1)
 #pragma pack(1)
 
 
-#define SCSI_PAD_32 0
-#define SCSI_PAD_64 0
+#define SCSI_PAD_32 8
+#define SCSI_PAD_64 8
 
 
 struct cciss_scsi_cmd_stack_elem_t {
 struct cciss_scsi_cmd_stack_elem_t {
 	CommandList_struct cmd;
 	CommandList_struct cmd;
@@ -127,16 +129,16 @@ struct cciss_scsi_adapter_data_t {
 	spinlock_t lock; // to protect ccissscsi[ctlr]; 
 	spinlock_t lock; // to protect ccissscsi[ctlr]; 
 };
 };
 
 
-#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
-	&hba[ctlr]->scsi_ctlr->lock, flags);
-#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
-	&hba[ctlr]->scsi_ctlr->lock, flags);
+#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \
+	&h->scsi_ctlr->lock, flags);
+#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \
+	&h->scsi_ctlr->lock, flags);
 
 
 static CommandList_struct *
 static CommandList_struct *
 scsi_cmd_alloc(ctlr_info_t *h)
 scsi_cmd_alloc(ctlr_info_t *h)
 {
 {
 	/* assume only one process in here at a time, locking done by caller. */
 	/* assume only one process in here at a time, locking done by caller. */
-	/* use CCISS_LOCK(ctlr) */
+	/* use h->lock */
 	/* might be better to rewrite how we allocate scsi commands in a way that */
 	/* might be better to rewrite how we allocate scsi commands in a way that */
 	/* needs no locking at all. */
 	/* needs no locking at all. */
 
 
@@ -177,10 +179,10 @@ scsi_cmd_alloc(ctlr_info_t *h)
 }
 }
 
 
 static void 
 static void 
-scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
+scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
 {
 {
 	/* assume only one process in here at a time, locking done by caller. */
 	/* assume only one process in here at a time, locking done by caller. */
-	/* use CCISS_LOCK(ctlr) */
+	/* use h->lock */
 	/* drop the free memory chunk on top of the stack. */
 	/* drop the free memory chunk on top of the stack. */
 
 
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_adapter_data_t *sa;
@@ -190,22 +192,23 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
 	stk = &sa->cmd_stack; 
 	stk = &sa->cmd_stack; 
 	stk->top++;
 	stk->top++;
 	if (stk->top >= CMD_STACK_SIZE) {
 	if (stk->top >= CMD_STACK_SIZE) {
-		printk("cciss: scsi_cmd_free called too many times.\n");
+		dev_err(&h->pdev->dev,
+			"scsi_cmd_free called too many times.\n");
 		BUG();
 		BUG();
 	}
 	}
-	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
+	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c;
 }
 }
 
 
 static int
 static int
-scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
+scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
 {
 {
 	int i;
 	int i;
 	struct cciss_scsi_cmd_stack_t *stk;
 	struct cciss_scsi_cmd_stack_t *stk;
 	size_t size;
 	size_t size;
 
 
-	sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[ctlr],
-		hba[ctlr]->chainsize, CMD_STACK_SIZE);
-	if (!sa->cmd_sg_list && hba[ctlr]->chainsize > 0)
+	sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
+		h->chainsize, CMD_STACK_SIZE);
+	if (!sa->cmd_sg_list && h->chainsize > 0)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
 	stk = &sa->cmd_stack; 
 	stk = &sa->cmd_stack; 
@@ -215,7 +218,7 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
 	BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
 	BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
 	/* pci_alloc_consistent guarantees 32-bit DMA address will be used */
 	/* pci_alloc_consistent guarantees 32-bit DMA address will be used */
 	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
 	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
-		pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
+		pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
 
 
 	if (stk->pool == NULL) {
 	if (stk->pool == NULL) {
 		cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
 		cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
@@ -234,23 +237,22 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
 }
 }
 
 
 static void
 static void
-scsi_cmd_stack_free(int ctlr)
+scsi_cmd_stack_free(ctlr_info_t *h)
 {
 {
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_cmd_stack_t *stk;
 	struct cciss_scsi_cmd_stack_t *stk;
 	size_t size;
 	size_t size;
 
 
-	sa = hba[ctlr]->scsi_ctlr;
+	sa = h->scsi_ctlr;
 	stk = &sa->cmd_stack; 
 	stk = &sa->cmd_stack; 
 	if (stk->top != CMD_STACK_SIZE-1) {
 	if (stk->top != CMD_STACK_SIZE-1) {
-		printk( "cciss: %d scsi commands are still outstanding.\n",
+		dev_warn(&h->pdev->dev,
+			"bug: %d scsi commands are still outstanding.\n",
 			CMD_STACK_SIZE - stk->top);
 			CMD_STACK_SIZE - stk->top);
-		// BUG();
-		printk("WE HAVE A BUG HERE!!! stk=0x%p\n", stk);
 	}
 	}
 	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
 	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
 
 
-	pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
+	pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
 	stk->pool = NULL;
 	stk->pool = NULL;
 	cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
 	cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE);
 }
 }
@@ -342,20 +344,20 @@ print_cmd(CommandList_struct *cp)
 #endif
 #endif
 
 
 static int 
 static int 
-find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
+find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
 {
 {
 	/* finds an unused bus, target, lun for a new device */
 	/* finds an unused bus, target, lun for a new device */
-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
+	/* assumes h->scsi_ctlr->lock is held */
 	int i, found=0;
 	int i, found=0;
 	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
 	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
 
 
 	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
 	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
 
 
 	target_taken[SELF_SCSI_ID] = 1;	
 	target_taken[SELF_SCSI_ID] = 1;	
-	for (i=0;i<ccissscsi[ctlr].ndevices;i++)
-		target_taken[ccissscsi[ctlr].dev[i].target] = 1;
+	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
+		target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;
 	
 	
-	for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
+	for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
 		if (!target_taken[i]) {
 		if (!target_taken[i]) {
 			*bus = 0; *target=i; *lun = 0; found=1;
 			*bus = 0; *target=i; *lun = 0; found=1;
 			break;
 			break;
@@ -369,19 +371,19 @@ struct scsi2map {
 };
 };
 
 
 static int 
 static int 
-cciss_scsi_add_entry(int ctlr, int hostno, 
+cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
 		struct cciss_scsi_dev_t *device,
 		struct cciss_scsi_dev_t *device,
 		struct scsi2map *added, int *nadded)
 		struct scsi2map *added, int *nadded)
 {
 {
-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
-	int n = ccissscsi[ctlr].ndevices;
+	/* assumes h->scsi_ctlr->lock is held */
+	int n = ccissscsi[h->ctlr].ndevices;
 	struct cciss_scsi_dev_t *sd;
 	struct cciss_scsi_dev_t *sd;
 	int i, bus, target, lun;
 	int i, bus, target, lun;
 	unsigned char addr1[8], addr2[8];
 	unsigned char addr1[8], addr2[8];
 
 
 	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
 	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
-		printk("cciss%d: Too many devices, "
-			"some will be inaccessible.\n", ctlr);
+		dev_warn(&h->pdev->dev, "Too many devices, "
+			"some will be inaccessible.\n");
 		return -1;
 		return -1;
 	}
 	}
 
 
@@ -397,7 +399,7 @@ cciss_scsi_add_entry(int ctlr, int hostno,
 		memcpy(addr1, device->scsi3addr, 8);
 		memcpy(addr1, device->scsi3addr, 8);
 		addr1[4] = 0;
 		addr1[4] = 0;
 		for (i = 0; i < n; i++) {
 		for (i = 0; i < n; i++) {
-			sd = &ccissscsi[ctlr].dev[i];
+			sd = &ccissscsi[h->ctlr].dev[i];
 			memcpy(addr2, sd->scsi3addr, 8);
 			memcpy(addr2, sd->scsi3addr, 8);
 			addr2[4] = 0;
 			addr2[4] = 0;
 			/* differ only in byte 4? */
 			/* differ only in byte 4? */
@@ -410,9 +412,9 @@ cciss_scsi_add_entry(int ctlr, int hostno,
 		}
 		}
 	}
 	}
 
 
-	sd = &ccissscsi[ctlr].dev[n];
+	sd = &ccissscsi[h->ctlr].dev[n];
 	if (lun == 0) {
 	if (lun == 0) {
-		if (find_bus_target_lun(ctlr,
+		if (find_bus_target_lun(h,
 			&sd->bus, &sd->target, &sd->lun) != 0)
 			&sd->bus, &sd->target, &sd->lun) != 0)
 			return -1;
 			return -1;
 	} else {
 	} else {
@@ -431,37 +433,37 @@ cciss_scsi_add_entry(int ctlr, int hostno,
 	memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
 	memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
 	sd->devtype = device->devtype;
 	sd->devtype = device->devtype;
 
 
-	ccissscsi[ctlr].ndevices++;
+	ccissscsi[h->ctlr].ndevices++;
 
 
 	/* initially, (before registering with scsi layer) we don't 
 	/* initially, (before registering with scsi layer) we don't 
 	   know our hostno and we don't want to print anything first 
 	   know our hostno and we don't want to print anything first 
 	   time anyway (the scsi layer's inquiries will show that info) */
 	   time anyway (the scsi layer's inquiries will show that info) */
 	if (hostno != -1)
 	if (hostno != -1)
-		printk("cciss%d: %s device c%db%dt%dl%d added.\n", 
-			ctlr, scsi_device_type(sd->devtype), hostno,
+		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
+			scsi_device_type(sd->devtype), hostno,
 			sd->bus, sd->target, sd->lun);
 			sd->bus, sd->target, sd->lun);
 	return 0;
 	return 0;
 }
 }
 
 
 static void
 static void
-cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
+cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
 	struct scsi2map *removed, int *nremoved)
 	struct scsi2map *removed, int *nremoved)
 {
 {
-	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
+	/* assumes h->scsi_ctlr->lock is held */
 	int i;
 	int i;
 	struct cciss_scsi_dev_t sd;
 	struct cciss_scsi_dev_t sd;
 
 
 	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
 	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
-	sd = ccissscsi[ctlr].dev[entry];
+	sd = ccissscsi[h->ctlr].dev[entry];
 	removed[*nremoved].bus    = sd.bus;
 	removed[*nremoved].bus    = sd.bus;
 	removed[*nremoved].target = sd.target;
 	removed[*nremoved].target = sd.target;
 	removed[*nremoved].lun    = sd.lun;
 	removed[*nremoved].lun    = sd.lun;
 	(*nremoved)++;
 	(*nremoved)++;
-	for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
-		ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
-	ccissscsi[ctlr].ndevices--;
-	printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
-		ctlr, scsi_device_type(sd.devtype), hostno,
+	for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
+		ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
+	ccissscsi[h->ctlr].ndevices--;
+	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
+		scsi_device_type(sd.devtype), hostno,
 			sd.bus, sd.target, sd.lun);
 			sd.bus, sd.target, sd.lun);
 }
 }
 
 
@@ -476,24 +478,24 @@ cciss_scsi_remove_entry(int ctlr, int hostno, int entry,
 	(a)[1] == (b)[1] && \
 	(a)[1] == (b)[1] && \
 	(a)[0] == (b)[0])
 	(a)[0] == (b)[0])
 
 
-static void fixup_botched_add(int ctlr, char *scsi3addr)
+static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr)
 {
 {
 	/* called when scsi_add_device fails in order to re-adjust */
 	/* called when scsi_add_device fails in order to re-adjust */
 	/* ccissscsi[] to match the mid layer's view. */
 	/* ccissscsi[] to match the mid layer's view. */
 	unsigned long flags;
 	unsigned long flags;
 	int i, j;
 	int i, j;
-	CPQ_TAPE_LOCK(ctlr, flags);
-	for (i = 0; i < ccissscsi[ctlr].ndevices; i++) {
+	CPQ_TAPE_LOCK(h, flags);
+	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
 		if (memcmp(scsi3addr,
 		if (memcmp(scsi3addr,
-				ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) {
-			for (j = i; j < ccissscsi[ctlr].ndevices-1; j++)
-				ccissscsi[ctlr].dev[j] =
-					ccissscsi[ctlr].dev[j+1];
-			ccissscsi[ctlr].ndevices--;
+				ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
+			for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
+				ccissscsi[h->ctlr].dev[j] =
+					ccissscsi[h->ctlr].dev[j+1];
+			ccissscsi[h->ctlr].ndevices--;
 			break;
 			break;
 		}
 		}
 	}
 	}
-	CPQ_TAPE_UNLOCK(ctlr, flags);
+	CPQ_TAPE_UNLOCK(h, flags);
 }
 }
 
 
 static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
 static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
@@ -513,7 +515,7 @@ static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
 }
 }
 
 
 static int
 static int
-adjust_cciss_scsi_table(int ctlr, int hostno,
+adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
 	struct cciss_scsi_dev_t sd[], int nsds)
 	struct cciss_scsi_dev_t sd[], int nsds)
 {
 {
 	/* sd contains scsi3 addresses and devtypes, but
 	/* sd contains scsi3 addresses and devtypes, but
@@ -534,15 +536,15 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 			GFP_KERNEL);
 			GFP_KERNEL);
 
 
 	if (!added || !removed) {
 	if (!added || !removed) {
-		printk(KERN_WARNING "cciss%d: Out of memory in "
-			"adjust_cciss_scsi_table\n", ctlr);
+		dev_warn(&h->pdev->dev,
+			"Out of memory in adjust_cciss_scsi_table\n");
 		goto free_and_out;
 		goto free_and_out;
 	}
 	}
 
 
-	CPQ_TAPE_LOCK(ctlr, flags);
+	CPQ_TAPE_LOCK(h, flags);
 
 
 	if (hostno != -1)  /* if it's not the first time... */
 	if (hostno != -1)  /* if it's not the first time... */
-		sh = hba[ctlr]->scsi_ctlr->scsi_host;
+		sh = h->scsi_ctlr->scsi_host;
 
 
 	/* find any devices in ccissscsi[] that are not in 
 	/* find any devices in ccissscsi[] that are not in 
 	   sd[] and remove them from ccissscsi[] */
 	   sd[] and remove them from ccissscsi[] */
@@ -550,8 +552,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 	i = 0;
 	i = 0;
 	nremoved = 0;
 	nremoved = 0;
 	nadded = 0;
 	nadded = 0;
-	while(i<ccissscsi[ctlr].ndevices) {
-		csd = &ccissscsi[ctlr].dev[i];
+	while (i < ccissscsi[h->ctlr].ndevices) {
+		csd = &ccissscsi[h->ctlr].dev[i];
 		found=0;
 		found=0;
 		for (j=0;j<nsds;j++) {
 		for (j=0;j<nsds;j++) {
 			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
 			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
@@ -566,20 +568,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 
 
 		if (found == 0) { /* device no longer present. */ 
 		if (found == 0) { /* device no longer present. */ 
 			changes++;
 			changes++;
-			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
-				ctlr, scsi_device_type(csd->devtype), hostno,
-					csd->bus, csd->target, csd->lun); */
-			cciss_scsi_remove_entry(ctlr, hostno, i,
+			cciss_scsi_remove_entry(h, hostno, i,
 				removed, &nremoved);
 				removed, &nremoved);
 			/* remove ^^^, hence i not incremented */
 			/* remove ^^^, hence i not incremented */
 		} else if (found == 1) { /* device is different in some way */
 		} else if (found == 1) { /* device is different in some way */
 			changes++;
 			changes++;
-			printk("cciss%d: device c%db%dt%dl%d has changed.\n",
-				ctlr, hostno, csd->bus, csd->target, csd->lun);
-			cciss_scsi_remove_entry(ctlr, hostno, i,
+			dev_info(&h->pdev->dev,
+				"device c%db%dt%dl%d has changed.\n",
+				hostno, csd->bus, csd->target, csd->lun);
+			cciss_scsi_remove_entry(h, hostno, i,
 				removed, &nremoved);
 				removed, &nremoved);
 			/* remove ^^^, hence i not incremented */
 			/* remove ^^^, hence i not incremented */
-			if (cciss_scsi_add_entry(ctlr, hostno, &sd[j],
+			if (cciss_scsi_add_entry(h, hostno, &sd[j],
 				added, &nadded) != 0)
 				added, &nadded) != 0)
 				/* we just removed one, so add can't fail. */
 				/* we just removed one, so add can't fail. */
 					BUG();
 					BUG();
@@ -601,8 +601,8 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 
 
 	for (i=0;i<nsds;i++) {
 	for (i=0;i<nsds;i++) {
 		found=0;
 		found=0;
-		for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
-			csd = &ccissscsi[ctlr].dev[j];
+		for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
+			csd = &ccissscsi[h->ctlr].dev[j];
 			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
 			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
 				csd->scsi3addr)) {
 				csd->scsi3addr)) {
 				if (device_is_the_same(&sd[i], csd))
 				if (device_is_the_same(&sd[i], csd))
@@ -614,18 +614,18 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 		}
 		}
 		if (!found) {
 		if (!found) {
 			changes++;
 			changes++;
-			if (cciss_scsi_add_entry(ctlr, hostno, &sd[i],
+			if (cciss_scsi_add_entry(h, hostno, &sd[i],
 				added, &nadded) != 0)
 				added, &nadded) != 0)
 				break;
 				break;
 		} else if (found == 1) {
 		} else if (found == 1) {
 			/* should never happen... */
 			/* should never happen... */
 			changes++;
 			changes++;
-			printk(KERN_WARNING "cciss%d: device "
-				"unexpectedly changed\n", ctlr);
+			dev_warn(&h->pdev->dev,
+				"device unexpectedly changed\n");
 			/* but if it does happen, we just ignore that device */
 			/* but if it does happen, we just ignore that device */
 		}
 		}
 	}
 	}
-	CPQ_TAPE_UNLOCK(ctlr, flags);
+	CPQ_TAPE_UNLOCK(h, flags);
 
 
 	/* Don't notify scsi mid layer of any changes the first time through */
 	/* Don't notify scsi mid layer of any changes the first time through */
 	/* (or if there are no changes) scsi_scan_host will do it later the */
 	/* (or if there are no changes) scsi_scan_host will do it later the */
@@ -645,9 +645,9 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 			/* We don't expect to get here. */
 			/* We don't expect to get here. */
 			/* future cmds to this device will get selection */
 			/* future cmds to this device will get selection */
 			/* timeout as if the device was gone. */
 			/* timeout as if the device was gone. */
-			printk(KERN_WARNING "cciss%d: didn't find "
+			dev_warn(&h->pdev->dev, "didn't find "
 				"c%db%dt%dl%d\n for removal.",
 				"c%db%dt%dl%d\n for removal.",
-				ctlr, hostno, removed[i].bus,
+				hostno, removed[i].bus,
 				removed[i].target, removed[i].lun);
 				removed[i].target, removed[i].lun);
 		}
 		}
 	}
 	}
@@ -659,13 +659,12 @@ adjust_cciss_scsi_table(int ctlr, int hostno,
 			added[i].target, added[i].lun);
 			added[i].target, added[i].lun);
 		if (rc == 0)
 		if (rc == 0)
 			continue;
 			continue;
-		printk(KERN_WARNING "cciss%d: scsi_add_device "
+		dev_warn(&h->pdev->dev, "scsi_add_device "
 			"c%db%dt%dl%d failed, device not added.\n",
 			"c%db%dt%dl%d failed, device not added.\n",
-			ctlr, hostno,
-			added[i].bus, added[i].target, added[i].lun);
+			hostno, added[i].bus, added[i].target, added[i].lun);
 		/* now we have to remove it from ccissscsi, */
 		/* now we have to remove it from ccissscsi, */
 		/* since it didn't get added to scsi mid layer */
 		/* since it didn't get added to scsi mid layer */
-		fixup_botched_add(ctlr, added[i].scsi3addr);
+		fixup_botched_add(h, added[i].scsi3addr);
 	}
 	}
 
 
 free_and_out:
 free_and_out:
@@ -675,33 +674,33 @@ free_and_out:
 }
 }
 
 
 static int
 static int
-lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
+lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
 {
 {
 	int i;
 	int i;
 	struct cciss_scsi_dev_t *sd;
 	struct cciss_scsi_dev_t *sd;
 	unsigned long flags;
 	unsigned long flags;
 
 
-	CPQ_TAPE_LOCK(ctlr, flags);
-	for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
-		sd = &ccissscsi[ctlr].dev[i];
+	CPQ_TAPE_LOCK(h, flags);
+	for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+		sd = &ccissscsi[h->ctlr].dev[i];
 		if (sd->bus == bus &&
 		if (sd->bus == bus &&
 		    sd->target == target &&
 		    sd->target == target &&
 		    sd->lun == lun) {
 		    sd->lun == lun) {
 			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
 			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
-			CPQ_TAPE_UNLOCK(ctlr, flags);
+			CPQ_TAPE_UNLOCK(h, flags);
 			return 0;
 			return 0;
 		}
 		}
 	}
 	}
-	CPQ_TAPE_UNLOCK(ctlr, flags);
+	CPQ_TAPE_UNLOCK(h, flags);
 	return -1;
 	return -1;
 }
 }
 
 
 static void 
 static void 
-cciss_scsi_setup(int cntl_num)
+cciss_scsi_setup(ctlr_info_t *h)
 {
 {
 	struct cciss_scsi_adapter_data_t * shba;
 	struct cciss_scsi_adapter_data_t * shba;
 
 
-	ccissscsi[cntl_num].ndevices = 0;
+	ccissscsi[h->ctlr].ndevices = 0;
 	shba = (struct cciss_scsi_adapter_data_t *)
 	shba = (struct cciss_scsi_adapter_data_t *)
 		kmalloc(sizeof(*shba), GFP_KERNEL);	
 		kmalloc(sizeof(*shba), GFP_KERNEL);	
 	if (shba == NULL)
 	if (shba == NULL)
@@ -709,35 +708,35 @@ cciss_scsi_setup(int cntl_num)
 	shba->scsi_host = NULL;
 	shba->scsi_host = NULL;
 	spin_lock_init(&shba->lock);
 	spin_lock_init(&shba->lock);
 	shba->registered = 0;
 	shba->registered = 0;
-	if (scsi_cmd_stack_setup(cntl_num, shba) != 0) {
+	if (scsi_cmd_stack_setup(h, shba) != 0) {
 		kfree(shba);
 		kfree(shba);
 		shba = NULL;
 		shba = NULL;
 	}
 	}
-	hba[cntl_num]->scsi_ctlr = shba;
+	h->scsi_ctlr = shba;
 	return;
 	return;
 }
 }
 
 
-static void
-complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+static void complete_scsi_command(CommandList_struct *c, int timeout,
+	__u32 tag)
 {
 {
 	struct scsi_cmnd *cmd;
 	struct scsi_cmnd *cmd;
-	ctlr_info_t *ctlr;
+	ctlr_info_t *h;
 	ErrorInfo_struct *ei;
 	ErrorInfo_struct *ei;
 
 
-	ei = cp->err_info;
+	ei = c->err_info;
 
 
 	/* First, see if it was a message rather than a command */
 	/* First, see if it was a message rather than a command */
-	if (cp->Request.Type.Type == TYPE_MSG)  {
-		cp->cmd_type = CMD_MSG_DONE;
+	if (c->Request.Type.Type == TYPE_MSG)  {
+		c->cmd_type = CMD_MSG_DONE;
 		return;
 		return;
 	}
 	}
 
 
-	cmd = (struct scsi_cmnd *) cp->scsi_cmd;	
-	ctlr = hba[cp->ctlr];
+	cmd = (struct scsi_cmnd *) c->scsi_cmd;
+	h = hba[c->ctlr];
 
 
 	scsi_dma_unmap(cmd);
 	scsi_dma_unmap(cmd);
-	if (cp->Header.SGTotal > ctlr->max_cmd_sgentries)
-		cciss_unmap_sg_chain_block(ctlr, cp);
+	if (c->Header.SGTotal > h->max_cmd_sgentries)
+		cciss_unmap_sg_chain_block(h, c);
 
 
 	cmd->result = (DID_OK << 16); 		/* host byte */
 	cmd->result = (DID_OK << 16); 		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
@@ -764,9 +763,8 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
                 		{
                 		{
 #if 0
 #if 0
                     			printk(KERN_WARNING "cciss: cmd %p "
                     			printk(KERN_WARNING "cciss: cmd %p "
-					"has SCSI Status = %x\n",
-                        			cp,  
-						ei->ScsiStatus); 
+						"has SCSI Status = %x\n",
+						c, ei->ScsiStatus);
 #endif
 #endif
 					cmd->result |= (ei->ScsiStatus << 1);
 					cmd->result |= (ei->ScsiStatus << 1);
                 		}
                 		}
@@ -786,13 +784,13 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
 			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
 			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
 			break;
 			break;
 			case CMD_DATA_OVERRUN:
 			case CMD_DATA_OVERRUN:
-				printk(KERN_WARNING "cciss: cp %p has"
+				dev_warn(&h->pdev->dev, "%p has"
 					" completed with data overrun "
 					" completed with data overrun "
-					"reported\n", cp);
+					"reported\n", c);
 			break;
 			break;
 			case CMD_INVALID: {
 			case CMD_INVALID: {
-				/* print_bytes(cp, sizeof(*cp), 1, 0);
-				print_cmd(cp); */
+				/* print_bytes(c, sizeof(*c), 1, 0);
+				print_cmd(c); */
      /* We get CMD_INVALID if you address a non-existent tape drive instead
      /* We get CMD_INVALID if you address a non-existent tape drive instead
 	of a selection timeout (no response).  You will see this if you yank 
 	of a selection timeout (no response).  You will see this if you yank 
 	out a tape drive, then try to access it. This is kind of a shame
 	out a tape drive, then try to access it. This is kind of a shame
@@ -802,54 +800,50 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
 				}
 				}
 			break;
 			break;
 			case CMD_PROTOCOL_ERR:
 			case CMD_PROTOCOL_ERR:
-                                printk(KERN_WARNING "cciss: cp %p has "
-					"protocol error \n", cp);
+				dev_warn(&h->pdev->dev,
+					"%p has protocol error\n", c);
                         break;
                         break;
 			case CMD_HARDWARE_ERR:
 			case CMD_HARDWARE_ERR:
 				cmd->result = DID_ERROR << 16;
 				cmd->result = DID_ERROR << 16;
-                                printk(KERN_WARNING "cciss: cp %p had " 
-                                        " hardware error\n", cp);
+				dev_warn(&h->pdev->dev,
+					"%p had hardware error\n", c);
                         break;
                         break;
 			case CMD_CONNECTION_LOST:
 			case CMD_CONNECTION_LOST:
 				cmd->result = DID_ERROR << 16;
 				cmd->result = DID_ERROR << 16;
-				printk(KERN_WARNING "cciss: cp %p had "
-					"connection lost\n", cp);
+				dev_warn(&h->pdev->dev,
+					"%p had connection lost\n", c);
 			break;
 			break;
 			case CMD_ABORTED:
 			case CMD_ABORTED:
 				cmd->result = DID_ABORT << 16;
 				cmd->result = DID_ABORT << 16;
-				printk(KERN_WARNING "cciss: cp %p was "
-					"aborted\n", cp);
+				dev_warn(&h->pdev->dev, "%p was aborted\n", c);
 			break;
 			break;
 			case CMD_ABORT_FAILED:
 			case CMD_ABORT_FAILED:
 				cmd->result = DID_ERROR << 16;
 				cmd->result = DID_ERROR << 16;
-				printk(KERN_WARNING "cciss: cp %p reports "
-					"abort failed\n", cp);
+				dev_warn(&h->pdev->dev,
+					"%p reports abort failed\n", c);
 			break;
 			break;
 			case CMD_UNSOLICITED_ABORT:
 			case CMD_UNSOLICITED_ABORT:
 				cmd->result = DID_ABORT << 16;
 				cmd->result = DID_ABORT << 16;
-				printk(KERN_WARNING "cciss: cp %p aborted "
-					"do to an unsolicited abort\n", cp);
+				dev_warn(&h->pdev->dev, "%p aborted due to an "
+					"unsolicited abort\n", c);
 			break;
 			break;
 			case CMD_TIMEOUT:
 			case CMD_TIMEOUT:
 				cmd->result = DID_TIME_OUT << 16;
 				cmd->result = DID_TIME_OUT << 16;
-				printk(KERN_WARNING "cciss: cp %p timedout\n",
-					cp);
+				dev_warn(&h->pdev->dev, "%p timed out\n", c);
 			break;
 			break;
 			default:
 			default:
 				cmd->result = DID_ERROR << 16;
 				cmd->result = DID_ERROR << 16;
-				printk(KERN_WARNING "cciss: cp %p returned "
-					"unknown status %x\n", cp, 
+				dev_warn(&h->pdev->dev,
+					"%p returned unknown status %x\n", c,
 						ei->CommandStatus); 
 						ei->CommandStatus); 
 		}
 		}
 	}
 	}
-	// printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel, 
-	//	cmd->target, cmd->lun);
 	cmd->scsi_done(cmd);
 	cmd->scsi_done(cmd);
-	scsi_cmd_free(ctlr, cp);
+	scsi_cmd_free(h, c);
 }
 }
 
 
 static int
 static int
-cciss_scsi_detect(int ctlr)
+cciss_scsi_detect(ctlr_info_t *h)
 {
 {
 	struct Scsi_Host *sh;
 	struct Scsi_Host *sh;
 	int error;
 	int error;
@@ -860,15 +854,15 @@ cciss_scsi_detect(int ctlr)
 	sh->io_port = 0;	// good enough?  FIXME, 
 	sh->io_port = 0;	// good enough?  FIXME, 
 	sh->n_io_port = 0;	// I don't think we use these two...
 	sh->n_io_port = 0;	// I don't think we use these two...
 	sh->this_id = SELF_SCSI_ID;  
 	sh->this_id = SELF_SCSI_ID;  
-	sh->sg_tablesize = hba[ctlr]->maxsgentries;
+	sh->sg_tablesize = h->maxsgentries;
 	sh->max_cmd_len = MAX_COMMAND_SIZE;
 	sh->max_cmd_len = MAX_COMMAND_SIZE;
 
 
 	((struct cciss_scsi_adapter_data_t *) 
 	((struct cciss_scsi_adapter_data_t *) 
-		hba[ctlr]->scsi_ctlr)->scsi_host = sh;
-	sh->hostdata[0] = (unsigned long) hba[ctlr];
-	sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT];
+		h->scsi_ctlr)->scsi_host = sh;
+	sh->hostdata[0] = (unsigned long) h;
+	sh->irq = h->intr[SIMPLE_MODE_INT];
 	sh->unique_id = sh->irq;
 	sh->unique_id = sh->irq;
-	error = scsi_add_host(sh, &hba[ctlr]->pdev->dev);
+	error = scsi_add_host(sh, &h->pdev->dev);
 	if (error)
 	if (error)
 		goto fail_host_put;
 		goto fail_host_put;
 	scsi_scan_host(sh);
 	scsi_scan_host(sh);
@@ -882,20 +876,20 @@ cciss_scsi_detect(int ctlr)
 
 
 static void
 static void
 cciss_unmap_one(struct pci_dev *pdev,
 cciss_unmap_one(struct pci_dev *pdev,
-		CommandList_struct *cp,
+		CommandList_struct *c,
 		size_t buflen,
 		size_t buflen,
 		int data_direction)
 		int data_direction)
 {
 {
 	u64bit addr64;
 	u64bit addr64;
 
 
-	addr64.val32.lower = cp->SG[0].Addr.lower;
-	addr64.val32.upper = cp->SG[0].Addr.upper;
+	addr64.val32.lower = c->SG[0].Addr.lower;
+	addr64.val32.upper = c->SG[0].Addr.upper;
 	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
 	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
 }
 }
 
 
 static void
 static void
 cciss_map_one(struct pci_dev *pdev,
 cciss_map_one(struct pci_dev *pdev,
-		CommandList_struct *cp,
+		CommandList_struct *c,
 		unsigned char *buf,
 		unsigned char *buf,
 		size_t buflen,
 		size_t buflen,
 		int data_direction)
 		int data_direction)
@@ -903,164 +897,149 @@ cciss_map_one(struct pci_dev *pdev,
 	__u64 addr64;
 	__u64 addr64;
 
 
 	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
 	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
-	cp->SG[0].Addr.lower = 
+	c->SG[0].Addr.lower =
 	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
 	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
-	cp->SG[0].Addr.upper =
+	c->SG[0].Addr.upper =
 	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
 	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
-	cp->SG[0].Len = buflen;
-	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
-	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+	c->SG[0].Len = buflen;
+	c->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
+	c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
 }
 }
 
 
 static int
 static int
-cciss_scsi_do_simple_cmd(ctlr_info_t *c,
-			CommandList_struct *cp,
+cciss_scsi_do_simple_cmd(ctlr_info_t *h,
+			CommandList_struct *c,
 			unsigned char *scsi3addr, 
 			unsigned char *scsi3addr, 
 			unsigned char *cdb,
 			unsigned char *cdb,
 			unsigned char cdblen,
 			unsigned char cdblen,
 			unsigned char *buf, int bufsize,
 			unsigned char *buf, int bufsize,
 			int direction)
 			int direction)
 {
 {
-	unsigned long flags;
 	DECLARE_COMPLETION_ONSTACK(wait);
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 
-	cp->cmd_type = CMD_IOCTL_PEND;		// treat this like an ioctl 
-	cp->scsi_cmd = NULL;
-	cp->Header.ReplyQueue = 0;  // unused in simple mode
-	memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
-	cp->Header.Tag.lower = cp->busaddr;  // Use k. address of cmd as tag
+	c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
+	c->scsi_cmd = NULL;
+	c->Header.ReplyQueue = 0;  /* unused in simple mode */
+	memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
+	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
 	// Fill in the request block...
 	// Fill in the request block...
 
 
 	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", 
 	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", 
 		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
 		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
 		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
 		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
 
 
-	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
-	memcpy(cp->Request.CDB, cdb, cdblen);
-	cp->Request.Timeout = 0;
-	cp->Request.CDBLen = cdblen;
-	cp->Request.Type.Type = TYPE_CMD;
-	cp->Request.Type.Attribute = ATTR_SIMPLE;
-	cp->Request.Type.Direction = direction;
+	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+	memcpy(c->Request.CDB, cdb, cdblen);
+	c->Request.Timeout = 0;
+	c->Request.CDBLen = cdblen;
+	c->Request.Type.Type = TYPE_CMD;
+	c->Request.Type.Attribute = ATTR_SIMPLE;
+	c->Request.Type.Direction = direction;
 
 
 	/* Fill in the SG list and do dma mapping */
 	/* Fill in the SG list and do dma mapping */
-	cciss_map_one(c->pdev, cp, (unsigned char *) buf,
+	cciss_map_one(h->pdev, c, (unsigned char *) buf,
 			bufsize, DMA_FROM_DEVICE); 
 			bufsize, DMA_FROM_DEVICE); 
 
 
-	cp->waiting = &wait;
-
-	/* Put the request on the tail of the request queue */
-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
-	addQ(&c->reqQ, cp);
-	c->Qdepth++;
-	start_io(c);
-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
-
+	c->waiting = &wait;
+	enqueue_cmd_and_start_io(h, c);
 	wait_for_completion(&wait);
 	wait_for_completion(&wait);
 
 
 	/* undo the dma mapping */
 	/* undo the dma mapping */
-	cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE);
+	cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
 	return(0);
 	return(0);
 }
 }
 
 
 static void 
 static void 
-cciss_scsi_interpret_error(CommandList_struct *cp)
+cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
 {
 {
 	ErrorInfo_struct *ei;
 	ErrorInfo_struct *ei;
 
 
-	ei = cp->err_info; 
+	ei = c->err_info;
 	switch(ei->CommandStatus)
 	switch(ei->CommandStatus)
 	{
 	{
 		case CMD_TARGET_STATUS:
 		case CMD_TARGET_STATUS:
-			printk(KERN_WARNING "cciss: cmd %p has "
-				"completed with errors\n", cp);
-			printk(KERN_WARNING "cciss: cmd %p "
-				"has SCSI Status = %x\n",
-					cp,  
-					ei->ScsiStatus);
+			dev_warn(&h->pdev->dev,
+				"cmd %p has completed with errors\n", c);
+			dev_warn(&h->pdev->dev,
+				"cmd %p has SCSI Status = %x\n",
+				c, ei->ScsiStatus);
 			if (ei->ScsiStatus == 0)
 			if (ei->ScsiStatus == 0)
-				printk(KERN_WARNING 
-				"cciss:SCSI status is abnormally zero.  "
+				dev_warn(&h->pdev->dev,
+				"SCSI status is abnormally zero.  "
 				"(probably indicates selection timeout "
 				"(probably indicates selection timeout "
 				"reported incorrectly due to a known "
 				"reported incorrectly due to a known "
 				"firmware bug, circa July, 2001.)\n");
 				"firmware bug, circa July, 2001.)\n");
 		break;
 		break;
 		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
 		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
-			printk("UNDERRUN\n");
+			dev_info(&h->pdev->dev, "UNDERRUN\n");
 		break;
 		break;
 		case CMD_DATA_OVERRUN:
 		case CMD_DATA_OVERRUN:
-			printk(KERN_WARNING "cciss: cp %p has"
+			dev_warn(&h->pdev->dev, "%p has"
 				" completed with data overrun "
 				" completed with data overrun "
-				"reported\n", cp);
+				"reported\n", c);
 		break;
 		break;
 		case CMD_INVALID: {
 		case CMD_INVALID: {
 			/* controller unfortunately reports SCSI passthru's */
 			/* controller unfortunately reports SCSI passthru's */
 			/* to non-existent targets as invalid commands. */
 			/* to non-existent targets as invalid commands. */
-			printk(KERN_WARNING "cciss: cp %p is "
-				"reported invalid (probably means "
-				"target device no longer present)\n", 
-				cp); 
-			/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
-			print_cmd(cp);  */
+			dev_warn(&h->pdev->dev,
+				"%p is reported invalid (probably means "
+				"target device no longer present)\n", c);
+			/* print_bytes((unsigned char *) c, sizeof(*c), 1, 0);
+			print_cmd(c);  */
 			}
 			}
 		break;
 		break;
 		case CMD_PROTOCOL_ERR:
 		case CMD_PROTOCOL_ERR:
-			printk(KERN_WARNING "cciss: cp %p has "
-				"protocol error \n", cp);
+			dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
 		break;
 		break;
 		case CMD_HARDWARE_ERR:
 		case CMD_HARDWARE_ERR:
 			/* cmd->result = DID_ERROR << 16; */
 			/* cmd->result = DID_ERROR << 16; */
-			printk(KERN_WARNING "cciss: cp %p had " 
-				" hardware error\n", cp);
+			dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
 		break;
 		break;
 		case CMD_CONNECTION_LOST:
 		case CMD_CONNECTION_LOST:
-			printk(KERN_WARNING "cciss: cp %p had "
-				"connection lost\n", cp);
+			dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
 		break;
 		break;
 		case CMD_ABORTED:
 		case CMD_ABORTED:
-			printk(KERN_WARNING "cciss: cp %p was "
-				"aborted\n", cp);
+			dev_warn(&h->pdev->dev, "%p was aborted\n", c);
 		break;
 		break;
 		case CMD_ABORT_FAILED:
 		case CMD_ABORT_FAILED:
-			printk(KERN_WARNING "cciss: cp %p reports "
-				"abort failed\n", cp);
+			dev_warn(&h->pdev->dev,
+				"%p reports abort failed\n", c);
 		break;
 		break;
 		case CMD_UNSOLICITED_ABORT:
 		case CMD_UNSOLICITED_ABORT:
-			printk(KERN_WARNING "cciss: cp %p aborted "
-				"do to an unsolicited abort\n", cp);
+			dev_warn(&h->pdev->dev,
+				"%p aborted due to an unsolicited abort\n", c);
 		break;
 		break;
 		case CMD_TIMEOUT:
 		case CMD_TIMEOUT:
-			printk(KERN_WARNING "cciss: cp %p timedout\n",
-				cp);
+			dev_warn(&h->pdev->dev, "%p timed out\n", c);
 		break;
 		break;
 		default:
 		default:
-			printk(KERN_WARNING "cciss: cp %p returned "
-				"unknown status %x\n", cp, 
-					ei->CommandStatus); 
+			dev_warn(&h->pdev->dev,
+				"%p returned unknown status %x\n",
+				c, ei->CommandStatus);
 	}
 	}
 }
 }
 
 
 static int
 static int
-cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 
+cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
 	unsigned char page, unsigned char *buf,
 	unsigned char page, unsigned char *buf,
 	unsigned char bufsize)
 	unsigned char bufsize)
 {
 {
 	int rc;
 	int rc;
-	CommandList_struct *cp;
+	CommandList_struct *c;
 	char cdb[6];
 	char cdb[6];
 	ErrorInfo_struct *ei;
 	ErrorInfo_struct *ei;
 	unsigned long flags;
 	unsigned long flags;
 
 
-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
-	cp = scsi_cmd_alloc(c);
-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
+	c = scsi_cmd_alloc(h);
+	spin_unlock_irqrestore(&h->lock, flags);
 
 
-	if (cp == NULL) {			/* trouble... */
+	if (c == NULL) {			/* trouble... */
 		printk("cmd_alloc returned NULL!\n");
 		printk("cmd_alloc returned NULL!\n");
 		return -1;
 		return -1;
 	}
 	}
 
 
-	ei = cp->err_info; 
+	ei = c->err_info;
 
 
 	cdb[0] = CISS_INQUIRY;
 	cdb[0] = CISS_INQUIRY;
 	cdb[1] = (page != 0);
 	cdb[1] = (page != 0);
@@ -1068,24 +1047,24 @@ cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr,
 	cdb[3] = 0;
 	cdb[3] = 0;
 	cdb[4] = bufsize;
 	cdb[4] = bufsize;
 	cdb[5] = 0;
 	cdb[5] = 0;
-	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, 
+	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb,
 				6, buf, bufsize, XFER_READ);
 				6, buf, bufsize, XFER_READ);
 
 
 	if (rc != 0) return rc; /* something went wrong */
 	if (rc != 0) return rc; /* something went wrong */
 
 
 	if (ei->CommandStatus != 0 && 
 	if (ei->CommandStatus != 0 && 
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		cciss_scsi_interpret_error(cp);
+		cciss_scsi_interpret_error(h, c);
 		rc = -1;
 		rc = -1;
 	}
 	}
-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
-	scsi_cmd_free(c, cp);
-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
+	scsi_cmd_free(h, c);
+	spin_unlock_irqrestore(&h->lock, flags);
 	return rc;	
 	return rc;	
 }
 }
 
 
 /* Get the device id from inquiry page 0x83 */
 /* Get the device id from inquiry page 0x83 */
-static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
+static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr,
 	unsigned char *device_id, int buflen)
 	unsigned char *device_id, int buflen)
 {
 {
 	int rc;
 	int rc;
@@ -1096,7 +1075,7 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
 	buf = kzalloc(64, GFP_KERNEL);
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 	if (!buf)
 		return -1;
 		return -1;
-	rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64);
+	rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
 	if (rc == 0)
 	if (rc == 0)
 		memcpy(device_id, &buf[8], buflen);
 		memcpy(device_id, &buf[8], buflen);
 	kfree(buf);
 	kfree(buf);
@@ -1104,20 +1083,20 @@ static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr,
 }
 }
 
 
 static int
 static int
-cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 
+cciss_scsi_do_report_phys_luns(ctlr_info_t *h,
 		ReportLunData_struct *buf, int bufsize)
 		ReportLunData_struct *buf, int bufsize)
 {
 {
 	int rc;
 	int rc;
-	CommandList_struct *cp;
+	CommandList_struct *c;
 	unsigned char cdb[12];
 	unsigned char cdb[12];
 	unsigned char scsi3addr[8]; 
 	unsigned char scsi3addr[8]; 
 	ErrorInfo_struct *ei;
 	ErrorInfo_struct *ei;
 	unsigned long flags;
 	unsigned long flags;
 
 
-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
-	cp = scsi_cmd_alloc(c);
-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
-	if (cp == NULL) {			/* trouble... */
+	spin_lock_irqsave(&h->lock, flags);
+	c = scsi_cmd_alloc(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+	if (c == NULL) {			/* trouble... */
 		printk("cmd_alloc returned NULL!\n");
 		printk("cmd_alloc returned NULL!\n");
 		return -1;
 		return -1;
 	}
 	}
@@ -1136,27 +1115,27 @@ cciss_scsi_do_report_phys_luns(ctlr_info_t *c,
 	cdb[10] = 0;
 	cdb[10] = 0;
 	cdb[11] = 0;
 	cdb[11] = 0;
 
 
-	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, 
+	rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr,
 				cdb, 12, 
 				cdb, 12, 
 				(unsigned char *) buf, 
 				(unsigned char *) buf, 
 				bufsize, XFER_READ);
 				bufsize, XFER_READ);
 
 
 	if (rc != 0) return rc; /* something went wrong */
 	if (rc != 0) return rc; /* something went wrong */
 
 
-	ei = cp->err_info; 
+	ei = c->err_info;
 	if (ei->CommandStatus != 0 && 
 	if (ei->CommandStatus != 0 && 
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
 	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		cciss_scsi_interpret_error(cp);
+		cciss_scsi_interpret_error(h, c);
 		rc = -1;
 		rc = -1;
 	}
 	}
-	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
-	scsi_cmd_free(c, cp);
-	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	spin_lock_irqsave(&h->lock, flags);
+	scsi_cmd_free(h, c);
+	spin_unlock_irqrestore(&h->lock, flags);
 	return rc;	
 	return rc;	
 }
 }
 
 
 static void
 static void
-cciss_update_non_disk_devices(int cntl_num, int hostno)
+cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
 {
 {
 	/* the idea here is we could get notified from /proc
 	/* the idea here is we could get notified from /proc
 	   that some devices have changed, so we do a report 
 	   that some devices have changed, so we do a report 
@@ -1189,7 +1168,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 	ReportLunData_struct *ld_buff;
 	ReportLunData_struct *ld_buff;
 	unsigned char *inq_buff;
 	unsigned char *inq_buff;
 	unsigned char scsi3addr[8];
 	unsigned char scsi3addr[8];
-	ctlr_info_t *c;
 	__u32 num_luns=0;
 	__u32 num_luns=0;
 	unsigned char *ch;
 	unsigned char *ch;
 	struct cciss_scsi_dev_t *currentsd, *this_device;
 	struct cciss_scsi_dev_t *currentsd, *this_device;
@@ -1197,7 +1175,6 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
 	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
 	int i;
 	int i;
 
 
-	c = (ctlr_info_t *) hba[cntl_num];	
 	ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
 	ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
 	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
 	inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
 	currentsd = kzalloc(sizeof(*currentsd) *
 	currentsd = kzalloc(sizeof(*currentsd) *
@@ -1207,7 +1184,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 		goto out;
 		goto out;
 	}
 	}
 	this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
 	this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
-	if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
+	if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
 		ch = &ld_buff->LUNListLength[0];
 		ch = &ld_buff->LUNListLength[0];
 		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
 		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
 		if (num_luns > CISS_MAX_PHYS_LUN) {
 		if (num_luns > CISS_MAX_PHYS_LUN) {
@@ -1231,7 +1208,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 		memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
 		memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
 		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
 		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
 
 
-		if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff,
+		if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
 			(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
 			(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
 			/* Inquiry failed (msg printed already) */
 			/* Inquiry failed (msg printed already) */
 			continue; /* so we will skip this device. */
 			continue; /* so we will skip this device. */
@@ -1249,7 +1226,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 			sizeof(this_device->revision));
 			sizeof(this_device->revision));
 		memset(this_device->device_id, 0,
 		memset(this_device->device_id, 0,
 			sizeof(this_device->device_id));
 			sizeof(this_device->device_id));
-		cciss_scsi_get_device_id(hba[cntl_num], scsi3addr,
+		cciss_scsi_get_device_id(h, scsi3addr,
 			this_device->device_id, sizeof(this_device->device_id));
 			this_device->device_id, sizeof(this_device->device_id));
 
 
 		switch (this_device->devtype)
 		switch (this_device->devtype)
@@ -1276,7 +1253,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 		  case 0x08: /* medium changer */
 		  case 0x08: /* medium changer */
 			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
 			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
 				printk(KERN_INFO "cciss%d: %s ignored, "
 				printk(KERN_INFO "cciss%d: %s ignored, "
-					"too many devices.\n", cntl_num,
+					"too many devices.\n", h->ctlr,
 					scsi_device_type(this_device->devtype));
 					scsi_device_type(this_device->devtype));
 				break;
 				break;
 			}
 			}
@@ -1288,7 +1265,7 @@ cciss_update_non_disk_devices(int cntl_num, int hostno)
 		}
 		}
 	}
 	}
 
 
-	adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
+	adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
 out:
 out:
 	kfree(inq_buff);
 	kfree(inq_buff);
 	kfree(ld_buff);
 	kfree(ld_buff);
@@ -1307,12 +1284,12 @@ is_keyword(char *ptr, int len, char *verb)  // Thanks to ncr53c8xx.c
 }
 }
 
 
 static int
 static int
-cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
+cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
 {
 {
 	int arg_len;
 	int arg_len;
 
 
 	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
 	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
-		cciss_update_non_disk_devices(ctlr, hostno);
+		cciss_update_non_disk_devices(h, hostno);
 	else
 	else
 		return -EINVAL;
 		return -EINVAL;
 	return length;
 	return length;
@@ -1329,20 +1306,16 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
 {
 {
 
 
 	int buflen, datalen;
 	int buflen, datalen;
-	ctlr_info_t *ci;
+	ctlr_info_t *h;
 	int i;
 	int i;
-	int cntl_num;
-
 
 
-	ci = (ctlr_info_t *) sh->hostdata[0];
-	if (ci == NULL)  /* This really shouldn't ever happen. */
+	h = (ctlr_info_t *) sh->hostdata[0];
+	if (h == NULL)  /* This really shouldn't ever happen. */
 		return -EINVAL;
 		return -EINVAL;
 
 
-	cntl_num = ci->ctlr;	/* Get our index into the hba[] array */
-
 	if (func == 0) {	/* User is reading from /proc/scsi/ciss*?/?*  */
 	if (func == 0) {	/* User is reading from /proc/scsi/ciss*?/?*  */
 		buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
 		buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n",
-				cntl_num, sh->host_no);
+				h->ctlr, sh->host_no);
 
 
 		/* this information is needed by apps to know which cciss
 		/* this information is needed by apps to know which cciss
 		   device corresponds to which scsi host number without
 		   device corresponds to which scsi host number without
@@ -1352,8 +1325,9 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
 		   this info is for an app to be able to use to know how to
 		   this info is for an app to be able to use to know how to
 		   get them back in sync. */
 		   get them back in sync. */
 
 
-		for (i=0;i<ccissscsi[cntl_num].ndevices;i++) {
-			struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i];
+		for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
+			struct cciss_scsi_dev_t *sd =
+				&ccissscsi[h->ctlr].dev[i];
 			buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
 			buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d "
 				"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 				"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
 				sh->host_no, sd->bus, sd->target, sd->lun,
 				sh->host_no, sd->bus, sd->target, sd->lun,
@@ -1371,15 +1345,15 @@ cciss_scsi_proc_info(struct Scsi_Host *sh,
 			*start = buffer + offset;
 			*start = buffer + offset;
 		return(datalen);
 		return(datalen);
 	} else 	/* User is writing to /proc/scsi/cciss*?/?*  ... */
 	} else 	/* User is writing to /proc/scsi/cciss*?/?*  ... */
-		return cciss_scsi_user_command(cntl_num, sh->host_no,
+		return cciss_scsi_user_command(h, sh->host_no,
 			buffer, length);	
 			buffer, length);	
 } 
 } 
 
 
 /* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 
 /* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 
    dma mapping  and fills in the scatter gather entries of the 
    dma mapping  and fills in the scatter gather entries of the 
-   cciss command, cp. */
+   cciss command, c. */
 
 
-static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
+static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
 	struct scsi_cmnd *cmd)
 	struct scsi_cmnd *cmd)
 {
 {
 	unsigned int len;
 	unsigned int len;
@@ -1393,7 +1367,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
 
 
 	chained = 0;
 	chained = 0;
 	sg_index = 0;
 	sg_index = 0;
-	curr_sg = cp->SG;
+	curr_sg = c->SG;
 	request_nsgs = scsi_dma_map(cmd);
 	request_nsgs = scsi_dma_map(cmd);
 	if (request_nsgs) {
 	if (request_nsgs) {
 		scsi_for_each_sg(cmd, sg, request_nsgs, i) {
 		scsi_for_each_sg(cmd, sg, request_nsgs, i) {
@@ -1401,7 +1375,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
 				!chained && request_nsgs - i > 1) {
 				!chained && request_nsgs - i > 1) {
 				chained = 1;
 				chained = 1;
 				sg_index = 0;
 				sg_index = 0;
-				curr_sg = sa->cmd_sg_list[cp->cmdindex];
+				curr_sg = sa->cmd_sg_list[c->cmdindex];
 			}
 			}
 			addr64 = (__u64) sg_dma_address(sg);
 			addr64 = (__u64) sg_dma_address(sg);
 			len  = sg_dma_len(sg);
 			len  = sg_dma_len(sg);
@@ -1414,19 +1388,19 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
 			++sg_index;
 			++sg_index;
 		}
 		}
 		if (chained)
 		if (chained)
-			cciss_map_sg_chain_block(h, cp,
-				sa->cmd_sg_list[cp->cmdindex],
+			cciss_map_sg_chain_block(h, c,
+				sa->cmd_sg_list[c->cmdindex],
 				(request_nsgs - (h->max_cmd_sgentries - 1)) *
 				(request_nsgs - (h->max_cmd_sgentries - 1)) *
 					sizeof(SGDescriptor_struct));
 					sizeof(SGDescriptor_struct));
 	}
 	}
 	/* track how many SG entries we are using */
 	/* track how many SG entries we are using */
 	if (request_nsgs > h->maxSG)
 	if (request_nsgs > h->maxSG)
 		h->maxSG = request_nsgs;
 		h->maxSG = request_nsgs;
-	cp->Header.SGTotal = (__u8) request_nsgs + chained;
+	c->Header.SGTotal = (__u8) request_nsgs + chained;
 	if (request_nsgs > h->max_cmd_sgentries)
 	if (request_nsgs > h->max_cmd_sgentries)
-		cp->Header.SGList = h->max_cmd_sgentries;
+		c->Header.SGList = h->max_cmd_sgentries;
 	else
 	else
-		cp->Header.SGList = cp->Header.SGTotal;
+		c->Header.SGList = c->Header.SGTotal;
 	return;
 	return;
 }
 }
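
The hunk above is a pure rename (cp becomes c), but the scatter-gather header bookkeeping it carries is easy to misread: Header.SGTotal counts every descriptor including the chain pointer, while Header.SGList is capped at the per-command limit. A small userspace arithmetic sketch of just that relationship follows; the limit, the entry count and whether a chain block is used are made-up illustration values, not the controller's.

/* Illustration of the Header.SGTotal / Header.SGList arithmetic in
 * cciss_scatter_gather(); all numbers here are invented for the example. */
#include <stdio.h>

int main(void)
{
	unsigned int max_cmd_sgentries = 31;	/* assumed per-command limit */
	unsigned int request_nsgs = 40;		/* what scsi_dma_map() returned */
	unsigned int chained = request_nsgs > max_cmd_sgentries;  /* chain block used */

	unsigned int sg_total = request_nsgs + chained;	/* Header.SGTotal */
	unsigned int sg_list = request_nsgs > max_cmd_sgentries ?
				max_cmd_sgentries : sg_total;	/* Header.SGList */

	printf("nsgs=%u chained=%u SGTotal=%u SGList=%u\n",
	       request_nsgs, chained, sg_total, sg_list);
	return 0;
}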
 
 
@@ -1434,18 +1408,17 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp,
 static int
 static int
 cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 {
 {
-	ctlr_info_t *c;
-	int ctlr, rc;
+	ctlr_info_t *h;
+	int rc;
 	unsigned char scsi3addr[8];
 	unsigned char scsi3addr[8];
-	CommandList_struct *cp;
+	CommandList_struct *c;
 	unsigned long flags;
 	unsigned long flags;
 
 
 	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
 	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
 	// We violate cmd->host privacy here.  (Is there another way?)
 	// We violate cmd->host privacy here.  (Is there another way?)
-	c = (ctlr_info_t *) cmd->device->host->hostdata[0];
-	ctlr = c->ctlr;
+	h = (ctlr_info_t *) cmd->device->host->hostdata[0];
 
 
-	rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, 
+	rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
 			cmd->device->lun, scsi3addr);
 			cmd->device->lun, scsi3addr);
 	if (rc != 0) {
 	if (rc != 0) {
 		/* the scsi nexus does not match any that we presented... */
 		/* the scsi nexus does not match any that we presented... */
@@ -1457,19 +1430,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
 		return 0;
 		return 0;
 	}
 	}
 
 
-	/* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n", 
-		cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/
-	// printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel, 
-	//	cmd->target, cmd->lun);
-
 	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
 	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
            see what the device thinks of it. */
            see what the device thinks of it. */
 
 
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	cp = scsi_cmd_alloc(c);
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-	if (cp == NULL) {			/* trouble... */
-		printk("scsi_cmd_alloc returned NULL!\n");
+	spin_lock_irqsave(&h->lock, flags);
+	c = scsi_cmd_alloc(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+	if (c == NULL) {			/* trouble... */
+		dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
 		/* FIXME: next 3 lines are -> BAD! <- */
 		/* FIXME: next 3 lines are -> BAD! <- */
 		cmd->result = DID_NO_CONNECT << 16;
 		cmd->result = DID_NO_CONNECT << 16;
 		done(cmd);
 		done(cmd);
@@ -1480,35 +1448,41 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
 
 
 	cmd->scsi_done = done;    // save this for use by completion code 
 	cmd->scsi_done = done;    // save this for use by completion code 
 
 
-	// save cp in case we have to abort it 
-	cmd->host_scribble = (unsigned char *) cp; 
+	/* save c in case we have to abort it */
+	cmd->host_scribble = (unsigned char *) c;
 
 
-	cp->cmd_type = CMD_SCSI;
-	cp->scsi_cmd = cmd;
-	cp->Header.ReplyQueue = 0;  // unused in simple mode
-	memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
-	cp->Header.Tag.lower = cp->busaddr;  // Use k. address of cmd as tag
+	c->cmd_type = CMD_SCSI;
+	c->scsi_cmd = cmd;
+	c->Header.ReplyQueue = 0;  /* unused in simple mode */
+	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+	c->Header.Tag.lower = c->busaddr;  /* Use k. address of cmd as tag */
 	
 	
 	// Fill in the request block...
 	// Fill in the request block...
 
 
-	cp->Request.Timeout = 0;
-	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
-	BUG_ON(cmd->cmd_len > sizeof(cp->Request.CDB));
-	cp->Request.CDBLen = cmd->cmd_len;
-	memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
-	cp->Request.Type.Type = TYPE_CMD;
-	cp->Request.Type.Attribute = ATTR_SIMPLE;
+	c->Request.Timeout = 0;
+	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
+	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
+	c->Request.CDBLen = cmd->cmd_len;
+	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
+	c->Request.Type.Type = TYPE_CMD;
+	c->Request.Type.Attribute = ATTR_SIMPLE;
 	switch(cmd->sc_data_direction)
 	switch(cmd->sc_data_direction)
 	{
 	{
-	  case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break;
-	  case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break;
-	  case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
+	  case DMA_TO_DEVICE:
+		c->Request.Type.Direction = XFER_WRITE;
+		break;
+	  case DMA_FROM_DEVICE:
+		c->Request.Type.Direction = XFER_READ;
+		break;
+	  case DMA_NONE:
+		c->Request.Type.Direction = XFER_NONE;
+		break;
 	  case DMA_BIDIRECTIONAL:
 	  case DMA_BIDIRECTIONAL:
 		// This can happen if a buggy application does a scsi passthru
 		// This can happen if a buggy application does a scsi passthru
 		// and sets both inlen and outlen to non-zero. ( see
 		// and sets both inlen and outlen to non-zero. ( see
 		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
 		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
 
 
-	  	cp->Request.Type.Direction = XFER_RSVD;
+		c->Request.Type.Direction = XFER_RSVD;
 		// This is technically wrong, and cciss controllers should
 		// This is technically wrong, and cciss controllers should
 		// reject it with CMD_INVALID, which is the most correct 
 		// reject it with CMD_INVALID, which is the most correct 
 		// response, but non-fibre backends appear to let it 
 		// response, but non-fibre backends appear to let it 
@@ -1519,27 +1493,18 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd
 		break;
 		break;
 
 
 	  default: 
 	  default: 
-		printk("cciss: unknown data direction: %d\n", 
+		dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
 			cmd->sc_data_direction);
 			cmd->sc_data_direction);
 		BUG();
 		BUG();
 		break;
 		break;
 	}
 	}
-	cciss_scatter_gather(c, cp, cmd);
-
-	/* Put the request on the tail of the request queue */
-
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	addQ(&c->reqQ, cp);
-	c->Qdepth++;
-	start_io(c);
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-
+	cciss_scatter_gather(h, c, cmd);
+	enqueue_cmd_and_start_io(h, c);
 	/* the cmd'll come back via intr handler in complete_scsi_command()  */
 	/* the cmd'll come back via intr handler in complete_scsi_command()  */
 	return 0;
 	return 0;
 }
 }
 
 
-static void 
-cciss_unregister_scsi(int ctlr)
+static void cciss_unregister_scsi(ctlr_info_t *h)
 {
 {
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_cmd_stack_t *stk;
 	struct cciss_scsi_cmd_stack_t *stk;
@@ -1547,59 +1512,58 @@ cciss_unregister_scsi(int ctlr)
 
 
 	/* we are being forcibly unloaded, and may not refuse. */
 	/* we are being forcibly unloaded, and may not refuse. */
 
 
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	sa = hba[ctlr]->scsi_ctlr;
+	spin_lock_irqsave(&h->lock, flags);
+	sa = h->scsi_ctlr;
 	stk = &sa->cmd_stack; 
 	stk = &sa->cmd_stack; 
 
 
 	/* if we weren't ever actually registered, don't unregister */ 
 	/* if we weren't ever actually registered, don't unregister */ 
 	if (sa->registered) {
 	if (sa->registered) {
-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		spin_unlock_irqrestore(&h->lock, flags);
 		scsi_remove_host(sa->scsi_host);
 		scsi_remove_host(sa->scsi_host);
 		scsi_host_put(sa->scsi_host);
 		scsi_host_put(sa->scsi_host);
-		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+		spin_lock_irqsave(&h->lock, flags);
 	}
 	}
 
 
 	/* set scsi_host to NULL so our detect routine will 
 	/* set scsi_host to NULL so our detect routine will 
 	   find us on register */
 	   find us on register */
 	sa->scsi_host = NULL;
 	sa->scsi_host = NULL;
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-	scsi_cmd_stack_free(ctlr);
+	spin_unlock_irqrestore(&h->lock, flags);
+	scsi_cmd_stack_free(h);
 	kfree(sa);
 	kfree(sa);
 }
 }
 
 
-static int 
-cciss_engage_scsi(int ctlr)
+static int cciss_engage_scsi(ctlr_info_t *h)
 {
 {
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_adapter_data_t *sa;
 	struct cciss_scsi_cmd_stack_t *stk;
 	struct cciss_scsi_cmd_stack_t *stk;
 	unsigned long flags;
 	unsigned long flags;
 
 
-	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
-	sa = hba[ctlr]->scsi_ctlr;
+	spin_lock_irqsave(&h->lock, flags);
+	sa = h->scsi_ctlr;
 	stk = &sa->cmd_stack; 
 	stk = &sa->cmd_stack; 
 
 
 	if (sa->registered) {
 	if (sa->registered) {
-		printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
-		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n");
+		spin_unlock_irqrestore(&h->lock, flags);
 		return -ENXIO;
 		return -ENXIO;
 	}
 	}
 	sa->registered = 1;
 	sa->registered = 1;
-	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
-	cciss_update_non_disk_devices(ctlr, -1);
-	cciss_scsi_detect(ctlr);
+	spin_unlock_irqrestore(&h->lock, flags);
+	cciss_update_non_disk_devices(h, -1);
+	cciss_scsi_detect(h);
 	return 0;
 	return 0;
 }
 }
 
 
 static void
 static void
-cciss_seq_tape_report(struct seq_file *seq, int ctlr)
+cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h)
 {
 {
 	unsigned long flags;
 	unsigned long flags;
 
 
-	CPQ_TAPE_LOCK(ctlr, flags);
+	CPQ_TAPE_LOCK(h, flags);
 	seq_printf(seq,
 	seq_printf(seq,
 		"Sequential access devices: %d\n\n",
 		"Sequential access devices: %d\n\n",
-			ccissscsi[ctlr].ndevices);
-	CPQ_TAPE_UNLOCK(ctlr, flags);
+			ccissscsi[h->ctlr].ndevices);
+	CPQ_TAPE_UNLOCK(h, flags);
 }
 }
 
 
 static int wait_for_device_to_become_ready(ctlr_info_t *h,
 static int wait_for_device_to_become_ready(ctlr_info_t *h,
@@ -1610,10 +1574,10 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
 	int waittime = HZ;
 	int waittime = HZ;
 	CommandList_struct *c;
 	CommandList_struct *c;
 
 
-	c = cmd_alloc(h, 1);
+	c = cmd_alloc(h);
 	if (!c) {
 	if (!c) {
-		printk(KERN_WARNING "cciss%d: out of memory in "
-			"wait_for_device_to_become_ready.\n", h->ctlr);
+		dev_warn(&h->pdev->dev, "out of memory in "
+			"wait_for_device_to_become_ready.\n");
 		return IO_ERROR;
 		return IO_ERROR;
 	}
 	}
 
 
@@ -1631,7 +1595,7 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
 			waittime = waittime * 2;
 			waittime = waittime * 2;
 
 
 		/* Send the Test Unit Ready */
 		/* Send the Test Unit Ready */
-		rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
+		rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0,
 			lunaddr, TYPE_CMD);
 			lunaddr, TYPE_CMD);
 		if (rc == 0)
 		if (rc == 0)
 			rc = sendcmd_withirq_core(h, c, 0);
 			rc = sendcmd_withirq_core(h, c, 0);
@@ -1657,18 +1621,18 @@ static int wait_for_device_to_become_ready(ctlr_info_t *h,
 			}
 			}
 		}
 		}
 retry_tur:
 retry_tur:
-		printk(KERN_WARNING "cciss%d: Waiting %d secs "
+		dev_warn(&h->pdev->dev, "Waiting %d secs "
 			"for device to become ready.\n",
 			"for device to become ready.\n",
-			h->ctlr, waittime / HZ);
+			waittime / HZ);
 		rc = 1; /* device not ready. */
 		rc = 1; /* device not ready. */
 	}
 	}
 
 
 	if (rc)
 	if (rc)
-		printk("cciss%d: giving up on device.\n", h->ctlr);
+		dev_warn(&h->pdev->dev, "giving up on device.\n");
 	else
 	else
-		printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
+		dev_warn(&h->pdev->dev, "device is ready.\n");
 
 
-	cmd_free(h, c, 1);
+	cmd_free(h, c);
 	return rc;
 	return rc;
 }
 }
 
 
@@ -1688,26 +1652,24 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 	int rc;
 	int rc;
 	CommandList_struct *cmd_in_trouble;
 	CommandList_struct *cmd_in_trouble;
 	unsigned char lunaddr[8];
 	unsigned char lunaddr[8];
-	ctlr_info_t *c;
-	int ctlr;
+	ctlr_info_t *h;
 
 
 	/* find the controller to which the command to be aborted was sent */
 	/* find the controller to which the command to be aborted was sent */
-	c = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
-	if (c == NULL) /* paranoia */
+	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
+	if (h == NULL) /* paranoia */
 		return FAILED;
 		return FAILED;
-	ctlr = c->ctlr;
-	printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
+	dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n");
 	/* find the command that's giving us trouble */
 	/* find the command that's giving us trouble */
 	cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
 	cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
 	if (cmd_in_trouble == NULL) /* paranoia */
 	if (cmd_in_trouble == NULL) /* paranoia */
 		return FAILED;
 		return FAILED;
 	memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
 	memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
 	/* send a reset to the SCSI LUN which the command was sent to */
 	/* send a reset to the SCSI LUN which the command was sent to */
-	rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
+	rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr,
 		TYPE_MSG);
 		TYPE_MSG);
-	if (rc == 0 && wait_for_device_to_become_ready(c, lunaddr) == 0)
+	if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0)
 		return SUCCESS;
 		return SUCCESS;
-	printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
+	dev_warn(&h->pdev->dev, "resetting device failed.\n");
 	return FAILED;
 	return FAILED;
 }
 }
 
 
@@ -1716,22 +1678,20 @@ static int  cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
 	int rc;
 	int rc;
 	CommandList_struct *cmd_to_abort;
 	CommandList_struct *cmd_to_abort;
 	unsigned char lunaddr[8];
 	unsigned char lunaddr[8];
-	ctlr_info_t *c;
-	int ctlr;
+	ctlr_info_t *h;
 
 
 	/* find the controller to which the command to be aborted was sent */
 	/* find the controller to which the command to be aborted was sent */
-	c = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
-	if (c == NULL) /* paranoia */
+	h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
+	if (h == NULL) /* paranoia */
 		return FAILED;
 		return FAILED;
-	ctlr = c->ctlr;
-	printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr);
+	dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n");
 
 
 	/* find the command to be aborted */
 	/* find the command to be aborted */
 	cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
 	cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
 	if (cmd_to_abort == NULL) /* paranoia */
 	if (cmd_to_abort == NULL) /* paranoia */
 		return FAILED;
 		return FAILED;
 	memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
 	memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
-	rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
+	rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag,
 		0, 0, lunaddr, TYPE_MSG);
 		0, 0, lunaddr, TYPE_MSG);
 	if (rc == 0)
 	if (rc == 0)
 		return SUCCESS;
 		return SUCCESS;
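
Across cciss_scsi.c the conversion is uniform: functions stop taking a bare controller index and take the ctlr_info_t pointer instead, so locking uses h->lock directly and messages go through the controller's struct device rather than a hand-built "cciss%d:" prefix. A minimal sketch of the converted shape, with hypothetical type and function names (my_ctlr, my_rescan) standing in for the driver's own, assuming 2.6.35-era driver-core APIs:

#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/device.h>

struct my_ctlr {			/* stands in for ctlr_info_t */
	struct pci_dev *pdev;
	spinlock_t lock;
};

static void my_rescan(struct my_ctlr *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);	/* was CCISS_LOCK(ctlr) */
	/* ... per-controller state manipulation ... */
	spin_unlock_irqrestore(&h->lock, flags);

	/* was printk(KERN_WARNING "cciss%d: ...", ctlr) */
	dev_warn(&h->pdev->dev, "rescan complete\n");
}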

+ 49 - 29
drivers/block/cpqarray.c

@@ -35,6 +35,7 @@
 #include <linux/seq_file.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/hdreg.h>
 #include <linux/hdreg.h>
+#include <linux/smp_lock.h>
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
 #include <linux/genhd.h>
 #include <linux/genhd.h>
@@ -157,7 +158,7 @@ static int sendcmd(
 	unsigned int blkcnt,
 	unsigned int blkcnt,
 	unsigned int log_unit );
 	unsigned int log_unit );
 
 
-static int ida_open(struct block_device *bdev, fmode_t mode);
+static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
 static int ida_release(struct gendisk *disk, fmode_t mode);
 static int ida_release(struct gendisk *disk, fmode_t mode);
 static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
 static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -195,9 +196,9 @@ static inline ctlr_info_t *get_host(struct gendisk *disk)
 
 
 static const struct block_device_operations ida_fops  = {
 static const struct block_device_operations ida_fops  = {
 	.owner		= THIS_MODULE,
 	.owner		= THIS_MODULE,
-	.open		= ida_open,
+	.open		= ida_unlocked_open,
 	.release	= ida_release,
 	.release	= ida_release,
-	.locked_ioctl	= ida_ioctl,
+	.ioctl		= ida_ioctl,
 	.getgeo		= ida_getgeo,
 	.getgeo		= ida_getgeo,
 	.revalidate_disk= ida_revalidate,
 	.revalidate_disk= ida_revalidate,
 };
 };
@@ -840,13 +841,29 @@ static int ida_open(struct block_device *bdev, fmode_t mode)
 	return 0;
 	return 0;
 }
 }
 
 
+static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = ida_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
 /*
 /*
  * Close.  Sync first.
  * Close.  Sync first.
  */
  */
 static int ida_release(struct gendisk *disk, fmode_t mode)
 static int ida_release(struct gendisk *disk, fmode_t mode)
 {
 {
-	ctlr_info_t *host = get_host(disk);
+	ctlr_info_t *host;
+
+	lock_kernel();
+	host = get_host(disk);
 	host->usage_count--;
 	host->usage_count--;
+	unlock_kernel();
+
 	return 0;
 	return 0;
 }
 }
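
The open/release changes above follow the tree-wide pushdown of the big kernel lock into drivers: the block layer no longer wraps .open, .release and .ioctl in lock_kernel(), so cpqarray keeps its old entry points and adds thin locked wrappers around them. A minimal sketch of that wrapper shape, assuming the 2.6.35-era <linux/smp_lock.h> interface; my_open() and my_fops are hypothetical stand-ins:

#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>

static int my_open(struct block_device *bdev, fmode_t mode)
{
	/* original open logic, still written as if the BKL were held */
	return 0;
}

static int my_unlocked_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	lock_kernel();			/* preserve the old BKL semantics locally */
	ret = my_open(bdev, mode);
	unlock_kernel();

	return ret;
}

static const struct block_device_operations my_fops = {
	.owner	= THIS_MODULE,
	.open	= my_unlocked_open,
};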
 
 
@@ -1128,7 +1145,7 @@ static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
  *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
  *  setting readahead and submitting commands from userspace to the controller.
  *  setting readahead and submitting commands from userspace to the controller.
  */
  */
-static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
 {
 {
 	drv_info_t *drv = get_drv(bdev->bd_disk);
 	drv_info_t *drv = get_drv(bdev->bd_disk);
 	ctlr_info_t *host = get_host(bdev->bd_disk);
 	ctlr_info_t *host = get_host(bdev->bd_disk);
@@ -1162,7 +1179,8 @@ out_passthru:
 		return error;
 		return error;
 	case IDAGETCTLRSIG:
 	case IDAGETCTLRSIG:
 		if (!arg) return -EINVAL;
 		if (!arg) return -EINVAL;
-		put_user(host->ctlr_sig, (int __user *)arg);
+		if (put_user(host->ctlr_sig, (int __user *)arg))
+			return -EFAULT;
 		return 0;
 		return 0;
 	case IDAREVALIDATEVOLS:
 	case IDAREVALIDATEVOLS:
 		if (MINOR(bdev->bd_dev) != 0)
 		if (MINOR(bdev->bd_dev) != 0)
@@ -1170,7 +1188,8 @@ out_passthru:
 		return revalidate_allvol(host);
 		return revalidate_allvol(host);
 	case IDADRIVERVERSION:
 	case IDADRIVERVERSION:
 		if (!arg) return -EINVAL;
 		if (!arg) return -EINVAL;
-		put_user(DRIVER_VERSION, (unsigned long __user *)arg);
+		if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
+			return -EFAULT;
 		return 0;
 		return 0;
 	case IDAGETPCIINFO:
 	case IDAGETPCIINFO:
 	{
 	{
@@ -1192,6 +1211,19 @@ out_passthru:
 	}
 	}
 		
 		
 }
 }
+
+static int ida_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = ida_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 /*
 /*
  * ida_ctlr_ioctl is for passing commands to the controller from userspace.
  * ida_ctlr_ioctl is for passing commands to the controller from userspace.
  * The command block (io) has already been copied to kernel space for us,
  * The command block (io) has already been copied to kernel space for us,
@@ -1225,17 +1257,11 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
 	/* Pre submit processing */
 	/* Pre submit processing */
 	switch(io->cmd) {
 	switch(io->cmd) {
 	case PASSTHRU_A:
 	case PASSTHRU_A:
-		p = kmalloc(io->sg[0].size, GFP_KERNEL);
-		if (!p) 
-		{ 
-			error = -ENOMEM; 
-			cmd_free(h, c, 0); 
-			return(error);
-		}
-		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
-			kfree(p);
-			cmd_free(h, c, 0); 
-			return -EFAULT;
+		p = memdup_user(io->sg[0].addr, io->sg[0].size);
+		if (IS_ERR(p)) {
+			error = PTR_ERR(p);
+			cmd_free(h, c, 0);
+			return error;
 		}
 		}
 		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 
 		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 
 				sizeof(ida_ioctl_t), 
 				sizeof(ida_ioctl_t), 
@@ -1266,18 +1292,12 @@ static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
 	case DIAG_PASS_THRU:
 	case DIAG_PASS_THRU:
 	case COLLECT_BUFFER:
 	case COLLECT_BUFFER:
 	case WRITE_FLASH_ROM:
 	case WRITE_FLASH_ROM:
-		p = kmalloc(io->sg[0].size, GFP_KERNEL);
-		if (!p) 
- 		{ 
-                        error = -ENOMEM; 
-                        cmd_free(h, c, 0);
-                        return(error);
+		p = memdup_user(io->sg[0].addr, io->sg[0].size);
+		if (IS_ERR(p)) {
+			error = PTR_ERR(p);
+			cmd_free(h, c, 0);
+			return error;
                 }
                 }
-		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
-			kfree(p);
-                        cmd_free(h, c, 0);
-			return -EFAULT;
-		}
 		c->req.sg[0].size = io->sg[0].size;
 		c->req.sg[0].size = io->sg[0].size;
 		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
 		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
 			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
 			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
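
memdup_user() collapses the kmalloc()/copy_from_user()/kfree-on-error dance in the ioctl pass-through paths into a single call that returns either the kernel copy or an ERR_PTR. A minimal before/after sketch with hypothetical helper names, assuming GFP_KERNEL context:

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/uaccess.h>

/* before: allocate, copy, and unwind by hand */
static void *copy_in_old(const void __user *uaddr, size_t len, int *error)
{
	void *p = kmalloc(len, GFP_KERNEL);

	if (!p) {
		*error = -ENOMEM;
		return NULL;
	}
	if (copy_from_user(p, uaddr, len)) {
		kfree(p);
		*error = -EFAULT;
		return NULL;
	}
	return p;
}

/* after: one call; failures come back as an ERR_PTR */
static void *copy_in_new(const void __user *uaddr, size_t len, int *error)
{
	void *p = memdup_user(uaddr, len);

	if (IS_ERR(p)) {
		*error = PTR_ERR(p);
		return NULL;
	}
	return p;
}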

+ 4 - 4
drivers/block/drbd/drbd_actlog.c

@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	md_io.error = 0;
 	md_io.error = 0;
 
 
 	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
 	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-		rw |= (1 << BIO_RW_BARRIER);
-	rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
+		rw |= REQ_HARDBARRIER;
+	rw |= REQ_UNPLUG | REQ_SYNC;
 
 
  retry:
  retry:
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio = bio_alloc(GFP_NOIO, 1);
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	/* check for unsupported barrier op.
 	/* check for unsupported barrier op.
 	 * would rather check on EOPNOTSUPP, but that is not reliable.
 	 * would rather check on EOPNOTSUPP, but that is not reliable.
 	 * don't try again for ANY return value != 0 */
 	 * don't try again for ANY return value != 0 */
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
+	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
 		/* Try again with no barrier */
 		/* Try again with no barrier */
 		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
 		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
 		set_bit(MD_NO_BARRIER, &mdev->flags);
 		set_bit(MD_NO_BARRIER, &mdev->flags);
-		rw &= ~(1 << BIO_RW_BARRIER);
+		rw &= ~REQ_HARDBARRIER;
 		bio_put(bio);
 		bio_put(bio);
 		goto retry;
 		goto retry;
 	}
 	}
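
The drbd hunks here (and in drbd_receiver.c and drbd_req.c below) are mechanical fallout from the block-layer flag unification in this merge: the per-bio BIO_RW_* bit numbers are gone and bi_rw now carries the same REQ_* flags the request layer uses, so drivers set and test them directly. A minimal sketch of the new style with hypothetical helpers, assuming the 2.6.36 <linux/blk_types.h> definitions:

#include <linux/bio.h>
#include <linux/blk_types.h>

static void mark_meta_write(struct bio *bio, bool want_barrier)
{
	/* was: rw |= (1 << BIO_RW_UNPLUG) | (1 << BIO_RW_SYNCIO); */
	bio->bi_rw |= REQ_UNPLUG | REQ_SYNC;

	if (want_barrier)
		bio->bi_rw |= REQ_HARDBARRIER;	/* was 1 << BIO_RW_BARRIER */
}

static bool is_barrier(const struct bio *bio)
{
	/* was: bio_rw_flagged(bio, BIO_RW_BARRIER) */
	return bio->bi_rw & REQ_HARDBARRIER;
}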

+ 1 - 15
drivers/block/drbd/drbd_int.h

@@ -550,12 +550,6 @@ struct p_delay_probe {
 	u32	offset;	 /* usecs the probe got sent after the reference time point */
 	u32	offset;	 /* usecs the probe got sent after the reference time point */
 } __packed;
 } __packed;
 
 
-struct delay_probe {
-	struct list_head list;
-	unsigned int seq_num;
-	struct timeval time;
-};
-
 /* DCBP: Drbd Compressed Bitmap Packet ... */
 /* DCBP: Drbd Compressed Bitmap Packet ... */
 static inline enum drbd_bitmap_code
 static inline enum drbd_bitmap_code
 DCBP_get_code(struct p_compressed_bm *p)
 DCBP_get_code(struct p_compressed_bm *p)
@@ -942,11 +936,9 @@ struct drbd_conf {
 	unsigned int ko_count;
 	unsigned int ko_count;
 	struct drbd_work  resync_work,
 	struct drbd_work  resync_work,
 			  unplug_work,
 			  unplug_work,
-			  md_sync_work,
-			  delay_probe_work;
+			  md_sync_work;
 	struct timer_list resync_timer;
 	struct timer_list resync_timer;
 	struct timer_list md_sync_timer;
 	struct timer_list md_sync_timer;
-	struct timer_list delay_probe_timer;
 
 
 	/* Used after attach while negotiating new disk state. */
 	/* Used after attach while negotiating new disk state. */
 	union drbd_state new_state_tmp;
 	union drbd_state new_state_tmp;
@@ -1062,12 +1054,6 @@ struct drbd_conf {
 	u64 ed_uuid; /* UUID of the exposed data */
 	u64 ed_uuid; /* UUID of the exposed data */
 	struct mutex state_mutex;
 	struct mutex state_mutex;
 	char congestion_reason;  /* Why we where congested... */
 	char congestion_reason;  /* Why we where congested... */
-	struct list_head delay_probes; /* protected by peer_seq_lock */
-	int data_delay;   /* Delay of packets on the data-sock behind meta-sock */
-	unsigned int delay_seq; /* To generate sequence numbers of delay probes */
-	struct timeval dps_time; /* delay-probes-start-time */
-	unsigned int dp_volume_last;  /* send_cnt of last delay probe */
-	int c_sync_rate; /* current resync rate after delay_probe magic */
 };
 };
 
 
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)

+ 21 - 81
drivers/block/drbd/drbd_main.c

@@ -2184,43 +2184,6 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
 	return ok;
 	return ok;
 }
 }
 
 
-static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds)
-{
-	struct p_delay_probe dp;
-	int offset, ok = 0;
-	struct timeval now;
-
-	mutex_lock(&ds->mutex);
-	if (likely(ds->socket)) {
-		do_gettimeofday(&now);
-		offset = now.tv_usec - mdev->dps_time.tv_usec +
-			 (now.tv_sec - mdev->dps_time.tv_sec) * 1000000;
-		dp.seq_num  = cpu_to_be32(mdev->delay_seq);
-		dp.offset   = cpu_to_be32(offset);
-
-		ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE,
-				    (struct p_header *)&dp, sizeof(dp), 0);
-	}
-	mutex_unlock(&ds->mutex);
-
-	return ok;
-}
-
-static int drbd_send_delay_probes(struct drbd_conf *mdev)
-{
-	int ok;
-
-	mdev->delay_seq++;
-	do_gettimeofday(&mdev->dps_time);
-	ok = drbd_send_delay_probe(mdev, &mdev->meta);
-	ok = ok && drbd_send_delay_probe(mdev, &mdev->data);
-
-	mdev->dp_volume_last = mdev->send_cnt;
-	mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10);
-
-	return ok;
-}
-
 /* called on sndtimeo
 /* called on sndtimeo
  * returns FALSE if we should retry,
  * returns FALSE if we should retry,
  * TRUE if we think connection is dead
  * TRUE if we think connection is dead
@@ -2369,31 +2332,6 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
 	return 1;
 	return 1;
 }
 }
 
 
-static void consider_delay_probes(struct drbd_conf *mdev)
-{
-	if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93)
-		return;
-
-	if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt)
-		drbd_send_delay_probes(mdev);
-}
-
-static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-	if (!cancel && mdev->state.conn == C_SYNC_SOURCE)
-		drbd_send_delay_probes(mdev);
-
-	return 1;
-}
-
-static void delay_probe_timer_fn(unsigned long data)
-{
-	struct drbd_conf *mdev = (struct drbd_conf *) data;
-
-	if (list_empty(&mdev->delay_probe_work.list))
-		drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work);
-}
-
 /* Used to send write requests
 /* Used to send write requests
  * R_PRIMARY -> Peer	(P_DATA)
  * R_PRIMARY -> Peer	(P_DATA)
  */
  */
@@ -2425,15 +2363,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 	/* NOTE: no need to check if barriers supported here as we would
 	/* NOTE: no need to check if barriers supported here as we would
 	 *       not pass the test in make_request_common in that case
 	 *       not pass the test in make_request_common in that case
 	 */
 	 */
-	if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
+	if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
 		dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
 		dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
 		/* dp_flags |= DP_HARDBARRIER; */
 		/* dp_flags |= DP_HARDBARRIER; */
 	}
 	}
-	if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
+	if (req->master_bio->bi_rw & REQ_SYNC)
 		dp_flags |= DP_RW_SYNC;
 		dp_flags |= DP_RW_SYNC;
 	/* for now handle SYNCIO and UNPLUG
 	/* for now handle SYNCIO and UNPLUG
 	 * as if they still were one and the same flag */
 	 * as if they still were one and the same flag */
-	if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
+	if (req->master_bio->bi_rw & REQ_UNPLUG)
 		dp_flags |= DP_RW_SYNC;
 		dp_flags |= DP_RW_SYNC;
 	if (mdev->state.conn >= C_SYNC_SOURCE &&
 	if (mdev->state.conn >= C_SYNC_SOURCE &&
 	    mdev->state.conn <= C_PAUSED_SYNC_T)
 	    mdev->state.conn <= C_PAUSED_SYNC_T)
@@ -2457,9 +2395,6 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
 
 
 	drbd_put_data_sock(mdev);
 	drbd_put_data_sock(mdev);
 
 
-	if (ok)
-		consider_delay_probes(mdev);
-
 	return ok;
 	return ok;
 }
 }
 
 
@@ -2506,9 +2441,6 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
 
 
 	drbd_put_data_sock(mdev);
 	drbd_put_data_sock(mdev);
 
 
-	if (ok)
-		consider_delay_probes(mdev);
-
 	return ok;
 	return ok;
 }
 }
 
 
@@ -2604,6 +2536,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 	unsigned long flags;
 	unsigned long flags;
 	int rv = 0;
 	int rv = 0;
 
 
+	lock_kernel();
 	spin_lock_irqsave(&mdev->req_lock, flags);
 	spin_lock_irqsave(&mdev->req_lock, flags);
 	/* to have a stable mdev->state.role
 	/* to have a stable mdev->state.role
 	 * and no race with updating open_cnt */
 	 * and no race with updating open_cnt */
@@ -2618,6 +2551,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 	if (!rv)
 	if (!rv)
 		mdev->open_cnt++;
 		mdev->open_cnt++;
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
+	unlock_kernel();
 
 
 	return rv;
 	return rv;
 }
 }
@@ -2625,7 +2559,9 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
 static int drbd_release(struct gendisk *gd, fmode_t mode)
 static int drbd_release(struct gendisk *gd, fmode_t mode)
 {
 {
 	struct drbd_conf *mdev = gd->private_data;
 	struct drbd_conf *mdev = gd->private_data;
+	lock_kernel();
 	mdev->open_cnt--;
 	mdev->open_cnt--;
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
@@ -2660,9 +2596,20 @@ static void drbd_unplug_fn(struct request_queue *q)
 
 
 static void drbd_set_defaults(struct drbd_conf *mdev)
 static void drbd_set_defaults(struct drbd_conf *mdev)
 {
 {
-	mdev->sync_conf.after      = DRBD_AFTER_DEF;
-	mdev->sync_conf.rate       = DRBD_RATE_DEF;
-	mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
+	/* This way we get a compile error when sync_conf grows,
+	   and we forgot to initialize it here */
+	mdev->sync_conf = (struct syncer_conf) {
+		/* .rate = */		DRBD_RATE_DEF,
+		/* .after = */		DRBD_AFTER_DEF,
+		/* .al_extents = */	DRBD_AL_EXTENTS_DEF,
+		/* .verify_alg = */	{}, 0,
+		/* .cpu_mask = */	{}, 0,
+		/* .csums_alg = */	{}, 0,
+		/* .use_rle = */	0
+	};
+
+	/* Have to use that way, because the layout differs between
+	   big endian and little endian */
 	mdev->state = (union drbd_state) {
 	mdev->state = (union drbd_state) {
 		{ .role = R_SECONDARY,
 		{ .role = R_SECONDARY,
 		  .peer = R_UNKNOWN,
 		  .peer = R_UNKNOWN,
@@ -2721,24 +2668,17 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 	INIT_LIST_HEAD(&mdev->unplug_work.list);
 	INIT_LIST_HEAD(&mdev->unplug_work.list);
 	INIT_LIST_HEAD(&mdev->md_sync_work.list);
 	INIT_LIST_HEAD(&mdev->md_sync_work.list);
 	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
 	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
-	INIT_LIST_HEAD(&mdev->delay_probes);
-	INIT_LIST_HEAD(&mdev->delay_probe_work.list);
 
 
 	mdev->resync_work.cb  = w_resync_inactive;
 	mdev->resync_work.cb  = w_resync_inactive;
 	mdev->unplug_work.cb  = w_send_write_hint;
 	mdev->unplug_work.cb  = w_send_write_hint;
 	mdev->md_sync_work.cb = w_md_sync;
 	mdev->md_sync_work.cb = w_md_sync;
 	mdev->bm_io_work.w.cb = w_bitmap_io;
 	mdev->bm_io_work.w.cb = w_bitmap_io;
-	mdev->delay_probe_work.cb = w_delay_probes;
 	init_timer(&mdev->resync_timer);
 	init_timer(&mdev->resync_timer);
 	init_timer(&mdev->md_sync_timer);
 	init_timer(&mdev->md_sync_timer);
-	init_timer(&mdev->delay_probe_timer);
 	mdev->resync_timer.function = resync_timer_fn;
 	mdev->resync_timer.function = resync_timer_fn;
 	mdev->resync_timer.data = (unsigned long) mdev;
 	mdev->resync_timer.data = (unsigned long) mdev;
 	mdev->md_sync_timer.function = md_sync_timer_fn;
 	mdev->md_sync_timer.function = md_sync_timer_fn;
 	mdev->md_sync_timer.data = (unsigned long) mdev;
 	mdev->md_sync_timer.data = (unsigned long) mdev;
-	mdev->delay_probe_timer.function = delay_probe_timer_fn;
-	mdev->delay_probe_timer.data = (unsigned long) mdev;
-
 
 
 	init_waitqueue_head(&mdev->misc_wait);
 	init_waitqueue_head(&mdev->misc_wait);
 	init_waitqueue_head(&mdev->state_wait);
 	init_waitqueue_head(&mdev->state_wait);
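
The drbd_set_defaults() hunk above swaps piecemeal member assignments for one compound-literal assignment, so every member of sync_conf gets an explicit default and a later-added field cannot silently keep a stale value. A small userspace sketch of the same idiom; the struct, its fields and the numeric defaults below are placeholders, not DRBD's real ones:

#include <string.h>
#include <stdio.h>

struct sync_defaults {		/* stand-in for syncer_conf */
	int rate;
	int after;
	int al_extents;
	char verify_alg[64];
};

static void set_defaults(struct sync_defaults *sc)
{
	/* one assignment resets every member to a known value */
	*sc = (struct sync_defaults) {
		.rate       = 250,
		.after      = -1,
		.al_extents = 127,
		.verify_alg = "",
	};
}

int main(void)
{
	struct sync_defaults sc;

	memset(&sc, 0xff, sizeof(sc));	/* garbage in */
	set_defaults(&sc);
	printf("rate=%d after=%d al_extents=%d\n",
	       sc.rate, sc.after, sc.al_extents);
	return 0;
}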

+ 0 - 4
drivers/block/drbd/drbd_nl.c

@@ -1557,10 +1557,6 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
 		sc.rate       = DRBD_RATE_DEF;
 		sc.rate       = DRBD_RATE_DEF;
 		sc.after      = DRBD_AFTER_DEF;
 		sc.after      = DRBD_AFTER_DEF;
 		sc.al_extents = DRBD_AL_EXTENTS_DEF;
 		sc.al_extents = DRBD_AL_EXTENTS_DEF;
-		sc.dp_volume  = DRBD_DP_VOLUME_DEF;
-		sc.dp_interval = DRBD_DP_INTERVAL_DEF;
-		sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF;
-		sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF;
 	} else
 	} else
 		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
 		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
 
 

+ 2 - 17
drivers/block/drbd/drbd_proc.c

@@ -73,21 +73,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
 	seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
 	seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
 	/* if more than 1 GB display in MB */
 	/* if more than 1 GB display in MB */
 	if (mdev->rs_total > 0x100000L)
 	if (mdev->rs_total > 0x100000L)
-		seq_printf(seq, "(%lu/%lu)M",
+		seq_printf(seq, "(%lu/%lu)M\n\t",
 			    (unsigned long) Bit2KB(rs_left >> 10),
 			    (unsigned long) Bit2KB(rs_left >> 10),
 			    (unsigned long) Bit2KB(mdev->rs_total >> 10));
 			    (unsigned long) Bit2KB(mdev->rs_total >> 10));
 	else
 	else
-		seq_printf(seq, "(%lu/%lu)K",
+		seq_printf(seq, "(%lu/%lu)K\n\t",
 			    (unsigned long) Bit2KB(rs_left),
 			    (unsigned long) Bit2KB(rs_left),
 			    (unsigned long) Bit2KB(mdev->rs_total));
 			    (unsigned long) Bit2KB(mdev->rs_total));
 
 
-	if (mdev->state.conn == C_SYNC_TARGET)
-		seq_printf(seq, " queue_delay: %d.%d ms\n\t",
-			   mdev->data_delay / 1000,
-			   (mdev->data_delay % 1000) / 100);
-	else if (mdev->state.conn == C_SYNC_SOURCE)
-		seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq);
-
 	/* see drivers/md/md.c
 	/* see drivers/md/md.c
 	 * We do not want to overflow, so the order of operands and
 	 * We do not want to overflow, so the order of operands and
 	 * the * 100 / 100 trick are important. We do a +1 to be
 	 * the * 100 / 100 trick are important. We do a +1 to be
@@ -135,14 +128,6 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
 	else
 	else
 		seq_printf(seq, " (%ld)", dbdt);
 		seq_printf(seq, " (%ld)", dbdt);
 
 
-	if (mdev->state.conn == C_SYNC_TARGET) {
-		if (mdev->c_sync_rate > 1000)
-			seq_printf(seq, " want: %d,%03d",
-				   mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
-		else
-			seq_printf(seq, " want: %d", mdev->c_sync_rate);
-	}
-
 	seq_printf(seq, " K/sec\n");
 	seq_printf(seq, " K/sec\n");
 }
 }
 
 

+ 29 - 106
drivers/block/drbd/drbd_receiver.c

@@ -1180,7 +1180,7 @@ next_bio:
 	bio->bi_sector = sector;
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
 	/* we special case some flags in the multi-bio case, see below
-	 * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
 	bio->bi_rw = rw;
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1209,16 +1209,16 @@ next_bio:
 		bios = bios->bi_next;
 		bios = bios->bi_next;
 		bio->bi_next = NULL;
 		bio->bi_next = NULL;
 
 
-		/* strip off BIO_RW_UNPLUG unless it is the last bio */
+		/* strip off REQ_UNPLUG unless it is the last bio */
 		if (bios)
 		if (bios)
-			bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+			bio->bi_rw &= ~REQ_UNPLUG;
 
 
 		drbd_generic_make_request(mdev, fault_type, bio);
 		drbd_generic_make_request(mdev, fault_type, bio);
 
 
-		/* strip off BIO_RW_BARRIER,
+		/* strip off REQ_HARDBARRIER,
 		 * unless it is the first or last bio */
 		 * unless it is the first or last bio */
 		if (bios && bios->bi_next)
 		if (bios && bios->bi_next)
-			bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	} while (bios);
 	maybe_kick_lo(mdev);
 	maybe_kick_lo(mdev);
 	return 0;
 	return 0;
@@ -1233,7 +1233,7 @@ fail:
 }
 }
 
 
 /**
 /**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
  * @mdev:	DRBD device.
  * @mdev:	DRBD device.
  * @w:		work object.
  * @w:		work object.
  * @cancel:	The connection will be closed anyways (unused in this callback)
  * @cancel:	The connection will be closed anyways (unused in this callback)
@@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
 	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
 	   so that we can finish that epoch in drbd_may_finish_epoch().
 	   so that we can finish that epoch in drbd_may_finish_epoch().
 	   That is necessary if we already have a long chain of Epochs, before
 	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that BIO_RW_BARRIER is actually not supported */
+	   we realize that REQ_HARDBARRIER is actually not supported */
 
 
 	/* As long as the -ENOTSUPP on the barrier is reported immediately
 	/* As long as the -ENOTSUPP on the barrier is reported immediately
 	   that will never trigger. If it is reported late, we will just
 	   that will never trigger. If it is reported late, we will just
@@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
 		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
 		if (epoch == e->epoch) {
 		if (epoch == e->epoch) {
 			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
 			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= (1<<BIO_RW_BARRIER);
+			rw |= REQ_HARDBARRIER;
 			e->flags |= EE_IS_BARRIER;
 			e->flags |= EE_IS_BARRIER;
 		} else {
 		} else {
 			if (atomic_read(&epoch->epoch_size) > 1 ||
 			if (atomic_read(&epoch->epoch_size) > 1 ||
 			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
 			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
 				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
 				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
 				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
 				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= (1<<BIO_RW_BARRIER);
+				rw |= REQ_HARDBARRIER;
 				e->flags |= EE_IS_BARRIER;
 				e->flags |= EE_IS_BARRIER;
 			}
 			}
 		}
 		}
@@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
 	dp_flags = be32_to_cpu(p->dp_flags);
 	dp_flags = be32_to_cpu(p->dp_flags);
 	if (dp_flags & DP_HARDBARRIER) {
 	if (dp_flags & DP_HARDBARRIER) {
 		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
 		dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
-		/* rw |= (1<<BIO_RW_BARRIER); */
+		/* rw |= REQ_HARDBARRIER; */
 	}
 	}
 	if (dp_flags & DP_RW_SYNC)
 	if (dp_flags & DP_RW_SYNC)
-		rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		e->flags |= EE_MAY_SET_IN_SYNC;
 		e->flags |= EE_MAY_SET_IN_SYNC;
 
 
@@ -3555,14 +3555,15 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
 	return ok;
 	return ok;
 }
 }
 
 
-static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
+static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent)
 {
 {
 	/* TODO zero copy sink :) */
 	/* TODO zero copy sink :) */
 	static char sink[128];
 	static char sink[128];
 	int size, want, r;
 	int size, want, r;
 
 
-	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
-	     h->command, h->length);
+	if (!silent)
+		dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
+		     h->command, h->length);
 
 
 	size = h->length;
 	size = h->length;
 	while (size > 0) {
 	while (size > 0) {
@@ -3574,101 +3575,25 @@ static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
 	return size == 0;
 	return size == 0;
 }
 }
 
 
-static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
-{
-	if (mdev->state.disk >= D_INCONSISTENT)
-		drbd_kick_lo(mdev);
-
-	/* Make sure we've acked all the TCP data associated
-	 * with the data requests being unplugged */
-	drbd_tcp_quickack(mdev->data.socket);
-
-	return TRUE;
-}
-
-static void timeval_sub_us(struct timeval* tv, unsigned int us)
+static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
 {
 {
-	tv->tv_sec -= us / 1000000;
-	us = us % 1000000;
-	if (tv->tv_usec > us) {
-		tv->tv_usec += 1000000;
-		tv->tv_sec--;
-	}
-	tv->tv_usec -= us;
+	return receive_skip_(mdev, h, 0);
 }
 }
 
 
-static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p)
+static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h)
 {
 {
-	struct delay_probe *dp;
-	struct list_head *le;
-	struct timeval now;
-	int seq_num;
-	int offset;
-	int data_delay;
-
-	seq_num = be32_to_cpu(p->seq_num);
-	offset  = be32_to_cpu(p->offset);
-
-	spin_lock(&mdev->peer_seq_lock);
-	if (!list_empty(&mdev->delay_probes)) {
-		if (from == USE_DATA_SOCKET)
-			le = mdev->delay_probes.next;
-		else
-			le = mdev->delay_probes.prev;
-
-		dp = list_entry(le, struct delay_probe, list);
-
-		if (dp->seq_num == seq_num) {
-			list_del(le);
-			spin_unlock(&mdev->peer_seq_lock);
-			do_gettimeofday(&now);
-			timeval_sub_us(&now, offset);
-			data_delay =
-				now.tv_usec - dp->time.tv_usec +
-				(now.tv_sec - dp->time.tv_sec) * 1000000;
-
-			if (data_delay > 0)
-				mdev->data_delay = data_delay;
-
-			kfree(dp);
-			return;
-		}
-
-		if (dp->seq_num > seq_num) {
-			spin_unlock(&mdev->peer_seq_lock);
-			dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n");
-			return; /* Do not alloca a struct delay_probe.... */
-		}
-	}
-	spin_unlock(&mdev->peer_seq_lock);
-
-	dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO);
-	if (!dp) {
-		dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n");
-		return;
-	}
-
-	dp->seq_num = seq_num;
-	do_gettimeofday(&dp->time);
-	timeval_sub_us(&dp->time, offset);
-
-	spin_lock(&mdev->peer_seq_lock);
-	if (from == USE_DATA_SOCKET)
-		list_add(&dp->list, &mdev->delay_probes);
-	else
-		list_add_tail(&dp->list, &mdev->delay_probes);
-	spin_unlock(&mdev->peer_seq_lock);
+	return receive_skip_(mdev, h, 1);
 }
 }
 
 
-static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h)
+static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
 {
 {
-	struct p_delay_probe *p = (struct p_delay_probe *)h;
+	if (mdev->state.disk >= D_INCONSISTENT)
+		drbd_kick_lo(mdev);
 
 
-	ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
-	if (drbd_recv(mdev, h->payload, h->length) != h->length)
-		return FALSE;
+	/* Make sure we've acked all the TCP data associated
+	 * with the data requests being unplugged */
+	drbd_tcp_quickack(mdev->data.socket);
 
 
-	got_delay_probe(mdev, USE_DATA_SOCKET, p);
 	return TRUE;
 	return TRUE;
 }
 }
 
 
@@ -3695,7 +3620,7 @@ static drbd_cmd_handler_f drbd_default_handler[] = {
 	[P_OV_REQUEST]      = receive_DataRequest,
 	[P_OV_REQUEST]      = receive_DataRequest,
 	[P_OV_REPLY]        = receive_DataRequest,
 	[P_OV_REPLY]        = receive_DataRequest,
 	[P_CSUM_RS_REQUEST]    = receive_DataRequest,
 	[P_CSUM_RS_REQUEST]    = receive_DataRequest,
-	[P_DELAY_PROBE]     = receive_delay_probe,
+	[P_DELAY_PROBE]     = receive_skip_silent,
 	/* anything missing from this table is in
 	/* anything missing from this table is in
 	 * the asender_tbl, see get_asender_cmd */
 	 * the asender_tbl, see get_asender_cmd */
 	[P_MAX_CMD]	    = NULL,
 	[P_MAX_CMD]	    = NULL,
@@ -4472,11 +4397,9 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
 	return TRUE;
 	return TRUE;
 }
 }
 
 
-static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
+static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h)
 {
 {
-	struct p_delay_probe *p = (struct p_delay_probe *)h;
-
-	got_delay_probe(mdev, USE_META_SOCKET, p);
+	/* IGNORE */
 	return TRUE;
 	return TRUE;
 }
 }
 
 
@@ -4504,7 +4427,7 @@ static struct asender_cmd *get_asender_cmd(int cmd)
 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
-	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_delay_probe_m },
+	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_something_to_ignore_m },
 	[P_MAX_CMD]	    = { 0, NULL },
 	[P_MAX_CMD]	    = { 0, NULL },
 	};
 	};
 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)

+ 1 - 1
drivers/block/drbd/drbd_req.c

@@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 	 * because of those XXX, this is not yet enabled,
 	 * because of those XXX, this is not yet enabled,
 	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
 	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
 	 */
 	 */
-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
 		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
 		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
 		bio_endio(bio, -EOPNOTSUPP);
 		bio_endio(bio, -EOPNOTSUPP);
 		return 0;
 		return 0;

+ 1 - 14
drivers/block/drbd/drbd_worker.c

@@ -424,18 +424,6 @@ void resync_timer_fn(unsigned long data)
 		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
 		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
 }
 }
 
 
-static int calc_resync_rate(struct drbd_conf *mdev)
-{
-	int d = mdev->data_delay / 1000; /* us -> ms */
-	int td = mdev->sync_conf.throttle_th * 100;  /* 0.1s -> ms */
-	int hd = mdev->sync_conf.hold_off_th * 100;  /* 0.1s -> ms */
-	int cr = mdev->sync_conf.rate;
-
-	return d <= td ? cr :
-		d >= hd ? 0 :
-		cr + (cr * (td - d) / (hd - td));
-}
-
 int w_make_resync_request(struct drbd_conf *mdev,
 int w_make_resync_request(struct drbd_conf *mdev,
 		struct drbd_work *w, int cancel)
 		struct drbd_work *w, int cancel)
 {
 {
@@ -473,8 +461,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
 	max_segment_size = mdev->agreed_pro_version < 94 ?
 	max_segment_size = mdev->agreed_pro_version < 94 ?
 		queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
 		queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
 
 
-	mdev->c_sync_rate = calc_resync_rate(mdev);
-	number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
+	number = SLEEP_TIME * mdev->sync_conf.rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
 	pe = atomic_read(&mdev->rs_pending_cnt);
 	pe = atomic_read(&mdev->rs_pending_cnt);
 
 
 	mutex_lock(&mdev->data.mutex);
 	mutex_lock(&mdev->data.mutex);

+ 70 - 112
drivers/block/floppy.c

@@ -178,6 +178,7 @@ static int print_unex = 1;
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/mm.h>
 #include <linux/bio.h>
 #include <linux/bio.h>
+#include <linux/smp_lock.h>
 #include <linux/string.h>
 #include <linux/string.h>
 #include <linux/jiffies.h>
 #include <linux/jiffies.h>
 #include <linux/fcntl.h>
 #include <linux/fcntl.h>
@@ -514,8 +515,6 @@ static unsigned long fdc_busy;
 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
 static DECLARE_WAIT_QUEUE_HEAD(command_done);
 static DECLARE_WAIT_QUEUE_HEAD(command_done);
 
 
-#define NO_SIGNAL (!interruptible || !signal_pending(current))
-
 /* Errors during formatting are counted here. */
 /* Errors during formatting are counted here. */
 static int format_errors;
 static int format_errors;
 
 
@@ -539,7 +538,7 @@ static int max_buffer_sectors;
 
 
 static int *errors;
 static int *errors;
 typedef void (*done_f)(int);
 typedef void (*done_f)(int);
-static struct cont_t {
+static const struct cont_t {
 	void (*interrupt)(void);
 	void (*interrupt)(void);
 				/* this is called after the interrupt of the
 				/* this is called after the interrupt of the
 				 * main command */
 				 * main command */
@@ -578,7 +577,7 @@ static void reset_fdc(void);
 #define NEED_1_RECAL	-2
 #define NEED_1_RECAL	-2
 #define NEED_2_RECAL	-3
 #define NEED_2_RECAL	-3
 
 
-static int usage_count;
+static atomic_t usage_count = ATOMIC_INIT(0);
 
 
 /* buffer related variables */
 /* buffer related variables */
 static int buffer_track = -1;
 static int buffer_track = -1;
@@ -858,36 +857,15 @@ static void set_fdc(int drive)
 }
 }
 
 
 /* locks the driver */
 /* locks the driver */
-static int _lock_fdc(int drive, bool interruptible, int line)
+static int lock_fdc(int drive, bool interruptible)
 {
 {
-	if (!usage_count) {
-		pr_err("Trying to lock fdc while usage count=0 at line %d\n",
-		       line);
+	if (WARN(atomic_read(&usage_count) == 0,
+		 "Trying to lock fdc while usage count=0\n"))
 		return -1;
 		return -1;
-	}
-
-	if (test_and_set_bit(0, &fdc_busy)) {
-		DECLARE_WAITQUEUE(wait, current);
-		add_wait_queue(&fdc_wait, &wait);
-
-		for (;;) {
-			set_current_state(TASK_INTERRUPTIBLE);
-
-			if (!test_and_set_bit(0, &fdc_busy))
-				break;
 
 
-			schedule();
-
-			if (!NO_SIGNAL) {
-				remove_wait_queue(&fdc_wait, &wait);
-				return -EINTR;
-			}
-		}
+	if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy)))
+		return -EINTR;
 
 
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&fdc_wait, &wait);
-		flush_scheduled_work();
-	}
 	command_status = FD_COMMAND_NONE;
 	command_status = FD_COMMAND_NONE;
 
 
 	__reschedule_timeout(drive, "lock fdc");
 	__reschedule_timeout(drive, "lock fdc");
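
The lock_fdc() rewrite above is the standard conversion from a hand-rolled add_wait_queue()/set_current_state()/schedule() loop to wait_event_interruptible(), which does the prepare-to-wait, signal check and cleanup itself. A minimal sketch of the pattern, assuming a wait queue guarding a busy bit as in the floppy driver; claim_unit() and release_unit() are hypothetical names:

#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/sched.h>

static unsigned long busy_flag;
static DECLARE_WAIT_QUEUE_HEAD(busy_wait);

/* sleep until we win the bit, or a signal arrives */
static int claim_unit(void)
{
	if (wait_event_interruptible(busy_wait,
				     !test_and_set_bit(0, &busy_flag)))
		return -EINTR;	/* interrupted by a signal */
	return 0;
}

static void release_unit(void)
{
	clear_bit(0, &busy_flag);
	wake_up(&busy_wait);	/* let the next waiter re-test the condition */
}
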
@@ -895,11 +873,8 @@ static int _lock_fdc(int drive, bool interruptible, int line)
 	return 0;
 	return 0;
 }
 }
 
 
-#define lock_fdc(drive, interruptible)			\
-	_lock_fdc(drive, interruptible, __LINE__)
-
 /* unlocks the driver */
 /* unlocks the driver */
-static inline void unlock_fdc(void)
+static void unlock_fdc(void)
 {
 {
 	unsigned long flags;
 	unsigned long flags;
 
 
@@ -1224,7 +1199,7 @@ static int need_more_output(void)
 /* Set perpendicular mode as required, based on data rate, if supported.
 /* Set perpendicular mode as required, based on data rate, if supported.
  * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
  * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
  */
  */
-static inline void perpendicular_mode(void)
+static void perpendicular_mode(void)
 {
 {
 	unsigned char perp_mode;
 	unsigned char perp_mode;
 
 
@@ -1995,14 +1970,14 @@ static void do_wakeup(void)
 	wake_up(&command_done);
 	wake_up(&command_done);
 }
 }
 
 
-static struct cont_t wakeup_cont = {
+static const struct cont_t wakeup_cont = {
 	.interrupt	= empty,
 	.interrupt	= empty,
 	.redo		= do_wakeup,
 	.redo		= do_wakeup,
 	.error		= empty,
 	.error		= empty,
 	.done		= (done_f)empty
 	.done		= (done_f)empty
 };
 };
 
 
-static struct cont_t intr_cont = {
+static const struct cont_t intr_cont = {
 	.interrupt	= empty,
 	.interrupt	= empty,
 	.redo		= process_fd_request,
 	.redo		= process_fd_request,
 	.error		= empty,
 	.error		= empty,
@@ -2015,25 +1990,10 @@ static int wait_til_done(void (*handler)(void), bool interruptible)
 
 
 	schedule_bh(handler);
 	schedule_bh(handler);
 
 
-	if (command_status < 2 && NO_SIGNAL) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		add_wait_queue(&command_done, &wait);
-		for (;;) {
-			set_current_state(interruptible ?
-					  TASK_INTERRUPTIBLE :
-					  TASK_UNINTERRUPTIBLE);
-
-			if (command_status >= 2 || !NO_SIGNAL)
-				break;
-
-			is_alive(__func__, "");
-			schedule();
-		}
-
-		set_current_state(TASK_RUNNING);
-		remove_wait_queue(&command_done, &wait);
-	}
+	if (interruptible)
+		wait_event_interruptible(command_done, command_status >= 2);
+	else
+		wait_event(command_done, command_status >= 2);
 
 
 	if (command_status < 2) {
 	if (command_status < 2) {
 		cancel_activity();
 		cancel_activity();
@@ -2223,7 +2183,7 @@ static void redo_format(void)
 	debugt(__func__, "queue format request");
 	debugt(__func__, "queue format request");
 }
 }
 
 
-static struct cont_t format_cont = {
+static const struct cont_t format_cont = {
 	.interrupt	= format_interrupt,
 	.interrupt	= format_interrupt,
 	.redo		= redo_format,
 	.redo		= redo_format,
 	.error		= bad_flp_intr,
 	.error		= bad_flp_intr,
@@ -2583,10 +2543,8 @@ static int make_raw_rw_request(void)
 	int tracksize;
 	int tracksize;
 	int ssize;
 	int ssize;
 
 
-	if (max_buffer_sectors == 0) {
-		pr_info("VFS: Block I/O scheduled on unopened device\n");
+	if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
 		return 0;
 		return 0;
-	}
 
 
 	set_fdc((long)current_req->rq_disk->private_data);
 	set_fdc((long)current_req->rq_disk->private_data);
 
 
@@ -2921,7 +2879,7 @@ do_request:
 	return;
 }
 
-static struct cont_t rw_cont = {
+static const struct cont_t rw_cont = {
 	.interrupt	= rw_interrupt,
 	.redo		= redo_fd_request,
 	.error		= bad_flp_intr,
@@ -2936,19 +2894,16 @@ static void process_fd_request(void)
 
 static void do_fd_request(struct request_queue *q)
 {
-	if (max_buffer_sectors == 0) {
-		pr_info("VFS: %s called on non-open device\n", __func__);
+	if (WARN(max_buffer_sectors == 0,
+		 "VFS: %s called on non-open device\n", __func__))
 		return;
-	}
 
-	if (usage_count == 0) {
-		pr_info("warning: usage count=0, current_req=%p exiting\n",
-			current_req);
-		pr_info("sect=%ld type=%x flags=%x\n",
-			(long)blk_rq_pos(current_req), current_req->cmd_type,
-			current_req->cmd_flags);
+	if (WARN(atomic_read(&usage_count) == 0,
+		 "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n",
+		 current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
+		 current_req->cmd_flags))
 		return;
-	}
+
 	if (test_bit(0, &fdc_busy)) {
 		/* fdc busy, this new request will be treated when the
 		   current one is done */
@@ -2960,7 +2915,7 @@ static void do_fd_request(struct request_queue *q)
 	is_alive(__func__, "");
 }
 
-static struct cont_t poll_cont = {
+static const struct cont_t poll_cont = {
 	.interrupt	= success_and_wakeup,
 	.redo		= floppy_ready,
 	.error		= generic_failure,
@@ -2991,7 +2946,7 @@ static void reset_intr(void)
 	pr_info("weird, reset interrupt called\n");
 }
 
-static struct cont_t reset_cont = {
+static const struct cont_t reset_cont = {
 	.interrupt	= reset_intr,
 	.redo		= success_and_wakeup,
 	.error		= generic_failure,
@@ -3033,7 +2988,7 @@ static inline int fd_copyin(void __user *param, void *address,
 	return copy_from_user(address, param, size) ? -EFAULT : 0;
 }
 
-static inline const char *drive_name(int type, int drive)
+static const char *drive_name(int type, int drive)
 {
 	struct floppy_struct *floppy;
 
@@ -3096,14 +3051,14 @@ static void raw_cmd_done(int flag)
 	generic_done(flag);
 }
 
-static struct cont_t raw_cmd_cont = {
+static const struct cont_t raw_cmd_cont = {
 	.interrupt	= success_and_wakeup,
 	.redo		= floppy_start,
 	.error		= generic_failure,
 	.done		= raw_cmd_done
 };
 
-static inline int raw_cmd_copyout(int cmd, void __user *param,
+static int raw_cmd_copyout(int cmd, void __user *param,
 				  struct floppy_raw_cmd *ptr)
 {
 	int ret;
@@ -3148,7 +3103,7 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
 	}
 }
 
-static inline int raw_cmd_copyin(int cmd, void __user *param,
+static int raw_cmd_copyin(int cmd, void __user *param,
 				 struct floppy_raw_cmd **rcmd)
 {
 	struct floppy_raw_cmd *ptr;
@@ -3266,7 +3221,7 @@ static int invalidate_drive(struct block_device *bdev)
 	return 0;
 }
 
-static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+static int set_geometry(unsigned int cmd, struct floppy_struct *g,
 			       int drive, int type, struct block_device *bdev)
 {
 	int cnt;
@@ -3337,7 +3292,7 @@ static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
 }
 
 /* handle obsolete ioctl's */
-static int ioctl_table[] = {
+static unsigned int ioctl_table[] = {
 	FDCLRPRM,
 	FDSETPRM,
 	FDDEFPRM,
@@ -3365,7 +3320,7 @@ static int ioctl_table[] = {
 	FDTWADDLE
 };
 
-static inline int normalize_ioctl(int *cmd, int *size)
+static int normalize_ioctl(unsigned int *cmd, int *size)
 {
 	int i;
 
@@ -3417,7 +3372,7 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
-static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 		    unsigned long param)
 {
 	int drive = (long)bdev->bd_disk->private_data;
@@ -3593,6 +3548,18 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 	return 0;
 }
 
+static int fd_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = fd_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 static void __init config_types(void)
 {
 	bool has_drive = false;
@@ -3649,6 +3616,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
 	int drive = (long)disk->private_data;
 
+	lock_kernel();
 	mutex_lock(&open_lock);
 	if (UDRS->fd_ref < 0)
 		UDRS->fd_ref = 0;
@@ -3659,6 +3627,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 	if (!UDRS->fd_ref)
 		opened_bdev[drive] = NULL;
 	mutex_unlock(&open_lock);
+	unlock_kernel();
 
 	return 0;
 }
@@ -3676,6 +3645,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	int res = -EBUSY;
 	char *tmp;
 
+	lock_kernel();
 	mutex_lock(&open_lock);
 	old_dev = UDRS->fd_device;
 	if (opened_bdev[drive] && opened_bdev[drive] != bdev)
@@ -3752,6 +3722,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 			goto out;
 	}
 	mutex_unlock(&open_lock);
+	unlock_kernel();
 	return 0;
 out:
 	if (UDRS->fd_ref < 0)
@@ -3762,6 +3733,7 @@ out:
 		opened_bdev[drive] = NULL;
 out2:
 	mutex_unlock(&open_lock);
+	unlock_kernel();
 	return res;
 }
 
@@ -3829,6 +3801,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio.bi_size = size;
 	bio.bi_bdev = bdev;
 	bio.bi_sector = 0;
+	bio.bi_flags = BIO_QUIET;
 	init_completion(&complete);
 	bio.bi_private = &complete;
 	bio.bi_end_io = floppy_rb0_complete;
@@ -3857,10 +3830,10 @@ static int floppy_revalidate(struct gendisk *disk)
 	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
 	    test_bit(drive, &fake_change) || NO_GEOM) {
-		if (usage_count == 0) {
-			pr_info("VFS: revalidate called on non-open device.\n");
+		if (WARN(atomic_read(&usage_count) == 0,
+			 "VFS: revalidate called on non-open device.\n"))
 			return -EFAULT;
-		}
+
 		lock_fdc(drive, false);
 		cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
 		      test_bit(FD_VERIFY_BIT, &UDRS->flags));
@@ -3893,7 +3866,7 @@ static const struct block_device_operations floppy_fops = {
 	.owner			= THIS_MODULE,
 	.open			= floppy_open,
 	.release		= floppy_release,
-	.locked_ioctl		= fd_ioctl,
+	.ioctl			= fd_ioctl,
 	.getgeo			= fd_getgeo,
 	.media_changed		= check_floppy_change,
 	.revalidate_disk	= floppy_revalidate,
@@ -4126,7 +4099,7 @@ static ssize_t floppy_cmos_show(struct device *dev,
 	return sprintf(buf, "%X\n", UDP->cmos);
 }
 
-DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
+static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
 
 static void floppy_device_release(struct device *dev)
 {
@@ -4175,6 +4148,9 @@ static int __init floppy_init(void)
 	int i, unit, drive;
 	int err, dr;
 
+	set_debugt();
+	interruptjiffies = resultjiffies = jiffies;
+
 #if defined(CONFIG_PPC)
 	if (check_legacy_ioport(FDC1))
 		return -ENODEV;
@@ -4353,7 +4329,7 @@ out_unreg_platform_dev:
 	platform_device_unregister(&floppy_device[drive]);
 out_flush_work:
 	flush_scheduled_work();
-	if (usage_count)
+	if (atomic_read(&usage_count))
 		floppy_release_irq_and_dma();
 out_unreg_region:
 	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
@@ -4370,8 +4346,6 @@ out_put_disk:
 	return err;
 }
 
-static DEFINE_SPINLOCK(floppy_usage_lock);
-
 static const struct io_region {
 	int offset;
 	int size;
@@ -4417,14 +4391,8 @@ static void floppy_release_regions(int fdc)
 
 static int floppy_grab_irq_and_dma(void)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&floppy_usage_lock, flags);
-	if (usage_count++) {
-		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+	if (atomic_inc_return(&usage_count) > 1)
 		return 0;
-	}
-	spin_unlock_irqrestore(&floppy_usage_lock, flags);
 
 	/*
 	 * We might have scheduled a free_irq(), wait it to
@@ -4435,9 +4403,7 @@ static int floppy_grab_irq_and_dma(void)
 	if (fd_request_irq()) {
 		DPRINT("Unable to grab IRQ%d for the floppy driver\n",
 		       FLOPPY_IRQ);
-		spin_lock_irqsave(&floppy_usage_lock, flags);
-		usage_count--;
-		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+		atomic_dec(&usage_count);
 		return -1;
 	}
 	if (fd_request_dma()) {
@@ -4447,9 +4413,7 @@ static int floppy_grab_irq_and_dma(void)
 			use_virtual_dma = can_use_virtual_dma = 1;
 		if (!(can_use_virtual_dma & 1)) {
 			fd_free_irq();
-			spin_lock_irqsave(&floppy_usage_lock, flags);
-			usage_count--;
-			spin_unlock_irqrestore(&floppy_usage_lock, flags);
+			atomic_dec(&usage_count);
 			return -1;
 		}
 	}
@@ -4484,9 +4448,7 @@ cleanup:
 	fd_free_dma();
 	while (--fdc >= 0)
 		floppy_release_regions(fdc);
-	spin_lock_irqsave(&floppy_usage_lock, flags);
-	usage_count--;
-	spin_unlock_irqrestore(&floppy_usage_lock, flags);
+	atomic_dec(&usage_count);
 	return -1;
 }
 
@@ -4498,14 +4460,10 @@ static void floppy_release_irq_and_dma(void)
 #endif
 	long tmpsize;
 	unsigned long tmpaddr;
-	unsigned long flags;
 
-	spin_lock_irqsave(&floppy_usage_lock, flags);
-	if (--usage_count) {
-		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+	if (!atomic_dec_and_test(&usage_count))
 		return;
-	}
-	spin_unlock_irqrestore(&floppy_usage_lock, flags);
+
 	if (irqdma_allocated) {
 		fd_disable_dma();
 		fd_free_dma();
@@ -4598,7 +4556,7 @@ static void __exit floppy_module_exit(void)
 	del_timer_sync(&fd_timer);
 	blk_cleanup_queue(floppy_queue);
 
-	if (usage_count)
+	if (atomic_read(&usage_count))
 		floppy_release_irq_and_dma();
 
 	/* eject disk, if any */

+ 1 - 1
drivers/block/hd.c

@@ -627,7 +627,7 @@ repeat:
 		req_data_dir(req) == READ ? "read" : "writ",
 		cyl, head, sec, nsect, req->buffer);
 #endif
-	if (blk_fs_request(req)) {
+	if (req->cmd_type == REQ_TYPE_FS) {
 		switch (rq_data_dir(req)) {
 		case READ:
 			hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,

+ 7 - 2
drivers/block/loop.c

@@ -67,6 +67,7 @@
 #include <linux/compat.h>
 #include <linux/suspend.h>
 #include <linux/freezer.h>
+#include <linux/smp_lock.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h>		/* for invalidate_bdev() */
 #include <linux/completion.h>
@@ -476,7 +477,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
-		bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
+		bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
 		struct file *file = lo->lo_backing_file;
 
 		if (barrier) {
@@ -831,7 +832,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	lo->lo_queue->unplug_fn = loop_unplug;
 
 	if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
-		blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL);
+		blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN);
 
 	set_capacity(lo->lo_disk, size);
 	bd_set_size(bdev, size << 9);
@@ -1408,9 +1409,11 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 {
 	struct loop_device *lo = bdev->bd_disk->private_data;
 
+	lock_kernel();
 	mutex_lock(&lo->lo_ctl_mutex);
 	lo->lo_refcnt++;
 	mutex_unlock(&lo->lo_ctl_mutex);
+	unlock_kernel();
 
 	return 0;
 }
@@ -1420,6 +1423,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 	struct loop_device *lo = disk->private_data;
 	int err;
 
+	lock_kernel();
 	mutex_lock(&lo->lo_ctl_mutex);
 
 	if (--lo->lo_refcnt)
@@ -1444,6 +1448,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 out:
 	mutex_unlock(&lo->lo_ctl_mutex);
 out_unlocked:
+	lock_kernel();
 	return 0;
 }
 

+ 2 - 2
drivers/block/mg_disk.c

@@ -670,7 +670,7 @@ static void mg_request_poll(struct request_queue *q)
 				break;
 		}
 
-		if (unlikely(!blk_fs_request(host->req))) {
+		if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
 			mg_end_request_cur(host, -EIO);
 			continue;
 		}
@@ -756,7 +756,7 @@ static void mg_request(struct request_queue *q)
 			continue;
 		}
 
-		if (unlikely(!blk_fs_request(req))) {
+		if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
 			mg_end_request_cur(host, -EIO);
 			continue;
 		}

+ 5 - 2
drivers/block/nbd.c

@@ -24,6 +24,7 @@
 #include <linux/errno.h>
 #include <linux/file.h>
 #include <linux/ioctl.h>
+#include <linux/smp_lock.h>
 #include <linux/compiler.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
@@ -448,7 +449,7 @@ static void nbd_clear_que(struct nbd_device *lo)
 
 static void nbd_handle_req(struct nbd_device *lo, struct request *req)
 {
-	if (!blk_fs_request(req))
+	if (req->cmd_type != REQ_TYPE_FS)
 		goto error_out;
 
 	nbd_cmd(req) = NBD_CMD_READ;
@@ -716,9 +717,11 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
 			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
 
+	lock_kernel();
 	mutex_lock(&lo->tx_lock);
 	error = __nbd_ioctl(bdev, lo, cmd, arg);
 	mutex_unlock(&lo->tx_lock);
+	unlock_kernel();
 
 	return error;
 }
@@ -726,7 +729,7 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 static const struct block_device_operations nbd_fops =
 {
 	.owner =	THIS_MODULE,
-	.locked_ioctl =	nbd_ioctl,
+	.ioctl =	nbd_ioctl,
 };
 
 /*

+ 4 - 11
drivers/block/osdblk.c

@@ -310,7 +310,8 @@ static void osdblk_rq_fn(struct request_queue *q)
 			break;
 
 		/* filter out block requests we don't understand */
-		if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) {
+		if (rq->cmd_type != REQ_TYPE_FS &&
+		    !(rq->cmd_flags & REQ_HARDBARRIER)) {
 			blk_end_request_all(rq, 0);
 			continue;
 		}
@@ -322,7 +323,7 @@ static void osdblk_rq_fn(struct request_queue *q)
 		 * driver-specific, etc.
 		 */
 
-		do_flush = (rq->special == (void *) 0xdeadbeefUL);
+		do_flush = rq->cmd_flags & REQ_FLUSH;
 		do_write = (rq_data_dir(rq) == WRITE);
 
 		if (!do_flush) { /* osd_flush does not use a bio */
@@ -379,14 +380,6 @@ static void osdblk_rq_fn(struct request_queue *q)
 	}
 }
 
-static void osdblk_prepare_flush(struct request_queue *q, struct request *rq)
-{
-	/* add driver-specific marker, to indicate that this request
-	 * is a flush command
-	 */
-	rq->special = (void *) 0xdeadbeefUL;
-}
-
 static void osdblk_free_disk(struct osdblk_device *osdev)
 {
 	struct gendisk *disk = osdev->disk;
@@ -446,7 +439,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
 	blk_queue_stack_limits(q, osd_request_queue(osdev->osd));
 
 	blk_queue_prep_rq(q, blk_queue_start_tag);
-	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush);
+	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 
 	disk->queue = q;
 

+ 18 - 3
drivers/block/paride/pcd.c

@@ -138,6 +138,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
 #include <linux/cdrom.h>
 #include <linux/cdrom.h>
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/uaccess.h>
 
 
 static DEFINE_SPINLOCK(pcd_lock);
 static DEFINE_SPINLOCK(pcd_lock);
@@ -224,13 +225,21 @@ static char *pcd_buf;		/* buffer for request in progress */
 static int pcd_block_open(struct block_device *bdev, fmode_t mode)
 static int pcd_block_open(struct block_device *bdev, fmode_t mode)
 {
 {
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
-	return cdrom_open(&cd->info, bdev, mode);
+	int ret;
+
+	lock_kernel();
+	ret = cdrom_open(&cd->info, bdev, mode);
+	unlock_kernel();
+
+	return ret;
 }
 }
 
 
 static int pcd_block_release(struct gendisk *disk, fmode_t mode)
 static int pcd_block_release(struct gendisk *disk, fmode_t mode)
 {
 {
 	struct pcd_unit *cd = disk->private_data;
 	struct pcd_unit *cd = disk->private_data;
+	lock_kernel();
 	cdrom_release(&cd->info, mode);
 	cdrom_release(&cd->info, mode);
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
@@ -238,7 +247,13 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
 				unsigned cmd, unsigned long arg)
 				unsigned cmd, unsigned long arg)
 {
 {
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
-	return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
+	int ret;
+
+	lock_kernel();
+	ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
+	unlock_kernel();
+
+	return ret;
 }
 }
 
 
 static int pcd_block_media_changed(struct gendisk *disk)
 static int pcd_block_media_changed(struct gendisk *disk)
@@ -251,7 +266,7 @@ static const struct block_device_operations pcd_bdops = {
 	.owner		= THIS_MODULE,
 	.owner		= THIS_MODULE,
 	.open		= pcd_block_open,
 	.open		= pcd_block_open,
 	.release	= pcd_block_release,
 	.release	= pcd_block_release,
-	.locked_ioctl	= pcd_block_ioctl,
+	.ioctl		= pcd_block_ioctl,
 	.media_changed	= pcd_block_media_changed,
 	.media_changed	= pcd_block_media_changed,
 };
 };
 
 

+ 9 - 2
drivers/block/paride/pd.c

@@ -153,6 +153,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
 #include <linux/blkpg.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/uaccess.h>
 #include <linux/workqueue.h>
 #include <linux/workqueue.h>
 
 
@@ -439,7 +440,7 @@ static char *pd_buf;		/* buffer for request in progress */
 
 
 static enum action do_pd_io_start(void)
 static enum action do_pd_io_start(void)
 {
 {
-	if (blk_special_request(pd_req)) {
+	if (pd_req->cmd_type == REQ_TYPE_SPECIAL) {
 		phase = pd_special;
 		phase = pd_special;
 		return pd_special();
 		return pd_special();
 	}
 	}
@@ -735,12 +736,14 @@ static int pd_open(struct block_device *bdev, fmode_t mode)
 {
 {
 	struct pd_unit *disk = bdev->bd_disk->private_data;
 	struct pd_unit *disk = bdev->bd_disk->private_data;
 
 
+	lock_kernel();
 	disk->access++;
 	disk->access++;
 
 
 	if (disk->removable) {
 	if (disk->removable) {
 		pd_special_command(disk, pd_media_check);
 		pd_special_command(disk, pd_media_check);
 		pd_special_command(disk, pd_door_lock);
 		pd_special_command(disk, pd_door_lock);
 	}
 	}
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
@@ -768,8 +771,10 @@ static int pd_ioctl(struct block_device *bdev, fmode_t mode,
 
 
 	switch (cmd) {
 	switch (cmd) {
 	case CDROMEJECT:
 	case CDROMEJECT:
+		lock_kernel();
 		if (disk->access == 1)
 		if (disk->access == 1)
 			pd_special_command(disk, pd_eject);
 			pd_special_command(disk, pd_eject);
+		unlock_kernel();
 		return 0;
 		return 0;
 	default:
 	default:
 		return -EINVAL;
 		return -EINVAL;
@@ -780,8 +785,10 @@ static int pd_release(struct gendisk *p, fmode_t mode)
 {
 {
 	struct pd_unit *disk = p->private_data;
 	struct pd_unit *disk = p->private_data;
 
 
+	lock_kernel();
 	if (!--disk->access && disk->removable)
 	if (!--disk->access && disk->removable)
 		pd_special_command(disk, pd_door_unlock);
 		pd_special_command(disk, pd_door_unlock);
+	unlock_kernel();
 
 
 	return 0;
 	return 0;
 }
 }
@@ -812,7 +819,7 @@ static const struct block_device_operations pd_fops = {
 	.owner		= THIS_MODULE,
 	.owner		= THIS_MODULE,
 	.open		= pd_open,
 	.open		= pd_open,
 	.release	= pd_release,
 	.release	= pd_release,
-	.locked_ioctl	= pd_ioctl,
+	.ioctl		= pd_ioctl,
 	.getgeo		= pd_getgeo,
 	.getgeo		= pd_getgeo,
 	.media_changed	= pd_check_media,
 	.media_changed	= pd_check_media,
 	.revalidate_disk= pd_revalidate
 	.revalidate_disk= pd_revalidate

+ 20 - 6
drivers/block/paride/pf.c

@@ -152,6 +152,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
 #include <linux/blkpg.h>
+#include <linux/smp_lock.h>
 #include <asm/uaccess.h>
 #include <asm/uaccess.h>
 
 
 static DEFINE_SPINLOCK(pf_spin_lock);
 static DEFINE_SPINLOCK(pf_spin_lock);
@@ -266,7 +267,7 @@ static const struct block_device_operations pf_fops = {
 	.owner		= THIS_MODULE,
 	.owner		= THIS_MODULE,
 	.open		= pf_open,
 	.open		= pf_open,
 	.release	= pf_release,
 	.release	= pf_release,
-	.locked_ioctl	= pf_ioctl,
+	.ioctl		= pf_ioctl,
 	.getgeo		= pf_getgeo,
 	.getgeo		= pf_getgeo,
 	.media_changed	= pf_check_media,
 	.media_changed	= pf_check_media,
 };
 };
@@ -299,20 +300,26 @@ static void __init pf_init_units(void)
 static int pf_open(struct block_device *bdev, fmode_t mode)
 static int pf_open(struct block_device *bdev, fmode_t mode)
 {
 {
 	struct pf_unit *pf = bdev->bd_disk->private_data;
 	struct pf_unit *pf = bdev->bd_disk->private_data;
+	int ret;
 
 
+	lock_kernel();
 	pf_identify(pf);
 	pf_identify(pf);
 
 
+	ret = -ENODEV;
 	if (pf->media_status == PF_NM)
 	if (pf->media_status == PF_NM)
-		return -ENODEV;
+		goto out;
 
 
+	ret = -EROFS;
 	if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
 	if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
-		return -EROFS;
+		goto out;
 
 
+	ret = 0;
 	pf->access++;
 	pf->access++;
 	if (pf->removable)
 	if (pf->removable)
 		pf_lock(pf, 1);
 		pf_lock(pf, 1);
-
-	return 0;
+out:
+	unlock_kernel();
+	return ret;
 }
 }
 
 
 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -342,7 +349,10 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, u
 
 
 	if (pf->access != 1)
 	if (pf->access != 1)
 		return -EBUSY;
 		return -EBUSY;
+	lock_kernel();
 	pf_eject(pf);
 	pf_eject(pf);
+	unlock_kernel();
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -350,14 +360,18 @@ static int pf_release(struct gendisk *disk, fmode_t mode)
 {
 {
 	struct pf_unit *pf = disk->private_data;
 	struct pf_unit *pf = disk->private_data;
 
 
-	if (pf->access <= 0)
+	lock_kernel();
+	if (pf->access <= 0) {
+		unlock_kernel();
 		return -EINVAL;
 		return -EINVAL;
+	}
 
 
 	pf->access--;
 	pf->access--;
 
 
 	if (!pf->access && pf->removable)
 	if (!pf->access && pf->removable)
 		pf_lock(pf, 0);
 		pf_lock(pf, 0);
 
 
+	unlock_kernel();
 	return 0;
 	return 0;
 
 
 }
 }

+ 15 - 5
drivers/block/pktcdvd.c

@@ -57,6 +57,7 @@
 #include <linux/seq_file.h>
 #include <linux/seq_file.h>
 #include <linux/miscdevice.h>
 #include <linux/miscdevice.h>
 #include <linux/freezer.h>
 #include <linux/freezer.h>
+#include <linux/smp_lock.h>
 #include <linux/mutex.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_cmnd.h>
@@ -1221,7 +1222,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
 	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
 	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
 	pkt->bio->bi_idx = 0;
 	pkt->bio->bi_idx = 0;
 
 
-	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+	BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
 	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
 	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
 	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
 	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
 	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
 	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
@@ -2382,6 +2383,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
 
 
 	VPRINTK(DRIVER_NAME": entering open\n");
 	VPRINTK(DRIVER_NAME": entering open\n");
 
 
+	lock_kernel();
 	mutex_lock(&ctl_mutex);
 	mutex_lock(&ctl_mutex);
 	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
 	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
 	if (!pd) {
 	if (!pd) {
@@ -2409,6 +2411,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode)
 	}
 	}
 
 
 	mutex_unlock(&ctl_mutex);
 	mutex_unlock(&ctl_mutex);
+	unlock_kernel();
 	return 0;
 	return 0;
 
 
 out_dec:
 out_dec:
@@ -2416,6 +2419,7 @@ out_dec:
 out:
 out:
 	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
 	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
 	mutex_unlock(&ctl_mutex);
 	mutex_unlock(&ctl_mutex);
+	unlock_kernel();
 	return ret;
 	return ret;
 }
 }
 
 
@@ -2424,6 +2428,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
 	struct pktcdvd_device *pd = disk->private_data;
 	struct pktcdvd_device *pd = disk->private_data;
 	int ret = 0;
 	int ret = 0;
 
 
+	lock_kernel();
 	mutex_lock(&ctl_mutex);
 	mutex_lock(&ctl_mutex);
 	pd->refcnt--;
 	pd->refcnt--;
 	BUG_ON(pd->refcnt < 0);
 	BUG_ON(pd->refcnt < 0);
@@ -2432,6 +2437,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode)
 		pkt_release_dev(pd, flush);
 		pkt_release_dev(pd, flush);
 	}
 	}
 	mutex_unlock(&ctl_mutex);
 	mutex_unlock(&ctl_mutex);
+	unlock_kernel();
 	return ret;
 	return ret;
 }
 }
 
 
@@ -2762,10 +2768,12 @@ out_mem:
 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
 {
 {
 	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
 	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
+	int ret;
 
 
 	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
 	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
 		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
 		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
 
 
+	lock_kernel();
 	switch (cmd) {
 	switch (cmd) {
 	case CDROMEJECT:
 	case CDROMEJECT:
 		/*
 		/*
@@ -2783,14 +2791,16 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
 	case CDROM_LAST_WRITTEN:
 	case CDROM_LAST_WRITTEN:
 	case CDROM_SEND_PACKET:
 	case CDROM_SEND_PACKET:
 	case SCSI_IOCTL_SEND_COMMAND:
 	case SCSI_IOCTL_SEND_COMMAND:
-		return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
+		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
+		break;
 
 
 	default:
 	default:
 		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
 		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
-		return -ENOTTY;
+		ret = -ENOTTY;
 	}
 	}
+	unlock_kernel();
 
 
-	return 0;
+	return ret;
 }
 }
 
 
 static int pkt_media_changed(struct gendisk *disk)
 static int pkt_media_changed(struct gendisk *disk)
@@ -2812,7 +2822,7 @@ static const struct block_device_operations pktcdvd_ops = {
 	.owner =		THIS_MODULE,
 	.owner =		THIS_MODULE,
 	.open =			pkt_open,
 	.open =			pkt_open,
 	.release =		pkt_close,
 	.release =		pkt_close,
-	.locked_ioctl =		pkt_ioctl,
+	.ioctl =		pkt_ioctl,
 	.media_changed =	pkt_media_changed,
 	.media_changed =	pkt_media_changed,
 };
 };
 
 

+ 6 - 19
drivers/block/ps3disk.c

@@ -196,13 +196,12 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
 	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
 
 	while ((req = blk_fetch_request(q))) {
 	while ((req = blk_fetch_request(q))) {
-		if (blk_fs_request(req)) {
-			if (ps3disk_submit_request_sg(dev, req))
-				break;
-		} else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-			   req->cmd[0] == REQ_LB_OP_FLUSH) {
+		if (req->cmd_flags & REQ_FLUSH) {
 			if (ps3disk_submit_flush_request(dev, req))
 			if (ps3disk_submit_flush_request(dev, req))
 				break;
 				break;
+		} else if (req->cmd_type == REQ_TYPE_FS) {
+			if (ps3disk_submit_request_sg(dev, req))
+				break;
 		} else {
 		} else {
 			blk_dump_rq_flags(req, DEVICE_NAME " bad request");
 			blk_dump_rq_flags(req, DEVICE_NAME " bad request");
 			__blk_end_request_all(req, -EIO);
 			__blk_end_request_all(req, -EIO);
@@ -257,8 +256,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
 		return IRQ_HANDLED;
 		return IRQ_HANDLED;
 	}
 	}
 
 
-	if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-	    req->cmd[0] == REQ_LB_OP_FLUSH) {
+	if (req->cmd_flags & REQ_FLUSH) {
 		read = 0;
 		read = 0;
 		op = "flush";
 		op = "flush";
 	} else {
 	} else {
@@ -398,16 +396,6 @@ static int ps3disk_identify(struct ps3_storage_device *dev)
 	return 0;
 	return 0;
 }
 }
 
 
-static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
-{
-	struct ps3_storage_device *dev = q->queuedata;
-
-	dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
-
-	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	req->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 static unsigned long ps3disk_mask;
 static unsigned long ps3disk_mask;
 
 
 static DEFINE_MUTEX(ps3disk_mask_mutex);
 static DEFINE_MUTEX(ps3disk_mask_mutex);
@@ -480,8 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
 	blk_queue_dma_alignment(queue, dev->blk_size-1);
 	blk_queue_logical_block_size(queue, dev->blk_size);
 	blk_queue_logical_block_size(queue, dev->blk_size);
 
 
-	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
-			  ps3disk_prepare_flush);
+	blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH);
 
 
 	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segments(queue, -1);
 	blk_queue_max_segment_size(queue, dev->bounce_size);
 	blk_queue_max_segment_size(queue, dev->bounce_size);

+ 18 - 2
drivers/block/swim.c

@@ -20,6 +20,7 @@
 #include <linux/fd.h>
 #include <linux/fd.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 #include <linux/hdreg.h>
 #include <linux/hdreg.h>
 #include <linux/kernel.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/delay.h>
@@ -661,11 +662,23 @@ out:
 	return err;
 	return err;
 }
 }
 
 
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = floppy_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
 {
 	struct floppy_state *fs = disk->private_data;
 	struct floppy_state *fs = disk->private_data;
 	struct swim __iomem *base = fs->swd->base;
 	struct swim __iomem *base = fs->swd->base;
 
 
+	lock_kernel();
 	if (fs->ref_count < 0)
 	if (fs->ref_count < 0)
 		fs->ref_count = 0;
 		fs->ref_count = 0;
 	else if (fs->ref_count > 0)
 	else if (fs->ref_count > 0)
@@ -673,6 +686,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 
 
 	if (fs->ref_count == 0)
 	if (fs->ref_count == 0)
 		swim_motor(base, OFF);
 		swim_motor(base, OFF);
+	unlock_kernel();
 
 
 	return 0;
 	return 0;
 }
 }
@@ -690,7 +704,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
 	case FDEJECT:
 	case FDEJECT:
 		if (fs->ref_count != 1)
 		if (fs->ref_count != 1)
 			return -EBUSY;
 			return -EBUSY;
+		lock_kernel();
 		err = floppy_eject(fs);
 		err = floppy_eject(fs);
+		unlock_kernel();
 		return err;
 		return err;
 
 
 	case FDGETPRM:
 	case FDGETPRM:
@@ -751,9 +767,9 @@ static int floppy_revalidate(struct gendisk *disk)
 
 
 static const struct block_device_operations floppy_fops = {
 static const struct block_device_operations floppy_fops = {
 	.owner		 = THIS_MODULE,
 	.owner		 = THIS_MODULE,
-	.open		 = floppy_open,
+	.open		 = floppy_unlocked_open,
 	.release	 = floppy_release,
 	.release	 = floppy_release,
-	.locked_ioctl	 = floppy_ioctl,
+	.ioctl		 = floppy_ioctl,
 	.getgeo		 = floppy_getgeo,
 	.getgeo		 = floppy_getgeo,
 	.media_changed	 = floppy_check_change,
 	.media_changed	 = floppy_check_change,
 	.revalidate_disk = floppy_revalidate,
 	.revalidate_disk = floppy_revalidate,

+ 29 - 3
drivers/block/swim3.c

@@ -25,6 +25,7 @@
 #include <linux/ioctl.h>
 #include <linux/ioctl.h>
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/interrupt.h>
+#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <asm/io.h>
 #include <asm/io.h>
@@ -839,7 +840,7 @@ static int fd_eject(struct floppy_state *fs)
 static struct floppy_struct floppy_type =
 static struct floppy_struct floppy_type =
 	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/*  7 1.44MB 3.5"   */
 	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/*  7 1.44MB 3.5"   */
 
 
-static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long param)
 			unsigned int cmd, unsigned long param)
 {
 {
 	struct floppy_state *fs = bdev->bd_disk->private_data;
 	struct floppy_state *fs = bdev->bd_disk->private_data;
@@ -867,6 +868,18 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
 	return -ENOTTY;
 	return -ENOTTY;
 }
 }
 
 
+static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
+				 unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = floppy_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 static int floppy_open(struct block_device *bdev, fmode_t mode)
 static int floppy_open(struct block_device *bdev, fmode_t mode)
 {
 {
 	struct floppy_state *fs = bdev->bd_disk->private_data;
 	struct floppy_state *fs = bdev->bd_disk->private_data;
@@ -936,15 +949,28 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	return 0;
 	return 0;
 }
 }
 
 
+static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = floppy_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
 {
 	struct floppy_state *fs = disk->private_data;
 	struct floppy_state *fs = disk->private_data;
 	struct swim3 __iomem *sw = fs->swim3;
 	struct swim3 __iomem *sw = fs->swim3;
+	lock_kernel();
 	if (fs->ref_count > 0 && --fs->ref_count == 0) {
 	if (fs->ref_count > 0 && --fs->ref_count == 0) {
 		swim3_action(fs, MOTOR_OFF);
 		swim3_action(fs, MOTOR_OFF);
 		out_8(&sw->control_bic, 0xff);
 		out_8(&sw->control_bic, 0xff);
 		swim3_select(fs, RELAX);
 		swim3_select(fs, RELAX);
 	}
 	}
+	unlock_kernel();
 	return 0;
 	return 0;
 }
 }
 
 
@@ -995,9 +1021,9 @@ static int floppy_revalidate(struct gendisk *disk)
 }
 }
 
 
 static const struct block_device_operations floppy_fops = {
 static const struct block_device_operations floppy_fops = {
-	.open		= floppy_open,
+	.open		= floppy_unlocked_open,
 	.release	= floppy_release,
 	.release	= floppy_release,
-	.locked_ioctl	= floppy_ioctl,
+	.ioctl		= floppy_ioctl,
 	.media_changed	= floppy_check_change,
 	.media_changed	= floppy_check_change,
 	.revalidate_disk= floppy_revalidate,
 	.revalidate_disk= floppy_revalidate,
 };
 };

+ 28 - 7
drivers/block/ub.c

@@ -28,6 +28,7 @@
 #include <linux/timer.h>
 #include <linux/timer.h>
 #include <linux/scatterlist.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi.h>
 
 
 #define DRV_NAME "ub"
 #define DRV_NAME "ub"
@@ -648,7 +649,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 		return 0;
 		return 0;
 	}
 	}
 
 
-	if (lun->changed && !blk_pc_request(rq)) {
+	if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 		blk_start_request(rq);
 		blk_start_request(rq);
 		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
 		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
 		return 0;
 		return 0;
@@ -684,7 +685,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 	}
 	}
 	urq->nsg = n_elem;
 	urq->nsg = n_elem;
 
 
-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		ub_cmd_build_packet(sc, lun, cmd, urq);
 		ub_cmd_build_packet(sc, lun, cmd, urq);
 	} else {
 	} else {
 		ub_cmd_build_block(sc, lun, cmd, urq);
 		ub_cmd_build_block(sc, lun, cmd, urq);
@@ -781,7 +782,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 	rq = urq->rq;
 	rq = urq->rq;
 
 
 	if (cmd->error == 0) {
 	if (cmd->error == 0) {
-		if (blk_pc_request(rq)) {
+		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 			if (cmd->act_len >= rq->resid_len)
 			if (cmd->act_len >= rq->resid_len)
 				rq->resid_len = 0;
 				rq->resid_len = 0;
 			else
 			else
@@ -795,7 +796,7 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
 			}
 			}
 		}
 		}
 	} else {
 	} else {
-		if (blk_pc_request(rq)) {
+		if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
 			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
 			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
 			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
 			rq->sense_len = UB_SENSE_SIZE;
 			rq->sense_len = UB_SENSE_SIZE;
@@ -1710,6 +1711,18 @@ err_open:
 	return rc;
 	return rc;
 }
 }
 
 
+static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = ub_bd_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
+
 /*
 /*
  */
  */
 static int ub_bd_release(struct gendisk *disk, fmode_t mode)
 static int ub_bd_release(struct gendisk *disk, fmode_t mode)
@@ -1717,7 +1730,10 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
 	struct ub_lun *lun = disk->private_data;
 	struct ub_lun *lun = disk->private_data;
 	struct ub_dev *sc = lun->udev;
 	struct ub_dev *sc = lun->udev;
 
 
+	lock_kernel();
 	ub_put(sc);
 	ub_put(sc);
+	unlock_kernel();
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -1729,8 +1745,13 @@ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
 {
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct gendisk *disk = bdev->bd_disk;
 	void __user *usermem = (void __user *) arg;
 	void __user *usermem = (void __user *) arg;
+	int ret;
+
+	lock_kernel();
+	ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+	unlock_kernel();
 
 
-	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+	return ret;
 }
 }
 
 
 /*
 /*
@@ -1792,9 +1813,9 @@ static int ub_bd_media_changed(struct gendisk *disk)
 
 
 static const struct block_device_operations ub_bd_fops = {
 static const struct block_device_operations ub_bd_fops = {
 	.owner		= THIS_MODULE,
 	.owner		= THIS_MODULE,
-	.open		= ub_bd_open,
+	.open		= ub_bd_unlocked_open,
 	.release	= ub_bd_release,
 	.release	= ub_bd_release,
-	.locked_ioctl	= ub_bd_ioctl,
+	.ioctl		= ub_bd_ioctl,
 	.media_changed	= ub_bd_media_changed,
 	.media_changed	= ub_bd_media_changed,
 	.revalidate_disk = ub_bd_revalidate,
 	.revalidate_disk = ub_bd_revalidate,
 };
 };

+ 1 - 1
drivers/block/umem.c

@@ -478,7 +478,7 @@ static void process_page(unsigned long data)
 				le32_to_cpu(desc->local_addr)>>9,
 				le32_to_cpu(desc->transfer_size));
 			dump_dmastat(card, control);
-		} else if (test_bit(BIO_RW, &bio->bi_rw) &&
+		} else if ((bio->bi_rw & REQ_WRITE) &&
 			   le32_to_cpu(desc->local_addr) >> 9 ==
 				card->init_size) {
 			card->init_size += le32_to_cpu(desc->transfer_size) >> 9;

+ 19 - 2
drivers/block/viodasd.c

@@ -41,6 +41,7 @@
 #include <linux/errno.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/string.h>
+#include <linux/smp_lock.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-mapping.h>
 #include <linux/completion.h>
 #include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/device.h>
@@ -175,6 +176,18 @@ static int viodasd_open(struct block_device *bdev, fmode_t mode)
 	return 0;
 	return 0;
 }
 }
 
 
+static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = viodasd_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
+
 /*
 /*
  * External release entry point.
  * External release entry point.
  */
  */
@@ -183,6 +196,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
 	struct viodasd_device *d = disk->private_data;
 	struct viodasd_device *d = disk->private_data;
 	HvLpEvent_Rc hvrc;
 	HvLpEvent_Rc hvrc;
 
 
+	lock_kernel();
 	/* Send the event to OS/400.  We DON'T expect a response */
 	/* Send the event to OS/400.  We DON'T expect a response */
 	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
 	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
 			HvLpEvent_Type_VirtualIo,
 			HvLpEvent_Type_VirtualIo,
@@ -195,6 +209,9 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode)
 			0, 0, 0);
 			0, 0, 0);
 	if (hvrc != 0)
 	if (hvrc != 0)
 		pr_warning("HV close call failed %d\n", (int)hvrc);
 		pr_warning("HV close call failed %d\n", (int)hvrc);
+
+	unlock_kernel();
+
 	return 0;
 	return 0;
 }
 }
 
 
@@ -219,7 +236,7 @@ static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  */
  */
 static const struct block_device_operations viodasd_fops = {
 static const struct block_device_operations viodasd_fops = {
 	.owner = THIS_MODULE,
 	.owner = THIS_MODULE,
-	.open = viodasd_open,
+	.open = viodasd_unlocked_open,
 	.release = viodasd_release,
 	.release = viodasd_release,
 	.getgeo = viodasd_getgeo,
 	.getgeo = viodasd_getgeo,
 };
 };
@@ -361,7 +378,7 @@ static void do_viodasd_request(struct request_queue *q)
 		if (req == NULL)
 		if (req == NULL)
 			return;
 			return;
 		/* check that request contains a valid command */
 		/* check that request contains a valid command */
-		if (!blk_fs_request(req)) {
+		if (req->cmd_type != REQ_TYPE_FS) {
 			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
 			viodasd_end_request(req, -EIO, blk_rq_sectors(req));
 			continue;
 			continue;
 		}
 		}

+ 49 - 39
drivers/block/virtio_blk.c

@@ -2,6 +2,7 @@
 #include <linux/spinlock.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 #include <linux/hdreg.h>
 #include <linux/hdreg.h>
 #include <linux/virtio.h>
 #include <linux/virtio.h>
 #include <linux/virtio_blk.h>
 #include <linux/virtio_blk.h>
@@ -65,13 +66,18 @@ static void blk_done(struct virtqueue *vq)
 			break;
 			break;
 		}
 		}
 
 
-		if (blk_pc_request(vbr->req)) {
+		switch (vbr->req->cmd_type) {
+		case REQ_TYPE_BLOCK_PC:
 			vbr->req->resid_len = vbr->in_hdr.residual;
 			vbr->req->resid_len = vbr->in_hdr.residual;
 			vbr->req->sense_len = vbr->in_hdr.sense_len;
 			vbr->req->sense_len = vbr->in_hdr.sense_len;
 			vbr->req->errors = vbr->in_hdr.errors;
 			vbr->req->errors = vbr->in_hdr.errors;
-		}
-		if (blk_special_request(vbr->req))
+			break;
+		case REQ_TYPE_SPECIAL:
 			vbr->req->errors = (error != 0);
 			vbr->req->errors = (error != 0);
+			break;
+		default:
+			break;
+		}
 
 
 		__blk_end_request_all(vbr->req, error);
 		__blk_end_request_all(vbr->req, error);
 		list_del(&vbr->list);
 		list_del(&vbr->list);
@@ -94,36 +100,35 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		return false;
 		return false;
 
 
 	vbr->req = req;
 	vbr->req = req;
-	switch (req->cmd_type) {
-	case REQ_TYPE_FS:
-		vbr->out_hdr.type = 0;
-		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
-		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_BLOCK_PC:
-		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
-		vbr->out_hdr.sector = 0;
-		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_SPECIAL:
-		vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+
+	if (req->cmd_flags & REQ_FLUSH) {
+		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
-		break;
-	case REQ_TYPE_LINUX_BLOCK:
-		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
-			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+	} else {
+		switch (req->cmd_type) {
+		case REQ_TYPE_FS:
+			vbr->out_hdr.type = 0;
+			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
+			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+			break;
+		case REQ_TYPE_BLOCK_PC:
+			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
 			vbr->out_hdr.sector = 0;
 			vbr->out_hdr.sector = 0;
 			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 			break;
 			break;
+		case REQ_TYPE_SPECIAL:
+			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+			vbr->out_hdr.sector = 0;
+			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+			break;
+		default:
+			/* We don't put anything else in the queue. */
+			BUG();
 		}
 		}
-		/*FALLTHRU*/
-	default:
-		/* We don't put anything else in the queue. */
-		BUG();
 	}
 	}
 
 
-	if (blk_barrier_rq(vbr->req))
+	if (vbr->req->cmd_flags & REQ_HARDBARRIER)
 		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
 
 
 	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
 	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
@@ -134,12 +139,12 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	 * block, and before the normal inhdr we put the sense data and the
 	 * block, and before the normal inhdr we put the sense data and the
 	 * inhdr with additional status information before the normal inhdr.
 	 * inhdr with additional status information before the normal inhdr.
 	 */
 	 */
-	if (blk_pc_request(vbr->req))
+	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
 		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
 		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
 
 
 	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
 	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
 
 
-	if (blk_pc_request(vbr->req)) {
+	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
 		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
 		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
 		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
 		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
 			   sizeof(vbr->in_hdr));
 			   sizeof(vbr->in_hdr));
@@ -190,12 +195,6 @@ static void do_virtblk_request(struct request_queue *q)
 		virtqueue_kick(vblk->vq);
 		virtqueue_kick(vblk->vq);
 }
 }
 
 
-static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
-{
-	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	req->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 /* return id (s/n) string for *disk to *id_str
 /* return id (s/n) string for *disk to *id_str
  */
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -219,7 +218,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
 	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
 	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
 }
 }
 
 
-static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
 			 unsigned cmd, unsigned long data)
 			 unsigned cmd, unsigned long data)
 {
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct gendisk *disk = bdev->bd_disk;
@@ -235,6 +234,18 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 			      (void __user *)data);
 			      (void __user *)data);
 }
 }
 
 
+static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
 {
@@ -261,7 +272,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 }
 }
 
 
 static const struct block_device_operations virtblk_fops = {
 static const struct block_device_operations virtblk_fops = {
-	.locked_ioctl = virtblk_ioctl,
+	.ioctl  = virtblk_ioctl,
 	.owner  = THIS_MODULE,
 	.owner  = THIS_MODULE,
 	.getgeo = virtblk_getgeo,
 	.getgeo = virtblk_getgeo,
 };
 };
@@ -383,8 +394,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		 * flushing a volatile write cache on the host.  Use that
 		 * flushing a volatile write cache on the host.  Use that
 		 * to implement write barrier support.
 		 * to implement write barrier support.
 		 */
 		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
-				  virtblk_prepare_flush);
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
 	} else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
 	} else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
 		/*
 		/*
 		 * If the BARRIER feature is supported the host expects us
 		 * If the BARRIER feature is supported the host expects us
@@ -393,7 +403,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		 * never re-orders outstanding I/O.  This feature is not
 		 * never re-orders outstanding I/O.  This feature is not
 		 * useful for real life scenarious and deprecated.
 		 * useful for real life scenarious and deprecated.
 		 */
 		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
 	} else {
 	} else {
 		/*
 		/*
 		 * If the FLUSH feature is not supported we must assume that
 		 * If the FLUSH feature is not supported we must assume that
@@ -401,7 +411,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		 * caching. We still need to drain the queue to provider
 		 * caching. We still need to drain the queue to provider
 		 * proper barrier semantics.
 		 * proper barrier semantics.
 		 */
 		 */
-		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL);
+		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
 	}
 	}
 
 
 	/* If disk is read-only in the host, the guest should obey */
 	/* If disk is read-only in the host, the guest should obey */

+ 16 - 3
drivers/block/xd.c

@@ -46,6 +46,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 #include <linux/blkpg.h>
 #include <linux/delay.h>
 #include <linux/io.h>
@@ -133,7 +134,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
 static const struct block_device_operations xd_fops = {
 	.owner	= THIS_MODULE,
-	.locked_ioctl	= xd_ioctl,
+	.ioctl	= xd_ioctl,
 	.getgeo = xd_getgeo,
 };
 static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
@@ -322,7 +323,7 @@ static void do_xd_request (struct request_queue * q)
 		int res = -EIO;
 		int retry;
 
-		if (!blk_fs_request(req))
+		if (req->cmd_type != REQ_TYPE_FS)
 			goto done;
 		if (block + count > get_capacity(req->rq_disk))
 			goto done;
@@ -347,7 +348,7 @@ static int xd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 }
 
 /* xd_ioctl: handle device ioctl's */
-static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
+static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg)
 {
 	switch (cmd) {
 		case HDIO_SET_DMA:
@@ -375,6 +376,18 @@ static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long a
 	}
 }
 
+static int xd_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long param)
+{
+	int ret;
+
+	lock_kernel();
+	ret = xd_locked_ioctl(bdev, mode, cmd, param);
+	unlock_kernel();
+
+	return ret;
+}
+
 /* xd_readwrite: handle a read/write request */
 static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
 {

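The xd.c hunks above show the pattern used throughout this merge for retiring .locked_ioctl: the old handler is renamed *_locked_ioctl and a thin .ioctl wrapper takes the big kernel lock explicitly. A generic hedged sketch of that wrapper (driver names are placeholders, not from the patch):

/* Hedged sketch of the BKL-pushdown ioctl wrapper used by these drivers. */
#include <linux/blkdev.h>
#include <linux/smp_lock.h>

static int foo_locked_ioctl(struct block_device *bdev, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	/* original ioctl body, still written assuming the BKL is held */
	return 0;
}

static int foo_ioctl(struct block_device *bdev, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = foo_locked_ioctl(bdev, mode, cmd, arg);
	unlock_kernel();

	return ret;
}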
+ 300 - 103
drivers/block/xen-blkfront.c

@@ -41,6 +41,7 @@
 #include <linux/cdrom.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/scatterlist.h>
 
 #include <xen/xen.h>
@@ -79,6 +80,7 @@ static const struct block_device_operations xlvbd_block_fops;
  */
 struct blkfront_info
 {
+	struct mutex mutex;
 	struct xenbus_device *xbdev;
 	struct gendisk *gd;
 	int vdevice;
@@ -95,16 +97,14 @@ struct blkfront_info
 	unsigned long shadow_free;
 	int feature_barrier;
 	int is_ready;
-
-	/**
-	 * The number of people holding this device open.  We won't allow a
-	 * hot-unplug unless this is 0.
-	 */
-	int users;
 };
 
 static DEFINE_SPINLOCK(blkif_io_lock);
 
+static unsigned int nr_minors;
+static unsigned long *minors;
+static DEFINE_SPINLOCK(minor_lock);
+
 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \
 	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
 #define GRANT_INVALID_REF	0
@@ -139,6 +139,55 @@ static void add_id_to_freelist(struct blkfront_info *info,
 	info->shadow_free = id;
 }
 
+static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
+{
+	unsigned int end = minor + nr;
+	int rc;
+
+	if (end > nr_minors) {
+		unsigned long *bitmap, *old;
+
+		bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
+				 GFP_KERNEL);
+		if (bitmap == NULL)
+			return -ENOMEM;
+
+		spin_lock(&minor_lock);
+		if (end > nr_minors) {
+			old = minors;
+			memcpy(bitmap, minors,
+			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
+			minors = bitmap;
+			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
+		} else
+			old = bitmap;
+		spin_unlock(&minor_lock);
+		kfree(old);
+	}
+
+	spin_lock(&minor_lock);
+	if (find_next_bit(minors, end, minor) >= end) {
+		for (; minor < end; ++minor)
+			__set_bit(minor, minors);
+		rc = 0;
+	} else
+		rc = -EBUSY;
+	spin_unlock(&minor_lock);
+
+	return rc;
+}
+
+static void xlbd_release_minors(unsigned int minor, unsigned int nr)
+{
+	unsigned int end = minor + nr;
+
+	BUG_ON(end > nr_minors);
+	spin_lock(&minor_lock);
+	for (; minor < end; ++minor)
+		__clear_bit(minor, minors);
+	spin_unlock(&minor_lock);
+}
+
 static void blkif_restart_queue_callback(void *arg)
 {
 	struct blkfront_info *info = (struct blkfront_info *)arg;
@@ -239,7 +288,7 @@ static int blkif_queue_request(struct request *req)
 
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (blk_barrier_rq(req))
+	if (req->cmd_flags & REQ_HARDBARRIER)
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
@@ -310,7 +359,7 @@ static void do_blkif_request(struct request_queue *rq)
 
 		blk_start_request(req);
 
-		if (!blk_fs_request(req)) {
+		if (req->cmd_type != REQ_TYPE_FS) {
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
@@ -372,17 +421,22 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
 static int xlvbd_barrier(struct blkfront_info *info)
 {
 	int err;
+	const char *barrier;
 
-	err = blk_queue_ordered(info->rq,
-				info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
-				NULL);
+	switch (info->feature_barrier) {
+	case QUEUE_ORDERED_DRAIN:	barrier = "enabled (drain)"; break;
+	case QUEUE_ORDERED_TAG:		barrier = "enabled (tag)"; break;
+	case QUEUE_ORDERED_NONE:	barrier = "disabled"; break;
+	default:			return -EINVAL;
+	}
+
+	err = blk_queue_ordered(info->rq, info->feature_barrier);
 
 	if (err)
 		return err;
 
 	printk(KERN_INFO "blkfront: %s: barriers %s\n",
-	       info->gd->disk_name,
-	       info->feature_barrier ? "enabled" : "disabled");
+	       info->gd->disk_name, barrier);
 	return 0;
 }
 
@@ -418,9 +472,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 	if ((minor % nr_parts) == 0)
 		nr_minors = nr_parts;
 
+	err = xlbd_reserve_minors(minor, nr_minors);
+	if (err)
+		goto out;
+	err = -ENODEV;
+
 	gd = alloc_disk(nr_minors);
 	if (gd == NULL)
-		goto out;
+		goto release;
 
 	offset = minor / nr_parts;
 
@@ -451,14 +510,13 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	if (xlvbd_init_blk_queue(gd, sector_size)) {
 		del_gendisk(gd);
-		goto out;
+		goto release;
 	}
 
 	info->rq = gd->queue;
 	info->gd = gd;
 
-	if (info->feature_barrier)
-		xlvbd_barrier(info);
+	xlvbd_barrier(info);
 
 	if (vdisk_info & VDISK_READONLY)
 		set_disk_ro(gd, 1);
@@ -471,10 +529,45 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 	return 0;
 
+ release:
+	xlbd_release_minors(minor, nr_minors);
  out:
 	return err;
 }
 
+static void xlvbd_release_gendisk(struct blkfront_info *info)
+{
+	unsigned int minor, nr_minors;
+	unsigned long flags;
+
+	if (info->rq == NULL)
+		return;
+
+	spin_lock_irqsave(&blkif_io_lock, flags);
+
+	/* No more blkif_request(). */
+	blk_stop_queue(info->rq);
+
+	/* No more gnttab callback work. */
+	gnttab_cancel_free_callback(&info->callback);
+	spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+	/* Flush gnttab callback work. Must be done with no locks held. */
+	flush_scheduled_work();
+
+	del_gendisk(info->gd);
+
+	minor = info->gd->first_minor;
+	nr_minors = info->gd->minors;
+	xlbd_release_minors(minor, nr_minors);
+
+	blk_cleanup_queue(info->rq);
+	info->rq = NULL;
+
+	put_disk(info->gd);
+	info->gd = NULL;
+}
+
 static void kick_pending_request_queues(struct blkfront_info *info)
 {
 	if (!RING_FULL(&info->ring)) {
@@ -569,7 +662,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 				       info->gd->disk_name);
 				error = -EOPNOTSUPP;
-				info->feature_barrier = 0;
+				info->feature_barrier = QUEUE_ORDERED_NONE;
 				xlvbd_barrier(info);
 			}
 			/* fall through */
@@ -652,7 +745,7 @@ fail:
 
 
 /* Common code used when first setting up, and when resuming. */
-static int talk_to_backend(struct xenbus_device *dev,
+static int talk_to_blkback(struct xenbus_device *dev,
 			   struct blkfront_info *info)
 {
 	const char *message = NULL;
@@ -712,7 +805,6 @@ again:
 	return err;
 }
 
-
 /**
  * Entry point to this code when a new device is created.  Allocate the basic
  * structures and the ring buffer for communication with the backend, and
@@ -773,6 +865,7 @@ static int blkfront_probe(struct xenbus_device *dev,
 		return -ENOMEM;
 	}
 
+	mutex_init(&info->mutex);
 	info->xbdev = dev;
 	info->vdevice = vdevice;
 	info->connected = BLKIF_STATE_DISCONNECTED;
@@ -786,7 +879,7 @@ static int blkfront_probe(struct xenbus_device *dev,
 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
 	dev_set_drvdata(&dev->dev, info);
 
-	err = talk_to_backend(dev, info);
+	err = talk_to_blkback(dev, info);
 	if (err) {
 		kfree(info);
 		dev_set_drvdata(&dev->dev, NULL);
@@ -881,13 +974,50 @@ static int blkfront_resume(struct xenbus_device *dev)
 
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
-	err = talk_to_backend(dev, info);
+	err = talk_to_blkback(dev, info);
 	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
 		err = blkif_recover(info);
 
 	return err;
 }
 
+static void
+blkfront_closing(struct blkfront_info *info)
+{
+	struct xenbus_device *xbdev = info->xbdev;
+	struct block_device *bdev = NULL;
+
+	mutex_lock(&info->mutex);
+
+	if (xbdev->state == XenbusStateClosing) {
+		mutex_unlock(&info->mutex);
+		return;
+	}
+
+	if (info->gd)
+		bdev = bdget_disk(info->gd, 0);
+
+	mutex_unlock(&info->mutex);
+
+	if (!bdev) {
+		xenbus_frontend_closed(xbdev);
+		return;
+	}
+
+	mutex_lock(&bdev->bd_mutex);
+
+	if (bdev->bd_openers) {
+		xenbus_dev_error(xbdev, -EBUSY,
+				 "Device in use; refusing to close");
+		xenbus_switch_state(xbdev, XenbusStateClosing);
+	} else {
+		xlvbd_release_gendisk(info);
+		xenbus_frontend_closed(xbdev);
+	}
+
+	mutex_unlock(&bdev->bd_mutex);
+	bdput(bdev);
+}
 
 /*
  * Invoked when the backend is finally 'ready' (and has told produced
@@ -899,11 +1029,31 @@ static void blkfront_connect(struct blkfront_info *info)
 	unsigned long sector_size;
 	unsigned int binfo;
 	int err;
-
-	if ((info->connected == BLKIF_STATE_CONNECTED) ||
-	    (info->connected == BLKIF_STATE_SUSPENDED) )
+	int barrier;
+
+	switch (info->connected) {
+	case BLKIF_STATE_CONNECTED:
+		/*
+		 * Potentially, the back-end may be signalling
+		 * a capacity change; update the capacity.
+		 */
+		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+				   "sectors", "%Lu", &sectors);
+		if (XENBUS_EXIST_ERR(err))
+			return;
+		printk(KERN_INFO "Setting capacity to %Lu\n",
+		       sectors);
+		set_capacity(info->gd, sectors);
+		revalidate_disk(info->gd);
+
+		/* fall through */
+	case BLKIF_STATE_SUSPENDED:
 		return;
 
+	default:
+		break;
+	}
+
 	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
 		__func__, info->xbdev->otherend);
 
@@ -920,10 +1070,26 @@ static void blkfront_connect(struct blkfront_info *info)
 	}
 
 	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-			    "feature-barrier", "%lu", &info->feature_barrier,
+			    "feature-barrier", "%lu", &barrier,
 			    NULL);
+
+	/*
+	 * If there's no "feature-barrier" defined, then it means
+	 * we're dealing with a very old backend which writes
+	 * synchronously; draining will do what needs to get done.
+	 *
+	 * If there are barriers, then we can do full queued writes
+	 * with tagged barriers.
+	 *
+	 * If barriers are not supported, then there's no much we can
+	 * do, so just set ordering to NONE.
+	 */
 	if (err)
-		info->feature_barrier = 0;
+		info->feature_barrier = QUEUE_ORDERED_DRAIN;
+	else if (barrier)
+		info->feature_barrier = QUEUE_ORDERED_TAG;
+	else
+		info->feature_barrier = QUEUE_ORDERED_NONE;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
@@ -945,53 +1111,15 @@ static void blkfront_connect(struct blkfront_info *info)
 	info->is_ready = 1;
 }
 
-/**
- * Handle the change of state of the backend to Closing.  We must delete our
- * device-layer structures now, to ensure that writes are flushed through to
- * the backend.  Once is this done, we can switch to Closed in
- * acknowledgement.
- */
-static void blkfront_closing(struct xenbus_device *dev)
-{
-	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
-	unsigned long flags;
-
-	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);
-
-	if (info->rq == NULL)
-		goto out;
-
-	spin_lock_irqsave(&blkif_io_lock, flags);
-
-	/* No more blkif_request(). */
-	blk_stop_queue(info->rq);
-
-	/* No more gnttab callback work. */
-	gnttab_cancel_free_callback(&info->callback);
-	spin_unlock_irqrestore(&blkif_io_lock, flags);
-
-	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_scheduled_work();
-
-	blk_cleanup_queue(info->rq);
-	info->rq = NULL;
-
-	del_gendisk(info->gd);
-
- out:
-	xenbus_frontend_closed(dev);
-}
-
 /**
  * Callback received when the backend's state changes.
  */
-static void backend_changed(struct xenbus_device *dev,
+static void blkback_changed(struct xenbus_device *dev,
 			    enum xenbus_state backend_state)
 {
 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
-	struct block_device *bd;
 
-	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");
+	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
 
 	switch (backend_state) {
 	case XenbusStateInitialising:
@@ -1006,35 +1134,56 @@ static void backend_changed(struct xenbus_device *dev,
 		break;
 
 	case XenbusStateClosing:
-		if (info->gd == NULL) {
-			xenbus_frontend_closed(dev);
-			break;
-		}
-		bd = bdget_disk(info->gd, 0);
-		if (bd == NULL)
-			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
-
-		mutex_lock(&bd->bd_mutex);
-		if (info->users > 0)
-			xenbus_dev_error(dev, -EBUSY,
-					 "Device in use; refusing to close");
-		else
-			blkfront_closing(dev);
-		mutex_unlock(&bd->bd_mutex);
-		bdput(bd);
+		blkfront_closing(info);
 		break;
 	}
 }
 
-static int blkfront_remove(struct xenbus_device *dev)
+static int blkfront_remove(struct xenbus_device *xbdev)
 {
-	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
+	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
+	struct block_device *bdev = NULL;
+	struct gendisk *disk;
 
-	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);
+	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
 
 	blkif_free(info, 0);
 
-	kfree(info);
+	mutex_lock(&info->mutex);
+
+	disk = info->gd;
+	if (disk)
+		bdev = bdget_disk(disk, 0);
+
+	info->xbdev = NULL;
+	mutex_unlock(&info->mutex);
+
+	if (!bdev) {
+		kfree(info);
+		return 0;
+	}
+
+	/*
+	 * The xbdev was removed before we reached the Closed
+	 * state. See if it's safe to remove the disk. If the bdev
+	 * isn't closed yet, we let release take care of it.
+	 */
+
+	mutex_lock(&bdev->bd_mutex);
+	info = disk->private_data;
+
+	dev_warn(disk_to_dev(disk),
+		 "%s was hot-unplugged, %d stale handles\n",
+		 xbdev->nodename, bdev->bd_openers);
+
+	if (info && !bdev->bd_openers) {
+		xlvbd_release_gendisk(info);
+		disk->private_data = NULL;
+		kfree(info);
+	}
+
+	mutex_unlock(&bdev->bd_mutex);
+	bdput(bdev);
 
 	return 0;
 }
@@ -1043,30 +1192,78 @@ static int blkfront_is_ready(struct xenbus_device *dev)
 {
 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
 
-	return info->is_ready;
+	return info->is_ready && info->xbdev;
 }
 
 static int blkif_open(struct block_device *bdev, fmode_t mode)
 {
-	struct blkfront_info *info = bdev->bd_disk->private_data;
-	info->users++;
-	return 0;
+	struct gendisk *disk = bdev->bd_disk;
+	struct blkfront_info *info;
+	int err = 0;
+
+	lock_kernel();
+
+	info = disk->private_data;
+	if (!info) {
+		/* xbdev gone */
+		err = -ERESTARTSYS;
+		goto out;
+	}
+
+	mutex_lock(&info->mutex);
+
+	if (!info->gd)
+		/* xbdev is closed */
+		err = -ERESTARTSYS;
+
+	mutex_unlock(&info->mutex);
+
+out:
+	unlock_kernel();
+	return err;
 }
 }
 
 static int blkif_release(struct gendisk *disk, fmode_t mode)
 {
 	struct blkfront_info *info = disk->private_data;
-	if (info->users == 0) {
-		/* Check whether we have been instructed to close.  We will
-		   have ignored this request initially, as the device was
-		   still mounted. */
-		struct xenbus_device *dev = info->xbdev;
-		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
-
-		if (state == XenbusStateClosing && info->is_ready)
-			blkfront_closing(dev);
+	struct block_device *bdev;
+	struct xenbus_device *xbdev;
+
+	lock_kernel();
+
+	bdev = bdget_disk(disk, 0);
+	bdput(bdev);
+
+	if (bdev->bd_openers)
+		goto out;
+
+	/*
+	 * Check if we have been instructed to close. We will have
+	 * deferred this request, because the bdev was still open.
+	 */
+
+	mutex_lock(&info->mutex);
+	xbdev = info->xbdev;
+
+	if (xbdev && xbdev->state == XenbusStateClosing) {
+		/* pending switch to state closed */
+		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
+		xlvbd_release_gendisk(info);
+		xenbus_frontend_closed(info->xbdev);
+ 	}
+
+	mutex_unlock(&info->mutex);
+
+	if (!xbdev) {
+		/* sudden device removal */
+		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
+		xlvbd_release_gendisk(info);
+		disk->private_data = NULL;
+		kfree(info);
 	}
 	}
+
+out:
+	unlock_kernel();
 	return 0;
 }
 
@@ -1076,7 +1273,7 @@ static const struct block_device_operations xlvbd_block_fops =
 	.open = blkif_open,
 	.release = blkif_release,
 	.getgeo = blkif_getgeo,
-	.locked_ioctl = blkif_ioctl,
+	.ioctl = blkif_ioctl,
 };
 
 
@@ -1092,7 +1289,7 @@ static struct xenbus_driver blkfront = {
 	.probe = blkfront_probe,
 	.remove = blkfront_remove,
 	.resume = blkfront_resume,
-	.otherend_changed = backend_changed,
+	.otherend_changed = blkback_changed,
 	.is_ready = blkfront_is_ready,
 };
 

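To condense the blkfront_connect() logic above: the xenstore "feature-barrier" key is now translated into a QUEUE_ORDERED_* mode that xlvbd_barrier() hands straight to blk_queue_ordered(). A hedged sketch of that mapping, with error handling trimmed and the helper name invented for illustration:

/* Hedged sketch: mapping the xenstore barrier feature to an ordered mode. */
#include <linux/blkdev.h>

static int example_pick_ordered_mode(int gather_err, int barrier)
{
	if (gather_err)		/* no "feature-barrier" key: very old backend */
		return QUEUE_ORDERED_DRAIN;
	if (barrier)		/* backend advertises write barriers */
		return QUEUE_ORDERED_TAG;
	return QUEUE_ORDERED_NONE;
}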
+ 7 - 1
drivers/block/xsysace.c

@@ -89,6 +89,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
+#include <linux/smp_lock.h>
 #include <linux/ata.h>
 #include <linux/hdreg.h>
 #include <linux/platform_device.h>
@@ -465,7 +466,7 @@ struct request *ace_get_next_request(struct request_queue * q)
 	struct request *req;
 
 	while ((req = blk_peek_request(q)) != NULL) {
-		if (blk_fs_request(req))
+		if (req->cmd_type == REQ_TYPE_FS)
 			break;
 		blk_start_request(req);
 		__blk_end_request_all(req, -EIO);
@@ -901,11 +902,14 @@ static int ace_open(struct block_device *bdev, fmode_t mode)
 
 	dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1);
 
+	lock_kernel();
 	spin_lock_irqsave(&ace->lock, flags);
 	ace->users++;
 	spin_unlock_irqrestore(&ace->lock, flags);
 
 	check_disk_change(bdev);
+	unlock_kernel();
+
 	return 0;
 }
 
@@ -917,6 +921,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
 
 	dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1);
 
+	lock_kernel();
 	spin_lock_irqsave(&ace->lock, flags);
 	ace->users--;
 	if (ace->users == 0) {
@@ -924,6 +929,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode)
 		ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ);
 	}
 	spin_unlock_irqrestore(&ace->lock, flags);
+	unlock_kernel();
 	return 0;
 }
 

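The xsysace change is one instance of the tree-wide conversion in this merge: the blk_fs_request()-style helpers are replaced by direct tests of req->cmd_type. A hedged sketch of the idiom in a request function (the function name is illustrative, not from the patch):

/* Hedged sketch: filtering non-filesystem requests via req->cmd_type. */
#include <linux/blkdev.h>

static void example_do_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		if (req->cmd_type != REQ_TYPE_FS) {
			/* was: if (!blk_fs_request(req)) */
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/* ... handle the read/write request ... */
		__blk_end_request_all(req, 0);
	}
}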
+ 10 - 3
drivers/block/z2ram.c

@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/bitops.h>
+#include <linux/smp_lock.h>
 #include <linux/slab.h>
 
 #include <asm/setup.h>
@@ -153,6 +154,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
 
     device = MINOR(bdev->bd_dev);
 
+    lock_kernel();
     if ( current_device != -1 && current_device != device )
     {
 	rc = -EBUSY;
@@ -294,20 +296,25 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
 	set_capacity(z2ram_gendisk, z2ram_size >> 9);
     }
 
+    unlock_kernel();
     return 0;
 
 err_out_kfree:
     kfree(z2ram_map);
 err_out:
+    unlock_kernel();
     return rc;
 }
 
 static int
 z2_release(struct gendisk *disk, fmode_t mode)
 {
-    if ( current_device == -1 )
-	return 0;     
-
+    lock_kernel();
+    if ( current_device == -1 ) {
+    	unlock_kernel();
+    	return 0;
+    }
+    unlock_kernel();
     /*
      * FIXME: unmap memory
      */

+ 26 - 20
drivers/cdrom/cdrom.c

@@ -242,6 +242,8 @@
 
 -------------------------------------------------------------------------*/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define REVISION "Revision: 3.20"
 #define VERSION "Id: cdrom.c 3.20 2003/12/17"
 
@@ -314,11 +316,17 @@ static const char *mrw_format_status[] = {
 static const char *mrw_address_space[] = { "DMA", "GAA" };
 
 #if (ERRLOGMASK!=CD_NOTHING)
-#define cdinfo(type, fmt, args...) \
-        if ((ERRLOGMASK & type) || debug==1 ) \
-            printk(KERN_INFO "cdrom: " fmt, ## args)
+#define cdinfo(type, fmt, args...)			\
+do {							\
+	if ((ERRLOGMASK & type) || debug == 1)		\
+		pr_info(fmt, ##args);			\
+} while (0)
 #else
 #else
-#define cdinfo(type, fmt, args...) 
+#define cdinfo(type, fmt, args...)			\
+do {							\
+	if (0 && (ERRLOGMASK & type) || debug == 1)	\
+		pr_info(fmt, ##args);			\
+} while (0)
 #endif
 
 /* These are used to simplify getting data in from and back to user land */
@@ -395,7 +403,7 @@ int register_cdrom(struct cdrom_device_info *cdi)
 	if (cdo->open == NULL || cdo->release == NULL)
 		return -EINVAL;
 	if (!banner_printed) {
-		printk(KERN_INFO "Uniform CD-ROM driver " REVISION "\n");
+		pr_info("Uniform CD-ROM driver " REVISION "\n");
 		banner_printed = 1;
 		cdrom_sysctl_register();
 	}
@@ -546,7 +554,7 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
 	unsigned char buffer[12];
 	int ret;
 
-	printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : "");
+	pr_info("%sstarting format\n", cont ? "Re" : "");
 
 	/*
 	 * FmtData bit set (bit 4), format type is 1
@@ -576,7 +584,7 @@ static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
 
 	ret = cdi->ops->generic_packet(cdi, &cgc);
 	if (ret)
-		printk(KERN_INFO "cdrom: bgformat failed\n");
+		pr_info("bgformat failed\n");
 
 	return ret;
 }
@@ -622,8 +630,7 @@ static int cdrom_mrw_exit(struct cdrom_device_info *cdi)
 
 	ret = 0;
 	if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) {
-		printk(KERN_INFO "cdrom: issuing MRW back ground "
-				"format suspend\n");
+		pr_info("issuing MRW background format suspend\n");
 		ret = cdrom_mrw_bgformat_susp(cdi, 0);
 	}
 
@@ -658,7 +665,8 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
 	if ((ret = cdrom_mode_select(cdi, &cgc)))
 		return ret;
 
-	printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]);
+	pr_info("%s: mrw address space %s selected\n",
+		cdi->name, mrw_address_space[space]);
 	return 0;
 }
 
@@ -762,7 +770,7 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
 	 * always reset to DMA lba space on open
 	 */
 	if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) {
-		printk(KERN_ERR "cdrom: failed setting lba address space\n");
+		pr_err("failed setting lba address space\n");
 		return 1;
 	}
 
@@ -781,8 +789,7 @@ static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
 	 * 3	-	MRW formatting complete
 	 */
 	ret = 0;
-	printk(KERN_INFO "cdrom open: mrw_status '%s'\n",
-			mrw_format_status[di.mrw_status]);
+	pr_info("open: mrw_status '%s'\n", mrw_format_status[di.mrw_status]);
 	if (!di.mrw_status)
 		ret = 1;
 	else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE &&
@@ -932,8 +939,7 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
 		return;
 	}
 
-	printk(KERN_INFO "cdrom: %s: dirty DVD+RW media, \"finalizing\"\n",
-	       cdi->name);
+	pr_info("%s: dirty DVD+RW media, \"finalizing\"\n", cdi->name);
 
 	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
 	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
@@ -2176,7 +2182,7 @@ retry:
 	 * frame dma, so drop to single frame dma if we need to
 	 */
 	if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) {
-		printk("cdrom: dropping to single frame dma\n");
+		pr_info("dropping to single frame dma\n");
 		cdi->cdda_method = CDDA_BPC_SINGLE;
 		goto retry;
 	}
@@ -2189,7 +2195,7 @@ retry:
 	if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b)
 		return ret;
 
-	printk("cdrom: dropping to old style cdda (sense=%x)\n", cdi->last_sense);
+	pr_info("dropping to old style cdda (sense=%x)\n", cdi->last_sense);
 	cdi->cdda_method = CDDA_OLD;
 	return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);	
 }
@@ -3401,7 +3407,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
 					"\t%d", CDROM_CAN(val) != 0);
 			break;
 		default:
-			printk(KERN_INFO "cdrom: invalid option%d\n", option);
+			pr_info("invalid option%d\n", option);
 			return 1;
 		}
 		if (!ret)
@@ -3491,7 +3497,7 @@ doit:
 	mutex_unlock(&cdrom_mutex);
 	return proc_dostring(ctl, write, buffer, lenp, ppos);
 done:
-	printk(KERN_INFO "cdrom: info buffer too small\n");
+	pr_info("info buffer too small\n");
 	goto doit;
 }
 
@@ -3665,7 +3671,7 @@ static int __init cdrom_init(void)
 
 static void __exit cdrom_exit(void)
 {
-	printk(KERN_INFO "Uniform CD-ROM driver unloaded\n");
+	pr_info("Uniform CD-ROM driver unloaded\n");
 	cdrom_sysctl_unregister();
 }
 

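The cdrom.c conversion above relies on the pr_fmt mechanism: defining pr_fmt() before the includes makes every pr_info()/pr_err() in the file carry the module prefix automatically, so the hand-written "cdrom: " prefixes can be dropped. A minimal hedged sketch of the same pattern in an arbitrary module:

/* Hedged sketch: per-file message prefix via pr_fmt(). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("loaded\n");	/* printed as "<modname>: loaded" */
	return 0;
}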
+ 30 - 18
drivers/cdrom/gdrom.c

@@ -19,6 +19,8 @@
  *
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/fs.h>
@@ -32,6 +34,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
+#include <linux/smp_lock.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/platform_device.h>
@@ -339,8 +342,7 @@ static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
 		tocuse = 0;
 		err = gdrom_readtoc_cmd(gd.toc, 0);
 		if (err) {
-			printk(KERN_INFO "GDROM: Could not get CD "
-				"table of contents\n");
+			pr_info("Could not get CD table of contents\n");
 			return -ENXIO;
 		}
 	}
@@ -357,8 +359,7 @@ static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
 	} while (track >= fentry);
 
 	if ((track > 100) || (track < get_entry_track(gd.toc->first))) {
-		printk(KERN_INFO "GDROM: No data on the last "
-			"session of the CD\n");
+		pr_info("No data on the last session of the CD\n");
 		gdrom_getsense(NULL);
 		return -ENXIO;
 	}
@@ -451,14 +452,14 @@ static int gdrom_getsense(short *bufstring)
 		goto cleanup_sense;
 	insw(GDROM_DATA_REG, &sense, sense_command->buflen/2);
 	if (sense[1] & 40) {
-		printk(KERN_INFO "GDROM: Drive not ready - command aborted\n");
+		pr_info("Drive not ready - command aborted\n");
 		goto cleanup_sense;
 	}
 	sense_key = sense[1] & 0x0F;
 	if (sense_key < ARRAY_SIZE(sense_texts))
-		printk(KERN_INFO "GDROM: %s\n", sense_texts[sense_key].text);
+		pr_info("%s\n", sense_texts[sense_key].text);
 	else
-		printk(KERN_ERR "GDROM: Unknown sense key: %d\n", sense_key);
+		pr_err("Unknown sense key: %d\n", sense_key);
 	if (bufstring) /* return addional sense data */
 		memcpy(bufstring, &sense[4], 2);
 	if (sense_key < 2)
@@ -492,12 +493,18 @@ static struct cdrom_device_ops gdrom_ops = {
 
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
 {
-	return cdrom_open(gd.cd_info, bdev, mode);
+	int ret;
+	lock_kernel();
+	ret = cdrom_open(gd.cd_info, bdev, mode);
+	unlock_kernel();
+	return ret;
 }
 
 static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode)
 {
+	lock_kernel();
 	cdrom_release(gd.cd_info, mode);
+	unlock_kernel();
 	return 0;
 }
 
@@ -509,7 +516,13 @@ static int gdrom_bdops_mediachanged(struct gendisk *disk)
 static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode,
 	unsigned cmd, unsigned long arg)
 {
-	return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
+	int ret;
+
+	lock_kernel();
+	ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg);
+	unlock_kernel();
+
+	return ret;
 }
 
 static const struct block_device_operations gdrom_bdops = {
@@ -517,7 +530,7 @@ static const struct block_device_operations gdrom_bdops = {
 	.open			= gdrom_bdops_open,
 	.release		= gdrom_bdops_release,
 	.media_changed		= gdrom_bdops_mediachanged,
-	.locked_ioctl		= gdrom_bdops_ioctl,
+	.ioctl			= gdrom_bdops_ioctl,
 };
 
 static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id)
@@ -643,14 +656,13 @@ static void gdrom_request(struct request_queue *rq)
 	struct request *req;
 
 	while ((req = blk_fetch_request(rq)) != NULL) {
-		if (!blk_fs_request(req)) {
-			printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
+		if (req->cmd_type != REQ_TYPE_FS) {
+			printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
 		if (rq_data_dir(req) != READ) {
-			printk(KERN_NOTICE "GDROM: Read only device -");
-			printk(" write request ignored\n");
+			pr_notice("Read only device - write request ignored\n");
 			__blk_end_request_all(req, -EIO);
 			continue;
 		}
@@ -685,7 +697,7 @@ static int __devinit gdrom_outputversion(void)
 	firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL);
 	if (!firmw_ver)
 		goto free_manuf_name;
-	printk(KERN_INFO "GDROM: %s from %s with firmware %s\n",
+	pr_info("%s from %s with firmware %s\n",
 		model_name, manuf_name, firmw_ver);
 	err = 0;
 	kfree(firmw_ver);
@@ -757,7 +769,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 	int err;
 	/* Start the device */
 	if (gdrom_execute_diagnostic() != 1) {
-		printk(KERN_WARNING "GDROM: ATA Probe for GDROM failed.\n");
+		pr_warning("ATA Probe for GDROM failed\n");
 		return -ENODEV;
 	}
 	/* Print out firmware ID */
@@ -767,7 +779,7 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
 	gdrom_major = register_blkdev(0, GDROM_DEV_NAME);
 	if (gdrom_major <= 0)
 		return gdrom_major;
-	printk(KERN_INFO "GDROM: Registered with major number %d\n",
+	pr_info("Registered with major number %d\n",
 		gdrom_major);
 	/* Specify basic properties of drive */
 	gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL);
@@ -818,7 +830,7 @@ probe_fail_no_disk:
 	unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
 	gdrom_major = 0;
 probe_fail_no_mem:
-	printk(KERN_WARNING "GDROM: Probe failed - error is 0x%X\n", err);
+	pr_warning("Probe failed - error is 0x%X\n", err);
 	return err;
 }
 

+ 53 - 53
drivers/cdrom/viocd.c

@@ -31,6 +31,8 @@
  * the OS/400 partition.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/major.h>
 #include <linux/blkdev.h>
 #include <linux/cdrom.h>
@@ -40,6 +42,7 @@
 #include <linux/module.h>
 #include <linux/completion.h>
 #include <linux/proc_fs.h>
+#include <linux/smp_lock.h>
 #include <linux/seq_file.h>
 #include <linux/scatterlist.h>
 
@@ -53,9 +56,6 @@
 
 #define VIOCD_VERS "1.06"
 
-#define VIOCD_KERN_WARNING		KERN_WARNING "viocd: "
-#define VIOCD_KERN_INFO			KERN_INFO "viocd: "
-
 /*
  * Should probably make this a module parameter....sigh
  */
@@ -154,13 +154,21 @@ static const struct file_operations proc_viocd_operations = {
 static int viocd_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct disk_info *di = bdev->bd_disk->private_data;
-	return cdrom_open(&di->viocd_info, bdev, mode);
+	int ret;
+
+	lock_kernel();
+	ret = cdrom_open(&di->viocd_info, bdev, mode);
+	unlock_kernel();
+
+	return ret;
 }
 
 static int viocd_blk_release(struct gendisk *disk, fmode_t mode)
 {
 	struct disk_info *di = disk->private_data;
+	lock_kernel();
 	cdrom_release(&di->viocd_info, mode);
+	unlock_kernel();
 	return 0;
 }
 
@@ -168,7 +176,13 @@ static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode,
 		unsigned cmd, unsigned long arg)
 {
 	struct disk_info *di = bdev->bd_disk->private_data;
-	return cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
+	int ret;
+
+	lock_kernel();
+	ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg);
+	unlock_kernel();
+
+	return ret;
 }
 
 static int viocd_blk_media_changed(struct gendisk *disk)
@@ -181,7 +195,7 @@ static const struct block_device_operations viocd_fops = {
 	.owner =		THIS_MODULE,
 	.open =			viocd_blk_open,
 	.release =		viocd_blk_release,
-	.locked_ioctl =		viocd_blk_ioctl,
+	.ioctl =		viocd_blk_ioctl,
 	.media_changed =	viocd_blk_media_changed,
 };
 
@@ -202,9 +216,8 @@ static int viocd_open(struct cdrom_device_info *cdi, int purpose)
 			(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
 			(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
 			0, 0, 0);
 			0, 0, 0);
 	if (hvrc != 0) {
 	if (hvrc != 0) {
-		printk(VIOCD_KERN_WARNING
-				"bad rc on HvCallEvent_signalLpEventFast %d\n",
-				(int)hvrc);
+		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+			   (int)hvrc);
 		return -EIO;
 		return -EIO;
 	}
 	}
 
 
@@ -213,8 +226,8 @@ static int viocd_open(struct cdrom_device_info *cdi, int purpose)
 	if (we.rc) {
 	if (we.rc) {
 		const struct vio_error_entry *err =
 		const struct vio_error_entry *err =
 			vio_lookup_rc(viocd_err_table, we.sub_result);
 			vio_lookup_rc(viocd_err_table, we.sub_result);
-		printk(VIOCD_KERN_WARNING "bad rc %d:0x%04X on open: %s\n",
-				we.rc, we.sub_result, err->msg);
+		pr_warning("bad rc %d:0x%04X on open: %s\n",
+			   we.rc, we.sub_result, err->msg);
 		return -err->errno;
 		return -err->errno;
 	}
 	}
 
 
@@ -234,9 +247,8 @@ static void viocd_release(struct cdrom_device_info *cdi)
 			viopath_targetinst(viopath_hostLp), 0,
 			viopath_targetinst(viopath_hostLp), 0,
 			VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
 			VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0);
 	if (hvrc != 0)
 	if (hvrc != 0)
-		printk(VIOCD_KERN_WARNING
-				"bad rc on HvCallEvent_signalLpEventFast %d\n",
-				(int)hvrc);
+		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+			   (int)hvrc);
 }
 }
 
 
 /* Send a read or write request to OS/400 */
 /* Send a read or write request to OS/400 */
@@ -262,13 +274,12 @@ static int send_request(struct request *req)
 
 
 	sg_init_table(&sg, 1);
 	sg_init_table(&sg, 1);
         if (blk_rq_map_sg(req->q, req, &sg) == 0) {
         if (blk_rq_map_sg(req->q, req, &sg) == 0) {
-		printk(VIOCD_KERN_WARNING
-				"error setting up scatter/gather list\n");
+		pr_warning("error setting up scatter/gather list\n");
 		return -1;
 		return -1;
 	}
 	}
 
 
 	if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
 	if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) {
-		printk(VIOCD_KERN_WARNING "error allocating sg tce\n");
+		pr_warning("error allocating sg tce\n");
 		return -1;
 		return -1;
 	}
 	}
 	dmaaddr = sg_dma_address(&sg);
 	dmaaddr = sg_dma_address(&sg);
@@ -284,7 +295,7 @@ static int send_request(struct request *req)
 			((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
 			((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
 			(u64)blk_rq_pos(req) * 512, len, 0);
 			(u64)blk_rq_pos(req) * 512, len, 0);
 	if (hvrc != HvLpEvent_Rc_Good) {
 	if (hvrc != HvLpEvent_Rc_Good) {
-		printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
+		pr_warning("hv error on op %d\n", (int)hvrc);
 		return -1;
 		return -1;
 	}
 	}
 
 
@@ -298,11 +309,10 @@ static void do_viocd_request(struct request_queue *q)
 	struct request *req;
 	struct request *req;
 
 
 	while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
 	while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
-		if (!blk_fs_request(req))
+		if (req->cmd_type != REQ_TYPE_FS)
 			__blk_end_request_all(req, -EIO);
 			__blk_end_request_all(req, -EIO);
 		else if (send_request(req) < 0) {
 		else if (send_request(req) < 0) {
-			printk(VIOCD_KERN_WARNING
-					"unable to send message to OS/400!");
+			pr_warning("unable to send message to OS/400!\n");
 			__blk_end_request_all(req, -EIO);
 			__blk_end_request_all(req, -EIO);
 		} else
 		} else
 			rwreq++;
 			rwreq++;
@@ -327,8 +337,8 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
 			(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
 			(u64)&we, VIOVERSION << 16, ((u64)device_no << 48),
 			0, 0, 0);
 			0, 0, 0);
 	if (hvrc != 0) {
 	if (hvrc != 0) {
-		printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n",
-				(int)hvrc);
+		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+			   (int)hvrc);
 		return -EIO;
 		return -EIO;
 	}
 	}
 
 
@@ -338,9 +348,8 @@ static int viocd_media_changed(struct cdrom_device_info *cdi, int disc_nr)
 	if (we.rc) {
 	if (we.rc) {
 		const struct vio_error_entry *err =
 		const struct vio_error_entry *err =
 			vio_lookup_rc(viocd_err_table, we.sub_result);
 			vio_lookup_rc(viocd_err_table, we.sub_result);
-		printk(VIOCD_KERN_WARNING
-				"bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
-				we.rc, we.sub_result, err->msg);
+		pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n",
+			   we.rc, we.sub_result, err->msg);
 		return 0;
 		return 0;
 	}
 	}
 
 
@@ -367,8 +376,8 @@ static int viocd_lock_door(struct cdrom_device_info *cdi, int locking)
 			(u64)&we, VIOVERSION << 16,
 			(u64)&we, VIOVERSION << 16,
 			(device_no << 48) | (flags << 32), 0, 0, 0);
 			(device_no << 48) | (flags << 32), 0, 0, 0);
 	if (hvrc != 0) {
 	if (hvrc != 0) {
-		printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n",
-				(int)hvrc);
+		pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n",
+			   (int)hvrc);
 		return -EIO;
 		return -EIO;
 	}
 	}
 
 
@@ -455,8 +464,7 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
 		return;
 		return;
 	/* First, we should NEVER get an int here...only acks */
 	/* First, we should NEVER get an int here...only acks */
 	if (hvlpevent_is_int(event)) {
 	if (hvlpevent_is_int(event)) {
-		printk(VIOCD_KERN_WARNING
-				"Yikes! got an int in viocd event handler!\n");
+		pr_warning("Yikes! got an int in viocd event handler!\n");
 		if (hvlpevent_need_ack(event)) {
 		if (hvlpevent_need_ack(event)) {
 			event->xRc = HvLpEvent_Rc_InvalidSubtype;
 			event->xRc = HvLpEvent_Rc_InvalidSubtype;
 			HvCallEvent_ackLpEvent(event);
 			HvCallEvent_ackLpEvent(event);
@@ -510,10 +518,9 @@ return_complete:
 			const struct vio_error_entry *err =
 			const struct vio_error_entry *err =
 				vio_lookup_rc(viocd_err_table,
 				vio_lookup_rc(viocd_err_table,
 						bevent->sub_result);
 						bevent->sub_result);
-			printk(VIOCD_KERN_WARNING "request %p failed "
-					"with rc %d:0x%04X: %s\n",
-					req, event->xRc,
-					bevent->sub_result, err->msg);
+			pr_warning("request %p failed with rc %d:0x%04X: %s\n",
+				   req, event->xRc,
+				   bevent->sub_result, err->msg);
 			__blk_end_request_all(req, -EIO);
 			__blk_end_request_all(req, -EIO);
 		} else
 		} else
 			__blk_end_request_all(req, 0);
 			__blk_end_request_all(req, 0);
@@ -524,9 +531,8 @@ return_complete:
 		break;
 		break;
 
 
 	default:
 	default:
-		printk(VIOCD_KERN_WARNING
-				"message with invalid subtype %0x04X!\n",
-				event->xSubtype & VIOMINOR_SUBTYPE_MASK);
+		pr_warning("message with invalid subtype %0x04X!\n",
+			   event->xSubtype & VIOMINOR_SUBTYPE_MASK);
 		if (hvlpevent_need_ack(event)) {
 		if (hvlpevent_need_ack(event)) {
 			event->xRc = HvLpEvent_Rc_InvalidSubtype;
 			event->xRc = HvLpEvent_Rc_InvalidSubtype;
 			HvCallEvent_ackLpEvent(event);
 			HvCallEvent_ackLpEvent(event);
@@ -593,23 +599,19 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 	sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);
 	sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno);
 
 
 	if (register_cdrom(c) != 0) {
 	if (register_cdrom(c) != 0) {
-		printk(VIOCD_KERN_WARNING "Cannot register viocd CD-ROM %s!\n",
-				c->name);
+		pr_warning("Cannot register viocd CD-ROM %s!\n", c->name);
 		goto out;
 		goto out;
 	}
 	}
-	printk(VIOCD_KERN_INFO "cd %s is iSeries resource %10.10s "
-			"type %4.4s, model %3.3s\n",
-			c->name, d->rsrcname, d->type, d->model);
+	pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n",
+		c->name, d->rsrcname, d->type, d->model);
 	q = blk_init_queue(do_viocd_request, &viocd_reqlock);
 	q = blk_init_queue(do_viocd_request, &viocd_reqlock);
 	if (q == NULL) {
 	if (q == NULL) {
-		printk(VIOCD_KERN_WARNING "Cannot allocate queue for %s!\n",
-				c->name);
+		pr_warning("Cannot allocate queue for %s!\n", c->name);
 		goto out_unregister_cdrom;
 		goto out_unregister_cdrom;
 	}
 	}
 	gendisk = alloc_disk(1);
 	gendisk = alloc_disk(1);
 	if (gendisk == NULL) {
 	if (gendisk == NULL) {
-		printk(VIOCD_KERN_WARNING "Cannot create gendisk for %s!\n",
-				c->name);
+		pr_warning("Cannot create gendisk for %s!\n", c->name);
 		goto out_cleanup_queue;
 		goto out_cleanup_queue;
 	}
 	}
 	gendisk->major = VIOCD_MAJOR;
 	gendisk->major = VIOCD_MAJOR;
@@ -682,21 +684,19 @@ static int __init viocd_init(void)
 			return -ENODEV;
 			return -ENODEV;
 	}
 	}
 
 
-	printk(VIOCD_KERN_INFO "vers " VIOCD_VERS ", hosting partition %d\n",
-			viopath_hostLp);
+	pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp);
 
 
 	if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
 	if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) {
-		printk(VIOCD_KERN_WARNING "Unable to get major %d for %s\n",
-				VIOCD_MAJOR, VIOCD_DEVICE);
+		pr_warning("Unable to get major %d for %s\n",
+			   VIOCD_MAJOR, VIOCD_DEVICE);
 		return -EIO;
 		return -EIO;
 	}
 	}
 
 
 	ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
 	ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio,
 			MAX_CD_REQ + 2);
 			MAX_CD_REQ + 2);
 	if (ret) {
 	if (ret) {
-		printk(VIOCD_KERN_WARNING
-				"error opening path to host partition %d\n",
-				viopath_hostLp);
+		pr_warning("error opening path to host partition %d\n",
+			   viopath_hostLp);
 		goto out_unregister;
 		goto out_unregister;
 	}
 	}
 
 

+ 10 - 7
drivers/ide/ide-atapi.c

@@ -190,7 +190,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
 
 	BUG_ON(sense_len > sizeof(*sense));
 
-	if (blk_sense_request(rq) || drive->sense_rq_armed)
+	if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed)
 		return;
 
 	memset(sense, 0, sizeof(*sense));
@@ -307,13 +307,16 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);
 
 int ide_cd_get_xferlen(struct request *rq)
 {
-	if (blk_fs_request(rq))
+	switch (rq->cmd_type) {
+	case REQ_TYPE_FS:
 		return 32768;
-	else if (blk_sense_request(rq) || blk_pc_request(rq) ||
-			 rq->cmd_type == REQ_TYPE_ATA_PC)
+	case REQ_TYPE_SENSE:
+	case REQ_TYPE_BLOCK_PC:
+	case REQ_TYPE_ATA_PC:
 		return blk_rq_bytes(rq);
-	else
+	default:
 		return 0;
+	}
 }
 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);
 
@@ -474,12 +477,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 		if (uptodate == 0)
 			drive->failed_pc = NULL;
 
-		if (blk_special_request(rq)) {
+		if (rq->cmd_type == REQ_TYPE_SPECIAL) {
 			rq->errors = 0;
 			error = 0;
 		} else {
 
-			if (blk_fs_request(rq) == 0 && uptodate <= 0) {
+			if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
 				if (rq->errors == 0)
 					rq->errors = -EIO;
 			}

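The same series also drops the request-flag helpers: tests like blk_barrier_rq() or blk_rq_quiet() become explicit bit tests on rq->cmd_flags, as the xen-blkfront and ide-cd hunks show. A brief hedged sketch (the function is illustrative only):

/* Hedged sketch: direct cmd_flags tests replacing the old request-flag helpers. */
#include <linux/blkdev.h>

static void example_flag_checks(struct request *rq)
{
	bool barrier = rq->cmd_flags & REQ_HARDBARRIER;	/* was blk_barrier_rq(rq) */
	bool quiet = rq->cmd_flags & REQ_QUIET;		/* was blk_rq_quiet(rq) */

	if (barrier)
		pr_info("issuing request as a write barrier\n");
	if (!quiet)
		pr_err("request failed\n");
}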
+ 61 - 37
drivers/ide/ide-cd.c

@@ -31,6 +31,7 @@
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/errno.h>
@@ -176,7 +177,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
 			if (!sense->valid)
 				break;
 			if (failed_command == NULL ||
-					!blk_fs_request(failed_command))
+			    failed_command->cmd_type != REQ_TYPE_FS)
 				break;
 			sector = (sense->information[0] << 24) |
 				 (sense->information[1] << 16) |
@@ -292,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 				  "stat 0x%x",
 				  "stat 0x%x",
 				  rq->cmd[0], rq->cmd_type, err, stat);
 				  rq->cmd[0], rq->cmd_type, err, stat);
 
 
-	if (blk_sense_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_SENSE) {
 		/*
 		/*
 		 * We got an error trying to get sense info from the drive
 		 * We got an error trying to get sense info from the drive
 		 * (probably while trying to recover from a former error).
 		 * (probably while trying to recover from a former error).
@@ -303,7 +304,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 	}
 	}
 
 
 	/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
 	/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
-	if (blk_pc_request(rq) && !rq->errors)
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
 		rq->errors = SAM_STAT_CHECK_CONDITION;
 		rq->errors = SAM_STAT_CHECK_CONDITION;
 
 
 	if (blk_noretry_request(rq))
 	if (blk_noretry_request(rq))
@@ -311,13 +312,14 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 
 
 	switch (sense_key) {
 	switch (sense_key) {
 	case NOT_READY:
 	case NOT_READY:
-		if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) {
+		if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
 			if (ide_cd_breathe(drive, rq))
 			if (ide_cd_breathe(drive, rq))
 				return 1;
 				return 1;
 		} else {
 		} else {
 			cdrom_saw_media_change(drive);
 			cdrom_saw_media_change(drive);
 
 
-			if (blk_fs_request(rq) && !blk_rq_quiet(rq))
+			if (rq->cmd_type == REQ_TYPE_FS &&
+			    !(rq->cmd_flags & REQ_QUIET))
 				printk(KERN_ERR PFX "%s: tray open\n",
 				printk(KERN_ERR PFX "%s: tray open\n",
 					drive->name);
 					drive->name);
 		}
 		}
@@ -326,7 +328,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 	case UNIT_ATTENTION:
 	case UNIT_ATTENTION:
 		cdrom_saw_media_change(drive);
 		cdrom_saw_media_change(drive);
 
 
-		if (blk_fs_request(rq) == 0)
+		if (rq->cmd_type != REQ_TYPE_FS)
 			return 0;
 			return 0;
 
 
 		/*
 		/*
@@ -352,7 +354,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * No point in retrying after an illegal request or data
 		 * No point in retrying after an illegal request or data
 		 * protect error.
 		 * protect error.
 		 */
 		 */
-		if (!blk_rq_quiet(rq))
+		if (!(rq->cmd_flags & REQ_QUIET))
 			ide_dump_status(drive, "command error", stat);
 			ide_dump_status(drive, "command error", stat);
 		do_end_request = 1;
 		do_end_request = 1;
 		break;
 		break;
@@ -361,20 +363,20 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 		 * No point in re-trying a zillion times on a bad sector.
 		 * No point in re-trying a zillion times on a bad sector.
 		 * If we got here the error is not correctable.
 		 * If we got here the error is not correctable.
 		 */
 		 */
-		if (!blk_rq_quiet(rq))
+		if (!(rq->cmd_flags & REQ_QUIET))
 			ide_dump_status(drive, "media error "
 			ide_dump_status(drive, "media error "
 					"(bad sector)", stat);
 					"(bad sector)", stat);
 		do_end_request = 1;
 		do_end_request = 1;
 		break;
 		break;
 	case BLANK_CHECK:
 	case BLANK_CHECK:
 		/* disk appears blank? */
 		/* disk appears blank? */
-		if (!blk_rq_quiet(rq))
+		if (!(rq->cmd_flags & REQ_QUIET))
 			ide_dump_status(drive, "media error (blank)",
 			ide_dump_status(drive, "media error (blank)",
 					stat);
 					stat);
 		do_end_request = 1;
 		do_end_request = 1;
 		break;
 		break;
 	default:
 	default:
-		if (blk_fs_request(rq) == 0)
+		if (rq->cmd_type != REQ_TYPE_FS)
 			break;
 			break;
 		if (err & ~ATA_ABORTED) {
 		if (err & ~ATA_ABORTED) {
 			/* go to the default handler for other errors */
 			/* go to the default handler for other errors */
@@ -385,7 +387,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
 			do_end_request = 1;
 			do_end_request = 1;
 	}
 	}
 
 
-	if (blk_fs_request(rq) == 0) {
+	if (rq->cmd_type != REQ_TYPE_FS) {
 		rq->cmd_flags |= REQ_FAILED;
 		rq->cmd_flags |= REQ_FAILED;
 		do_end_request = 1;
 		do_end_request = 1;
 	}
 	}
@@ -532,7 +534,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 	ide_expiry_t *expiry = NULL;
 	int dma_error = 0, dma, thislen, uptodate = 0;
 	int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
-	int sense = blk_sense_request(rq);
+	int sense = (rq->cmd_type == REQ_TYPE_SENSE);
 	unsigned int timeout;
 	u16 len;
 	u8 ireason, stat;
@@ -575,7 +577,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 	ide_read_bcount_and_ireason(drive, &len, &ireason);

-	thislen = blk_fs_request(rq) ? len : cmd->nleft;
+	thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
 	if (thislen > len)
 		thislen = len;

@@ -584,7 +586,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 	/* If DRQ is clear, the command has completed. */
 	if ((stat & ATA_DRQ) == 0) {
-		if (blk_fs_request(rq)) {
+		if (rq->cmd_type == REQ_TYPE_FS) {
 			/*
 			 * If we're not done reading/writing, complain.
 			 * Otherwise, complete the command normally.
@@ -598,7 +600,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 					rq->cmd_flags |= REQ_FAILED;
 				uptodate = 0;
 			}
-		} else if (!blk_pc_request(rq)) {
+		} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
 			ide_cd_request_sense_fixup(drive, cmd);

 			uptodate = cmd->nleft ? 0 : 1;
@@ -647,7 +649,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

 	/* pad, if necessary */
 	if (len > 0) {
-		if (blk_fs_request(rq) == 0 || write == 0)
+		if (rq->cmd_type != REQ_TYPE_FS || write == 0)
 			ide_pad_transfer(drive, write, len);
 		else {
 			printk(KERN_ERR PFX "%s: confused, missing data\n",
@@ -656,11 +658,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 		}
 	}

-	if (blk_pc_request(rq)) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		timeout = rq->timeout;
 	} else {
 		timeout = ATAPI_WAIT_PC;
-		if (!blk_fs_request(rq))
+		if (rq->cmd_type != REQ_TYPE_FS)
 			expiry = ide_cd_expiry;
 	}

@@ -669,7 +671,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 	return ide_started;

 out_end:
-	if (blk_pc_request(rq) && rc == 0) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
 		rq->resid_len = 0;
 		blk_end_request_all(rq, 0);
 		hwif->rq = NULL;
@@ -677,7 +679,7 @@ out_end:
 		if (sense && uptodate)
 			ide_cd_complete_failed_rq(drive, rq);

-		if (blk_fs_request(rq)) {
+		if (rq->cmd_type == REQ_TYPE_FS) {
 			if (cmd->nleft == 0)
 				uptodate = 1;
 		} else {
@@ -690,7 +692,7 @@ out_end:
 				return ide_stopped;

 		/* make sure it's fully ended */
-		if (blk_fs_request(rq) == 0) {
+		if (rq->cmd_type != REQ_TYPE_FS) {
 			rq->resid_len -= cmd->nbytes - cmd->nleft;
 			if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
 				rq->resid_len += cmd->last_xfer_len;
@@ -750,7 +752,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 	ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
 				  rq->cmd[0], rq->cmd_type);

-	if (blk_pc_request(rq))
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		rq->cmd_flags |= REQ_QUIET;
 	else
 		rq->cmd_flags &= ~REQ_FAILED;
@@ -791,21 +793,26 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 	if (drive->debug_mask & IDE_DBG_RQ)
 		blk_dump_rq_flags(rq, "ide_cd_do_request");

-	if (blk_fs_request(rq)) {
+	switch (rq->cmd_type) {
+	case REQ_TYPE_FS:
 		if (cdrom_start_rw(drive, rq) == ide_stopped)
 			goto out_end;
-	} else if (blk_sense_request(rq) || blk_pc_request(rq) ||
-		   rq->cmd_type == REQ_TYPE_ATA_PC) {
+		break;
+	case REQ_TYPE_SENSE:
+	case REQ_TYPE_BLOCK_PC:
+	case REQ_TYPE_ATA_PC:
 		if (!rq->timeout)
 			rq->timeout = ATAPI_WAIT_PC;

 		cdrom_do_block_pc(drive, rq);
-	} else if (blk_special_request(rq)) {
+		break;
+	case REQ_TYPE_SPECIAL:
 		/* right now this can only be a reset... */
 		uptodate = 1;
 		goto out_end;
-	} else
+	default:
 		BUG();
+	}

 	/* prepare sense request for this command */
 	ide_prep_sense(drive, rq);
@@ -817,7 +824,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,

 	cmd.rq = rq;

-	if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+	if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
 		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 		ide_map_sg(drive, &cmd);
 	}
@@ -1373,9 +1380,9 @@ static int ide_cdrom_prep_pc(struct request *rq)

 static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
 {
-	if (blk_fs_request(rq))
+	if (rq->cmd_type == REQ_TYPE_FS)
 		return ide_cdrom_prep_fs(q, rq);
-	else if (blk_pc_request(rq))
+	else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 		return ide_cdrom_prep_pc(rq);

 	return 0;
@@ -1592,17 +1599,19 @@ static struct ide_driver ide_cdrom_driver = {

 static int idecd_open(struct block_device *bdev, fmode_t mode)
 {
-	struct cdrom_info *info = ide_cd_get(bdev->bd_disk);
-	int rc = -ENOMEM;
+	struct cdrom_info *info;
+	int rc = -ENXIO;

+	lock_kernel();
+	info = ide_cd_get(bdev->bd_disk);
 	if (!info)
-		return -ENXIO;
+		goto out;

 	rc = cdrom_open(&info->devinfo, bdev, mode);
-
 	if (rc < 0)
 		ide_cd_put(info);
-
+out:
+	unlock_kernel();
 	return rc;
 }

@@ -1610,9 +1619,11 @@ static int idecd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct cdrom_info *info = ide_drv_g(disk, cdrom_info);

+	lock_kernel();
 	cdrom_release(&info->devinfo, mode);

 	ide_cd_put(info);
+	unlock_kernel();

 	return 0;
 }
@@ -1656,7 +1667,7 @@ static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
 	return 0;
 }

-static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
+static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long arg)
 {
 	struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info);
@@ -1678,6 +1689,19 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
 	return err;
 }

+static int idecd_ioctl(struct block_device *bdev, fmode_t mode,
+			     unsigned int cmd, unsigned long arg)
+{
+	int ret;
+
+	lock_kernel();
+	ret = idecd_locked_ioctl(bdev, mode, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
+
 static int idecd_media_changed(struct gendisk *disk)
 {
 	struct cdrom_info *info = ide_drv_g(disk, cdrom_info);
@@ -1698,7 +1722,7 @@ static const struct block_device_operations idecd_ops = {
 	.owner			= THIS_MODULE,
 	.open			= idecd_open,
 	.release		= idecd_release,
-	.locked_ioctl		= idecd_ioctl,
+	.ioctl			= idecd_ioctl,
 	.media_changed		= idecd_media_changed,
 	.revalidate_disk	= idecd_revalidate_disk
 };

+ 1 - 1
drivers/ide/ide-cd_ioctl.c

@@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
 	   touch it at all. */

 	if (cgc->data_direction == CGC_DATA_WRITE)
-		flags |= REQ_RW;
+		flags |= REQ_WRITE;

 	if (cgc->sense)
 		memset(cgc->sense, 0, sizeof(struct request_sense));

+ 12 - 6
drivers/ide/ide-disk.c

@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 	ide_hwif_t *hwif = drive->hwif;

 	BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
-	BUG_ON(!blk_fs_request(rq));
+	BUG_ON(rq->cmd_type != REQ_TYPE_FS);

 	ledtrig_ide_activity();

@@ -427,10 +427,15 @@ static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
 		drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
 }

-static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
 {
 	ide_drive_t *drive = q->queuedata;
-	struct ide_cmd *cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+	struct ide_cmd *cmd;
+
+	if (!(rq->cmd_flags & REQ_FLUSH))
+		return BLKPREP_OK;
+
+	cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);

 	/* FIXME: map struct ide_taskfile on rq->cmd[] */
 	BUG_ON(cmd == NULL);
@@ -448,6 +453,8 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 	rq->special = cmd;
 	cmd->rq = rq;
+
+	return BLKPREP_OK;
 }

 ide_devset_get(multcount, mult_count);
@@ -513,7 +520,6 @@ static void update_ordered(ide_drive_t *drive)
 {
 	u16 *id = drive->id;
 	unsigned ordered = QUEUE_ORDERED_NONE;
-	prepare_flush_fn *prep_fn = NULL;

 	if (drive->dev_flags & IDE_DFLAG_WCACHE) {
 		unsigned long long capacity;
@@ -538,12 +544,12 @@ static void update_ordered(ide_drive_t *drive)

 		if (barrier) {
 			ordered = QUEUE_ORDERED_DRAIN_FLUSH;
-			prep_fn = idedisk_prepare_flush;
+			blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
 		}
 	} else
 		ordered = QUEUE_ORDERED_DRAIN;

-	blk_queue_ordered(drive->queue, ordered, prep_fn);
+	blk_queue_ordered(drive->queue, ordered);
 }

 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);

+ 7 - 2
drivers/ide/ide-disk_ioctl.c

@@ -1,6 +1,7 @@
 #include <linux/kernel.h>
 #include <linux/ide.h>
 #include <linux/hdreg.h>
+#include <linux/smp_lock.h>

 #include "ide-disk.h"

@@ -18,9 +19,13 @@ int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
 {
 	int err;

+	lock_kernel();
 	err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings);
 	if (err != -EOPNOTSUPP)
-		return err;
+		goto out;

-	return generic_ide_ioctl(drive, bdev, cmd, arg);
+	err = generic_ide_ioctl(drive, bdev, cmd, arg);
+out:
+	unlock_kernel();
+	return err;
 }

+ 3 - 2
drivers/ide/ide-eh.c

@@ -122,7 +122,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 		return ide_stopped;

 	/* retry only "normal" I/O: */
-	if (!blk_fs_request(rq)) {
+	if (rq->cmd_type != REQ_TYPE_FS) {
 		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 			struct ide_cmd *cmd = rq->special;

@@ -146,7 +146,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
 {
 	struct request *rq = drive->hwif->rq;

-	if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) {
+	if (rq && rq->cmd_type == REQ_TYPE_SPECIAL &&
+	    rq->cmd[0] == REQ_DRIVE_RESET) {
 		if (err <= 0 && rq->errors == 0)
 			rq->errors = -EIO;
 		ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));

+ 17 - 10
drivers/ide/ide-floppy.c

@@ -73,7 +73,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 		drive->failed_pc = NULL;

 	if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
-	    (rq && blk_pc_request(rq)))
+	    (rq && rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		uptodate = 1; /* FIXME */
 	else if (pc->c[0] == GPCMD_REQUEST_SENSE) {

@@ -98,7 +98,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 			       "Aborting request!\n");
 	}

-	if (blk_special_request(rq))
+	if (rq->cmd_type == REQ_TYPE_SPECIAL)
 		rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;

 	return uptodate;
@@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
 	memcpy(rq->cmd, pc->c, 12);

 	pc->rq = rq;
-	if (rq->cmd_flags & REQ_RW)
+	if (rq->cmd_flags & REQ_WRITE)
 		pc->flags |= PC_FLAG_WRITING;

 	pc->flags |= PC_FLAG_DMA_OK;
@@ -247,14 +247,16 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 		} else
 			printk(KERN_ERR PFX "%s: I/O error\n", drive->name);

-		if (blk_special_request(rq)) {
+		if (rq->cmd_type == REQ_TYPE_SPECIAL) {
 			rq->errors = 0;
 			ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 			return ide_stopped;
 		} else
 			goto out_end;
 	}
-	if (blk_fs_request(rq)) {
+
+	switch (rq->cmd_type) {
+	case REQ_TYPE_FS:
 		if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
 		    (blk_rq_sectors(rq) % floppy->bs_factor)) {
 			printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -263,13 +265,18 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 		}
 		pc = &floppy->queued_pc;
 		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
-	} else if (blk_special_request(rq) || blk_sense_request(rq)) {
+		break;
+	case REQ_TYPE_SPECIAL:
+	case REQ_TYPE_SENSE:
 		pc = (struct ide_atapi_pc *)rq->special;
-	} else if (blk_pc_request(rq)) {
+		break;
+	case REQ_TYPE_BLOCK_PC:
 		pc = &floppy->queued_pc;
 		idefloppy_blockpc_cmd(floppy, pc, rq);
-	} else
+		break;
+	default:
 		BUG();
+	}

 	ide_prep_sense(drive, rq);

@@ -280,7 +287,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,

 	cmd.rq = rq;

-	if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
+	if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
 		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 		ide_map_sg(drive, &cmd);
 	}
@@ -290,7 +297,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 	return ide_floppy_issue_pc(drive, &cmd, pc);
 out_end:
 	drive->failed_pc = NULL;
-	if (blk_fs_request(rq) == 0 && rq->errors == 0)
+	if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
 		rq->errors = -EIO;
 	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
 	return ide_stopped;

+ 9 - 3
drivers/ide/ide-floppy_ioctl.c

@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/ide.h>
 #include <linux/cdrom.h>
+#include <linux/smp_lock.h>

 #include <asm/unaligned.h>

@@ -275,12 +276,15 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	void __user *argp = (void __user *)arg;
 	int err;

-	if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR)
-		return ide_floppy_lockdoor(drive, &pc, arg, cmd);
+	lock_kernel();
+	if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) {
+		err = ide_floppy_lockdoor(drive, &pc, arg, cmd);
+		goto out;
+	}

 	err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp);
 	if (err != -ENOTTY)
-		return err;
+		goto out;

 	/*
 	 * skip SCSI_IOCTL_SEND_COMMAND (deprecated)
@@ -293,5 +297,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
 	if (err == -ENOTTY)
 		err = generic_ide_ioctl(drive, bdev, cmd, arg);

+out:
+	unlock_kernel();
 	return err;
 }

+ 17 - 2
drivers/ide/ide-gd.c

@@ -1,3 +1,4 @@
+#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -237,6 +238,18 @@ out_put_idkp:
 	return ret;
 }

+static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode)
+{
+	int ret;
+
+	lock_kernel();
+	ret = ide_gd_open(bdev, mode);
+	unlock_kernel();
+
+	return ret;
+}
+
+
 static int ide_gd_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
@@ -244,6 +257,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode)

 	ide_debug_log(IDE_DBG_FUNC, "enter");

+	lock_kernel();
 	if (idkp->openers == 1)
 		drive->disk_ops->flush(drive);

@@ -255,6 +269,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode)
 	idkp->openers--;

 	ide_disk_put(idkp);
+	unlock_kernel();

 	return 0;
 }
@@ -321,9 +336,9 @@ static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,

 static const struct block_device_operations ide_gd_ops = {
 	.owner			= THIS_MODULE,
-	.open			= ide_gd_open,
+	.open			= ide_gd_unlocked_open,
 	.release		= ide_gd_release,
-	.locked_ioctl		= ide_gd_ioctl,
+	.ioctl			= ide_gd_ioctl,
 	.getgeo			= ide_gd_getgeo,
 	.media_changed		= ide_gd_media_changed,
 	.unlock_native_capacity	= ide_gd_unlock_native_capacity,

+ 4 - 4
drivers/ide/ide-io.c

@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);

 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-	u8 drv_req = blk_special_request(rq) && rq->rq_disk;
+	u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
 	u8 media = drive->media;

 	drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 	} else {
 		if (media == ide_tape)
 			rq->errors = IDE_DRV_ERROR_GENERAL;
-		else if (blk_fs_request(rq) == 0 && rq->errors == 0)
+		else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
 			rq->errors = -EIO;
 	}

@@ -307,7 +307,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 	ide_startstop_t startstop;

-	BUG_ON(!blk_rq_started(rq));
+	BUG_ON(!(rq->cmd_flags & REQ_STARTED));

 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 			    pm->pm_step == IDE_PM_COMPLETED)
 				ide_complete_pm_rq(drive, rq);
 			return startstop;
-		} else if (!rq->rq_disk && blk_special_request(rq))
+		} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL)
 			/*
 			 * TODO: Once all ULDs have been modified to
 			 * check for specific op codes rather than

+ 4 - 4
drivers/ide/ide-pm.c

@@ -191,10 +191,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)

 #ifdef DEBUG_PM
 	printk("%s: completing PM request, %s\n", drive->name,
-	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
+	       (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume");
 #endif
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (blk_pm_suspend_request(rq))
+	if (rq->cmd_type == REQ_TYPE_PM_SUSPEND)
 		blk_stop_queue(q);
 	else
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -210,11 +210,11 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 {
 	struct request_pm_state *pm = rq->special;

-	if (blk_pm_suspend_request(rq) &&
+	if (rq->cmd_type == REQ_TYPE_PM_SUSPEND &&
 	    pm->pm_step == IDE_PM_START_SUSPEND)
 		/* Mark drive blocked when starting the suspend sequence. */
 		drive->dev_flags |= IDE_DFLAG_BLOCKED;
-	else if (blk_pm_resume_request(rq) &&
+	else if (rq->cmd_type == REQ_TYPE_PM_RESUME &&
 		 pm->pm_step == IDE_PM_START_RESUME) {
 		/*
 		 * The first thing we do on wakeup is to wait for BSY bit to

+ 18 - 4
drivers/ide/ide-tape.c

@@ -32,6 +32,7 @@
 #include <linux/errno.h>
 #include <linux/genhd.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/ide.h>
@@ -577,7 +578,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 		      rq->cmd[0], (unsigned long long)blk_rq_pos(rq),
 		      blk_rq_sectors(rq));

-	BUG_ON(!(blk_special_request(rq) || blk_sense_request(rq)));
+	BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL ||
+		 rq->cmd_type == REQ_TYPE_SENSE));

 	/* Retry a failed packet command */
 	if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@@ -1905,7 +1907,11 @@ static const struct file_operations idetape_fops = {

 static int idetape_open(struct block_device *bdev, fmode_t mode)
 {
-	struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0);
+	struct ide_tape_obj *tape;
+
+	lock_kernel();
+	tape = ide_tape_get(bdev->bd_disk, false, 0);
+	unlock_kernel();

 	if (!tape)
 		return -ENXIO;
@@ -1917,7 +1923,10 @@ static int idetape_release(struct gendisk *disk, fmode_t mode)
 {
 	struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj);

+	lock_kernel();
 	ide_tape_put(tape);
+	unlock_kernel();
+
 	return 0;
 }

@@ -1926,9 +1935,14 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode,
 {
 	struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj);
 	ide_drive_t *drive = tape->drive;
-	int err = generic_ide_ioctl(drive, bdev, cmd, arg);
+	int err;
+
+	lock_kernel();
+	err = generic_ide_ioctl(drive, bdev, cmd, arg);
 	if (err == -EINVAL)
 		err = idetape_blkdev_ioctl(drive, cmd, arg);
+	unlock_kernel();
+
 	return err;
 }

@@ -1936,7 +1950,7 @@ static const struct block_device_operations idetape_block_ops = {
 	.owner		= THIS_MODULE,
 	.open		= idetape_open,
 	.release	= idetape_release,
-	.locked_ioctl	= idetape_ioctl,
+	.ioctl		= idetape_ioctl,
 };

 static int ide_tape_probe(ide_drive_t *drive)

+ 6 - 6
drivers/md/dm-io.c

@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

 	if (sync)
-		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+		rw |= REQ_SYNC | REQ_UNPLUG;

 	/*
 	 * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	 */
 	for (i = 0; i < num_regions; i++) {
 		*dp = old_pages;
-		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+		if (where[i].count || (rw & REQ_HARDBARRIER))
 			do_region(rw, i, where + i, dp, io);
 	}

@@ -412,8 +412,8 @@ retry:
 	}
 	set_current_state(TASK_RUNNING);

-	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-		rw &= ~(1 << BIO_RW_BARRIER);
+	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+		rw &= ~REQ_HARDBARRIER;
 		goto retry;
 	}

@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,

+ 1 - 1
drivers/md/dm-kcopyd.c

@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,

+ 1 - 1
drivers/md/dm-raid1.c

@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
 	if (error == -EOPNOTSUPP)
 		goto out;

-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		goto out;

 	if (unlikely(error)) {

+ 1 - 1
drivers/md/dm-stripe.c

@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
 	if (!error)
 		return 0; /* I/O complete */

-	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
 		return error;

 	if (error == -EOPNOTSUPP)

+ 21 - 26
drivers/md/dm.c

@@ -15,6 +15,7 @@
 #include <linux/blkpg.h>
 #include <linux/bio.h>
 #include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
@@ -338,6 +339,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mapped_device *md;

+	lock_kernel();
 	spin_lock(&_minor_lock);

 	md = bdev->bd_disk->private_data;
@@ -355,6 +357,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)

 out:
 	spin_unlock(&_minor_lock);
+	unlock_kernel();

 	return md ? 0 : -ENXIO;
 }
@@ -362,8 +365,12 @@ out:
 static int dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
 	struct mapped_device *md = disk->private_data;
+
+	lock_kernel();
 	atomic_dec(&md->open_count);
 	dm_put(md);
+	unlock_kernel();
+
 	return 0;
 }

@@ -614,7 +621,7 @@ static void dec_pending(struct dm_io *io, int error)
 			 */
 			spin_lock_irqsave(&md->deferred_lock, flags);
 			if (__noflush_suspending(md)) {
-				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
 					bio_list_add_head(&md->deferred,
 							  io->bio);
 			} else
@@ -626,7 +633,7 @@ static void dec_pending(struct dm_io *io, int error)
 		io_error = io->error;
 		bio = io->bio;

-		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+		if (bio->bi_rw & REQ_HARDBARRIER) {
 			/*
 			 * There can be just one barrier request so we use
 			 * a per-device variable for error reporting.
@@ -792,12 +799,12 @@ static void dm_end_request(struct request *clone, int error)
 {
 	int rw = rq_data_dir(clone);
 	int run_queue = 1;
-	bool is_barrier = blk_barrier_rq(clone);
+	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct mapped_device *md = tio->md;
 	struct request *rq = tio->orig;

-	if (blk_pc_request(rq) && !is_barrier) {
+	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
 		rq->errors = clone->errors;
 		rq->resid_len = clone->resid_len;

@@ -844,7 +851,7 @@ void dm_requeue_unmapped_request(struct request *clone)
 	struct request_queue *q = rq->q;
 	unsigned long flags;

-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 		/*
 		 * Barrier clones share an original request.
 		 * Leave it to dm_end_request(), which handles this special
@@ -943,7 +950,7 @@ static void dm_complete_request(struct request *clone, int error)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct request *rq = tio->orig;

-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 		/*
 		 * Barrier clones share an original request.  So can't use
 		 * softirq_done with the original.
@@ -972,7 +979,7 @@ void dm_kill_unmapped_request(struct request *clone, int error)
 	struct dm_rq_target_io *tio = clone->end_io_data;
 	struct request *rq = tio->orig;

-	if (unlikely(blk_barrier_rq(clone))) {
+	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
 		/*
 		 * Barrier clones share an original request.
 		 * Leave it to dm_end_request(), which handles this special
@@ -1106,7 +1113,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,

 	clone->bi_sector = sector;
 	clone->bi_bdev = bio->bi_bdev;
-	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
 	clone->bi_vcnt = 1;
 	clone->bi_size = to_bytes(len);
 	clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1140,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,

 	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
 	__bio_clone(clone, bio);
-	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+	clone->bi_rw &= ~REQ_HARDBARRIER;
 	clone->bi_destructor = dm_bio_destructor;
 	clone->bi_sector = sector;
 	clone->bi_idx = idx;
@@ -1301,7 +1308,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)

 	ci.map = dm_get_live_table(md);
 	if (unlikely(!ci.map)) {
-		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+		if (!(bio->bi_rw & REQ_HARDBARRIER))
 			bio_io_error(bio);
 		else
 			if (!md->barrier_error)
@@ -1414,7 +1421,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
 	 * we have to queue this io for later.
 	 */
 	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		up_read(&md->io_lock);

 		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -1455,20 +1462,9 @@ static int dm_request(struct request_queue *q, struct bio *bio)
 	return _dm_request(q, bio);
 }

-/*
- * Mark this request as flush request, so that dm_request_fn() can
- * recognize.
- */
-static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq)
-{
-	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
-	rq->cmd[0] = REQ_LB_OP_FLUSH;
-}
-
 static bool dm_rq_is_flush_request(struct request *rq)
 {
-	if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK &&
-	    rq->cmd[0] == REQ_LB_OP_FLUSH)
+	if (rq->cmd_flags & REQ_FLUSH)
 		return true;
 	else
 		return false;
@@ -1912,8 +1908,7 @@ static struct mapped_device *alloc_dev(int minor)
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
 	blk_queue_prep_rq(md->queue, dm_prep_fn);
 	blk_queue_lld_busy(md->queue, dm_lld_busy);
-	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH,
-			  dm_rq_prepare_flush);
+	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);

 	md->disk = alloc_disk(1);
 	if (!md->disk)
@@ -2296,7 +2291,7 @@ static void dm_wq_work(struct work_struct *work)
 		if (dm_request_based(md))
 			generic_make_request(c);
 		else {
-			if (bio_rw_flagged(c, BIO_RW_BARRIER))
+			if (c->bi_rw & REQ_HARDBARRIER)
 				process_barrier(md, c);
 			else
 				__split_and_process_bio(md, c);

+ 1 - 1
drivers/md/linear.c

@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 	dev_info_t *tmp_dev;
 	sector_t start_sector;

-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}

+ 11 - 5
drivers/md/md.c

@@ -36,6 +36,7 @@
 #include <linux/blkdev.h>
 #include <linux/sysctl.h>
 #include <linux/seq_file.h>
+#include <linux/smp_lock.h>
 #include <linux/buffer_head.h> /* for invalidate_bdev */
 #include <linux/poll.h>
 #include <linux/ctype.h>
@@ -353,7 +354,7 @@ static void md_submit_barrier(struct work_struct *ws)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
-		bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+		bio->bi_rw &= ~REQ_HARDBARRIER;
 		if (mddev->pers->make_request(mddev, bio))
 			generic_make_request(bio);
 		mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +676,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * if zero is reached.
 	 * If an error occurred, call md_error
 	 *
-	 * As we might need to resubmit the request if BIO_RW_BARRIER
+	 * As we might need to resubmit the request if REQ_HARDBARRIER
 	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+	int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;

 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
@@ -691,7 +692,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	atomic_inc(&mddev->pending_writes);
 	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
 		struct bio *rbio;
-		rw |= (1<<BIO_RW_BARRIER);
+		rw |= REQ_HARDBARRIER;
 		rbio = bio_clone(bio, GFP_NOIO);
 		rbio->bi_private = bio;
 		rbio->bi_end_io = super_written_barrier;
@@ -736,7 +737,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 	struct completion event;
 	int ret;

-	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+	rw |= REQ_SYNC | REQ_UNPLUG;

 	bio->bi_bdev = bdev;
 	bio->bi_sector = sector;
@@ -5902,6 +5903,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	mddev_t *mddev = mddev_find(bdev->bd_dev);
 	int err;

+	lock_kernel();
 	if (mddev->gendisk != bdev->bd_disk) {
 		/* we are racing with mddev_put which is discarding this
 		 * bd_disk.
@@ -5910,6 +5912,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 		/* Wait until bdev->bd_disk is definitely gone */
 		flush_scheduled_work();
 		/* Then retry the open from the top */
+		unlock_kernel();
 		return -ERESTARTSYS;
 	}
 	BUG_ON(mddev != bdev->bd_disk->private_data);
@@ -5923,6 +5926,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)

 	check_disk_size_change(mddev->gendisk, bdev);
  out:
+	unlock_kernel();
 	return err;
 }

@@ -5931,8 +5935,10 @@ static int md_release(struct gendisk *disk, fmode_t mode)
  	mddev_t *mddev = disk->private_data;

 	BUG_ON(!mddev);
+	lock_kernel();
 	atomic_dec(&mddev->openers);
 	mddev_put(mddev);
+	unlock_kernel();

 	return 0;
 }

+ 2 - 2
drivers/md/md.h

@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	BarriersNotsupp	5		/* BIO_RW_BARRIER is not supported */
+#define	BarriersNotsupp	5		/* REQ_HARDBARRIER is not supported */
 #define	AllReserved	6		/* If whole device is reserved for
 					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
 							 * fails.  Only supported
 							 */
 	struct bio			*biolist; 	/* bios that need to be retried
-							 * because BIO_RW_BARRIER is not supported
+							 * because REQ_HARDBARRIER is not supported
 							 */

 	atomic_t			recovery_active; /* blocks scheduled, but not written */

+ 4 - 4
drivers/md/multipath.c

@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)

 	if (uptodate)
 		multipath_end_bh_io(mp_bh, 0);
-	else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+	else if (!(bio->bi_rw & REQ_RAHEAD)) {
 		/*
 		 * oops, IO error:
 		 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	struct multipath_bh * mp_bh;
 	struct multipath_info *multipath;

-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 	mp_bh->bio = *bio;
 	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-	mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
 	mp_bh->bio.bi_private = mp_bh;
 	generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
 			*bio = *(mp_bh->master_bio);
 			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
 			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-			bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 			bio->bi_end_io = multipath_end_request;
 			bio->bi_private = mp_bh;
 			generic_make_request(bio);

+ 1 - 1
drivers/md/raid0.c

@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
 	struct strip_zone *zone;
 	mdk_rdev_t *tmp_dev;

-	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+	if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
 		md_barrier_request(mddev, bio);
 		return 0;
 	}

+ 10 - 12
drivers/md/raid1.c

@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
-	const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+	const bool do_sync = (bio->bi_rw & REQ_SYNC);
 	bool do_barriers;
 	mdk_rdev_t *blocked_rdev;

@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		finish_wait(&conf->wait_barrier, &w);
 	}
 	if (unlikely(!mddev->barriers_work &&
-		     bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+		     (bio->bi_rw & REQ_HARDBARRIER))) {
 		if (rw == WRITE)
 			md_write_end(mddev);
 		bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
-		read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+		read_bio->bi_rw = READ | do_sync;
 		read_bio->bi_private = r1_bio;

 		generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);

-	do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+	do_barriers = bio->bi_rw & REQ_HARDBARRIER;
 	if (do_barriers)
 		set_bit(R1BIO_Barrier, &r1_bio->state);

@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-			(do_sync << BIO_RW_SYNCIO);
+		mbio->bi_rw = WRITE | do_barriers | do_sync;
 		mbio->bi_private = r1_bio;

 		if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
 		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-			/* some requests in the r1bio were BIO_RW_BARRIER
+			/* some requests in the r1bio were REQ_HARDBARRIER
 			 * requests which failed with -EOPNOTSUPP.  Hohumm..
 			 * Better resubmit without the barrier.
 			 * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
 			 * We already have a nr_pending reference on these rdevs.
 			 */
 			int i;
-			const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+			const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
 			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
 			clear_bit(R1BIO_Barrier, &r1_bio->state);
 			for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
 						conf->mirrors[i].rdev->data_offset;
 					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
 					bio->bi_end_io = raid1_end_write_request;
-					bio->bi_rw = WRITE |
-						(do_sync << BIO_RW_SYNCIO);
+					bio->bi_rw = WRITE | do_sync;
 					bio->bi_private = r1_bio;
 					r1_bio->bios[i] = bio;
 					generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
 				       (unsigned long long)r1_bio->sector);
 				raid_end_bio_io(r1_bio);
 			} else {
-				const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+				const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
 				r1_bio->bios[r1_bio->read_disk] =
 					mddev->ro ? IO_BLOCKED : NULL;
 				r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
 				bio->bi_sector = r1_bio->sector + rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				bio->bi_end_io = raid1_end_read_request;
-				bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r1_bio;
 				unplug = 1;
 				generic_make_request(bio);

Some files were not shown because too many files changed in this diff