Merge branch 'akpm' (patches from Andrew)

Merge yet more updates from Andrew Morton:

 - the rest of ocfs2

 - various hotfixes, mainly MM

 - quite a bit of misc stuff - drivers, fork, exec, signals, etc.

 - printk updates

 - firmware

 - checkpatch

 - nilfs2

 - more kexec stuff than usual

 - rapidio updates

 - w1 things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (111 commits)
  ipc: delete "nr_ipc_ns"
  kcov: allow more fine-grained coverage instrumentation
  init/Kconfig: add clarification for out-of-tree modules
  config: add android config fragments
  init/Kconfig: ban CONFIG_LOCALVERSION_AUTO with allmodconfig
  relay: add global mode support for buffer-only channels
  init: allow blacklisting of module_init functions
  w1:omap_hdq: fix regression
  w1: add helper macro module_w1_family
  w1: remove need for ida and use PLATFORM_DEVID_AUTO
  rapidio/switches: add driver for IDT gen3 switches
  powerpc/fsl_rio: apply changes for RIO spec rev 3
  rapidio: modify for rev.3 specification changes
  rapidio: change inbound window size type to u64
  rapidio/idt_gen2: fix locking warning
  rapidio: fix error handling in mbox request/release functions
  rapidio/tsi721_dma: advance queue processing from transfer submit call
  rapidio/tsi721: add messaging mbox selector parameter
  rapidio/tsi721: add PCIe MRRS override parameter
  rapidio/tsi721_dma: add channel mask and queue size parameters
  ...
Linus Torvalds, 9 years ago
commit d52bd54db8
100 changed files, 3575 additions and 841 deletions
+   2  -  0  .mailmap
+   2  -  1  Documentation/filesystems/nilfs2.txt
+   1  -  1  Documentation/ioctl/ioctl-number.txt
+   7  -  0  Documentation/kernel-parameters.txt
+   1  -  2  Documentation/rapidio/mport_cdev.txt
+ 119  -  0  Documentation/rapidio/rio_cm.txt
+  26  -  0  Documentation/rapidio/tsi721.txt
+  14  -  0  Documentation/sysctl/kernel.txt
+  11  -  2  MAINTAINERS
+   0  - 27  arch/alpha/include/asm/thread_info.h
+   1  -  1  arch/alpha/kernel/machvec_impl.h
+   1  -  1  arch/arc/mm/init.c
+   8  -  0  arch/arm/boot/dts/keystone.dtsi
+  24  -  0  arch/arm/include/asm/kexec.h
+   1  -  1  arch/arm/kernel/machine_kexec.c
+  37  -  2  arch/arm/kernel/setup.c
+   2  -  2  arch/arm/mach-integrator/impd1.c
+   1  -  1  arch/arm/mach-mv78xx0/common.c
+   1  -  1  arch/blackfin/mm/init.c
+   1  -  1  arch/hexagon/mm/init.c
+   0  - 28  arch/ia64/include/asm/thread_info.h
+   1  -  1  arch/ia64/kernel/machine_kexec.c
+   1  -  1  arch/ia64/kernel/mca.c
+   0  - 27  arch/microblaze/include/asm/thread_info.h
+   2  -  2  arch/microblaze/mm/init.c
+   1  -  1  arch/microblaze/mm/pgtable.c
+   1  -  1  arch/mips/mm/init.c
+   1  -  1  arch/mips/txx9/generic/pci.c
+   1  -  1  arch/nios2/mm/init.c
+   2  -  2  arch/openrisc/mm/ioremap.c
+   4  -  4  arch/powerpc/include/asm/mman.h
+   0  - 25  arch/powerpc/include/asm/thread_info.h
+   1  -  1  arch/powerpc/lib/alloc.c
+   1  -  1  arch/powerpc/mm/pgtable_32.c
+   2  -  2  arch/powerpc/platforms/powermac/setup.c
+   1  -  1  arch/powerpc/platforms/ps3/device-init.c
+   7  - 17  arch/powerpc/sysdev/fsl_rio.c
+   1  -  1  arch/powerpc/sysdev/msi_bitmap.c
+   1  -  1  arch/score/mm/init.c
+   2  -  2  arch/sh/drivers/pci/pci.c
+   0  - 26  arch/sh/include/asm/thread_info.h
+   1  -  1  arch/sh/mm/ioremap.c
+   0  - 24  arch/sparc/include/asm/thread_info_64.h
+   0  - 27  arch/tile/include/asm/thread_info.h
+   0  - 24  arch/x86/include/asm/thread_info.h
+   2  -  2  arch/x86/mm/init.c
+   2  -  2  arch/x86/platform/efi/early_printk.c
+   2  -  3  arch/x86/xen/enlighten.c
+   2  -  3  drivers/acpi/osl.c
+ 127  - 56  drivers/base/firmware_class.c
+   1  -  1  drivers/base/node.c
+   0  -  1  drivers/block/drbd/drbd_actlog.c
+   1  -  0  drivers/block/drbd/drbd_int.h
+   2  -  2  drivers/clk/clkdev.c
+   2  - 15  drivers/memstick/core/ms_block.c
+   1  -  1  drivers/pci/xen-pcifront.c
+   9  -  0  drivers/rapidio/Kconfig
+   1  -  0  drivers/rapidio/Makefile
+   3  -  3  drivers/rapidio/devices/rio_mport_cdev.c
+  45  - 12  drivers/rapidio/devices/tsi721.c
+   1  -  1  drivers/rapidio/devices/tsi721.h
+  18  -  9  drivers/rapidio/devices/tsi721_dma.c
+  18  - 56  drivers/rapidio/rio-scan.c
+ 125  - 87  drivers/rapidio/rio.c
+   1  -  1  drivers/rapidio/rio.h
+2366  -  0  drivers/rapidio/rio_cm.c
+   6  -  0  drivers/rapidio/switches/Kconfig
+   1  -  0  drivers/rapidio/switches/Makefile
+   3  -  4  drivers/rapidio/switches/idt_gen2.c
+ 382  -  0  drivers/rapidio/switches/idt_gen3.c
+  12  - 14  drivers/rapidio/switches/tsi57x.c
+   2  -  0  drivers/video/fbdev/bfin_adv7393fb.c
+   0  -  2  drivers/video/fbdev/bfin_adv7393fb.h
+   2  -  2  drivers/video/logo/logo.c
+   0  -  2  drivers/w1/masters/omap_hdq.c
+   1  - 13  drivers/w1/slaves/w1_ds2406.c
+   1  - 13  drivers/w1/slaves/w1_ds2408.c
+   1  - 13  drivers/w1/slaves/w1_ds2413.c
+   1  - 13  drivers/w1/slaves/w1_ds2423.c
+   1  - 13  drivers/w1/slaves/w1_ds2431.c
+   1  - 13  drivers/w1/slaves/w1_ds2433.c
+   6  - 37  drivers/w1/slaves/w1_ds2760.c
+   5  - 34  drivers/w1/slaves/w1_ds2780.c
+   5  - 35  drivers/w1/slaves/w1_ds2781.c
+   1  - 13  drivers/w1/slaves/w1_ds28e04.c
+  12  -  0  drivers/w1/w1_family.h
+  18  - 16  fs/binfmt_elf.c
+   2  -  1  fs/binfmt_em86.c
+   6  -  3  fs/exec.c
+   1  -  1  fs/inode.c
+  21  - 24  fs/nilfs2/alloc.c
+   2  -  2  fs/nilfs2/bmap.c
+   1  -  1  fs/nilfs2/bmap.h
+   2  -  2  fs/nilfs2/btnode.c
+  36  - 25  fs/nilfs2/btree.c
+   1  -  1  fs/nilfs2/btree.h
+  10  - 13  fs/nilfs2/cpfile.c
+   2  -  1  fs/nilfs2/cpfile.h
+   9  - 10  fs/nilfs2/dat.c
+   1  -  0  fs/nilfs2/dat.h

+ 2 - 0
.mailmap

@@ -92,6 +92,8 @@ Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
+Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
+Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>

+ 2 - 1
Documentation/filesystems/nilfs2.txt

@@ -267,7 +267,8 @@ among NILFS2 files can be depicted as follows:
                                   `-- file (ino=yy)
                                     ( regular file, directory, or symlink )
 
-For detail on the format of each file, please see include/linux/nilfs2_fs.h.
+For detail on the format of each file, please see nilfs2_ondisk.h
+located at include/uapi/linux directory.
 
 There are no patents or other intellectual property that we protect
 with regard to the design of NILFS2.  It is allowed to replicate the

+ 1 - 1
Documentation/ioctl/ioctl-number.txt

@@ -248,7 +248,7 @@ Code  Seq#(hex)	Include File		Comments
 'm'	00	drivers/scsi/megaraid/megaraid_ioctl.h	conflict!
 'm'	00-1F	net/irda/irmod.h	conflict!
 'n'	00-7F	linux/ncp_fs.h and fs/ncpfs/ioctl.c
-'n'	80-8F	linux/nilfs2_fs.h	NILFS2
+'n'	80-8F	uapi/linux/nilfs2_api.h	NILFS2
 'n'	E0-FF	linux/matroxfb.h	matroxfb
 'o'	00-1F	fs/ocfs2/ocfs2_fs.h	OCFS2
 'o'     00-03   mtd/ubi-user.h		conflict! (OCFS2 and UBI overlaps)

+ 7 - 0
Documentation/kernel-parameters.txt

@@ -3182,6 +3182,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
 			default: disabled
 
+	printk.devkmsg={on,off,ratelimit}
+			Control writing to /dev/kmsg.
+			on - unlimited logging to /dev/kmsg from userspace
+			off - logging to /dev/kmsg disabled
+			ratelimit - ratelimit the logging
+			Default: ratelimit
+
 	printk.time=	Show timing data prefixed to each printk message line
 			Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
 

+ 1 - 2
Documentation/rapidio/mport_cdev.txt

@@ -82,8 +82,7 @@ III. Module parameters
 
 - 'dbg_level' - This parameter allows to control amount of debug information
         generated by this device driver. This parameter is formed by set of
-        This parameter can be changed bit masks that correspond to the specific
-        functional block.
+        bit masks that correspond to the specific functional blocks.
         For mask definitions see 'drivers/rapidio/devices/rio_mport_cdev.c'
         This parameter can be changed dynamically.
         Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level.

+ 119 - 0
Documentation/rapidio/rio_cm.txt

@@ -0,0 +1,119 @@
+RapidIO subsystem Channelized Messaging character device driver (rio_cm.c)
+==========================================================================
+
+Version History:
+----------------
+  1.0.0 - Initial driver release.
+
+==========================================================================
+
+I. Overview
+
+This device driver is the result of collaboration within the RapidIO.org
+Software Task Group (STG) between Texas Instruments, Prodrive Technologies,
+Nokia Networks, BAE and IDT.  Additional input was received from other members
+of RapidIO.org.
+
+The objective was to create a character mode driver interface which exposes
+messaging capabilities of RapidIO endpoint devices (mports) directly
+to applications, in a manner that allows the numerous and varied RapidIO
+implementations to interoperate.
+
+This driver (RIO_CM) provides to user-space applications shared access to
+RapidIO mailbox messaging resources.
+
+RapidIO specification (Part 2) defines that endpoint devices may have up to four
+messaging mailboxes in case of multi-packet message (up to 4KB) and
+up to 64 mailboxes if single-packet messages (up to 256 B) are used. In addition
+to protocol definition limitations, a particular hardware implementation can
+have reduced number of messaging mailboxes.  RapidIO aware applications must
+therefore share the messaging resources of a RapidIO endpoint.
+
+Main purpose of this device driver is to provide RapidIO mailbox messaging
+capability to large number of user-space processes by introducing socket-like
+operations using a single messaging mailbox.  This allows applications to
+use the limited RapidIO messaging hardware resources efficiently.
+
+Most of device driver's operations are supported through 'ioctl' system calls.
+
+When loaded this device driver creates a single file system node named rio_cm
+in /dev directory common for all registered RapidIO mport devices.
+
+Following ioctl commands are available to user-space applications:
+
+- RIO_CM_MPORT_GET_LIST : Returns to caller list of local mport devices that
+    support messaging operations (number of entries up to RIO_MAX_MPORTS).
+    Each list entry is combination of mport's index in the system and RapidIO
+    destination ID assigned to the port.
+- RIO_CM_EP_GET_LIST_SIZE : Returns number of messaging capable remote endpoints
+    in a RapidIO network associated with the specified mport device.
+- RIO_CM_EP_GET_LIST : Returns list of RapidIO destination IDs for messaging
+    capable remote endpoints (peers) available in a RapidIO network associated
+    with the specified mport device.
+- RIO_CM_CHAN_CREATE : Creates RapidIO message exchange channel data structure
+    with channel ID assigned automatically or as requested by a caller.
+- RIO_CM_CHAN_BIND : Binds the specified channel data structure to the specified
+    mport device.
+- RIO_CM_CHAN_LISTEN : Enables listening for connection requests on the specified
+    channel.
+- RIO_CM_CHAN_ACCEPT : Accepts a connection request from peer on the specified
+    channel. If wait timeout for this request is specified by a caller it is
+    a blocking call. If timeout set to 0 this is non-blocking call - ioctl
+    handler checks for a pending connection request and if one is not available
+    exits with -EAGAIN error status immediately.
+- RIO_CM_CHAN_CONNECT : Sends a connection request to a remote peer/channel.
+- RIO_CM_CHAN_SEND : Sends a data message through the specified channel.
+    The handler for this request assumes that message buffer specified by
+    a caller includes the reserved space for a packet header required by
+    this driver.
+- RIO_CM_CHAN_RECEIVE : Receives a data message through a connected channel.
+    If the channel does not have an incoming message ready to return this ioctl
+    handler will wait for new message until timeout specified by a caller
+    expires. If timeout value is set to 0, ioctl handler uses a default value
+    defined by MAX_SCHEDULE_TIMEOUT.
+- RIO_CM_CHAN_CLOSE : Closes a specified channel and frees associated buffers.
+    If the specified channel is in the CONNECTED state, sends close notification
+    to the remote peer.
+
+The ioctl command codes and corresponding data structures intended for use by
+user-space applications are defined in 'include/uapi/linux/rio_cm_cdev.h'.
+
+II. Hardware Compatibility
+
+This device driver uses standard interfaces defined by kernel RapidIO subsystem
+and therefore it can be used with any mport device driver registered by RapidIO
+subsystem with limitations set by available mport HW implementation of messaging
+mailboxes.
+
+III. Module parameters
+
+- 'dbg_level' - This parameter allows to control amount of debug information
+        generated by this device driver. This parameter is formed by set of
+        bit masks that correspond to the specific functional block.
+        For mask definitions see 'drivers/rapidio/devices/rio_cm.c'
+        This parameter can be changed dynamically.
+        Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level.
+
+- 'cmbox' - Number of RapidIO mailbox to use (default value is 1).
+        This parameter allows to set messaging mailbox number that will be used
+        within entire RapidIO network. It can be used when default mailbox is
+        used by other device drivers or is not supported by some nodes in the
+        RapidIO network.
+
+- 'chstart' - Start channel number for dynamic assignment. Default value - 256.
+        Allows to exclude channel numbers below this parameter from dynamic
+        allocation to avoid conflicts with software components that use
+        reserved predefined channel numbers.
+
+IV. Known problems
+
+  None.
+
+V. User-space Applications and API Library
+
+Messaging API library and applications that use this device driver are available
+from RapidIO.org.
+
+VI. TODO List
+
+- Add support for system notification messages (reserved channel 0).

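For orientation, here is a minimal user-space sketch of the channel flow described above. The ioctl command names are the ones listed in this document; the argument type used here (a bare u16 channel id for CREATE/CLOSE) is an assumption for illustration - the authoritative structures are in include/uapi/linux/rio_cm_cdev.h.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rio_cm_cdev.h>	/* ioctl codes and argument structs */

int main(void)
{
	int fd = open("/dev/rio_cm", O_RDWR);
	uint16_t ch = 0;	/* 0: let the driver assign a channel id */

	if (fd < 0) {
		perror("/dev/rio_cm");
		return 1;
	}

	/* Argument type assumed to be a u16 id; see the UAPI header. */
	if (ioctl(fd, RIO_CM_CHAN_CREATE, &ch) < 0) {
		perror("RIO_CM_CHAN_CREATE");
		close(fd);
		return 1;
	}
	printf("created channel %u\n", ch);

	/* ... RIO_CM_CHAN_BIND/_CONNECT/_SEND/_RECEIVE would go here ... */

	ioctl(fd, RIO_CM_CHAN_CLOSE, &ch);
	close(fd);
	return 0;
}
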
+ 26 - 0
Documentation/rapidio/tsi721.txt

@@ -25,6 +25,32 @@ fully compatible with RIONET driver (Ethernet over RapidIO messaging services).
         This parameter can be changed dynamically.
         Use CONFIG_RAPIDIO_DEBUG=y to enable debug output at the top level.
 
+- 'dma_desc_per_channel' - This parameter defines number of hardware buffer
+        descriptors allocated for each registered Tsi721 DMA channel.
+        Its default value is 128.
+
+- 'dma_txqueue_sz' - DMA transactions queue size. Defines number of pending
+        transaction requests that can be accepted by each DMA channel.
+        Default value is 16.
+
+- 'dma_sel' - DMA channel selection mask. Bitmask that defines which hardware
+        DMA channels (0 ... 6) will be registered with DmaEngine core.
+        If bit is set to 1, the corresponding DMA channel will be registered.
+        DMA channels not selected by this mask will not be used by this device
+        driver. Default value is 0x7f (use all channels).
+
+- 'pcie_mrrs' - override value for PCIe Maximum Read Request Size (MRRS).
+        This parameter gives an ability to override MRRS value set during PCIe
+        configuration process. Tsi721 supports read request sizes up to 4096B.
+        Value for this parameter must be set as defined by PCIe specification:
+        0 = 128B, 1 = 256B, 2 = 512B, 3 = 1024B, 4 = 2048B and 5 = 4096B.
+        Default value is '-1' (= keep platform setting).
+
+- 'mbox_sel' - RIO messaging MBOX selection mask. This is a bitmask that defines
+        which messaging MBOXes are managed by this device driver. Mask bits 0 - 3
+        correspond to MBOX0 - MBOX3. MBOX is under driver's control if the
+        corresponding bit is set to '1'. Default value is 0x0f (= all).
+
 II. Known problems
 
   None.

+ 14 - 0
Documentation/sysctl/kernel.txt

@@ -764,6 +764,20 @@ send before ratelimiting kicks in.
 
 ==============================================================
 
+printk_devkmsg:
+
+Control the logging to /dev/kmsg from userspace:
+
+ratelimit: default, ratelimited
+on: unlimited logging to /dev/kmsg from userspace
+off: logging to /dev/kmsg disabled
+
+The kernel command line parameter printk.devkmsg= overrides this and is
+a one-time setting until next reboot: once set, it cannot be changed by
+this sysctl interface anymore.
+
+==============================================================
+
 randomize_va_space:
 
 This option can be used to select the type of process address

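As a concrete illustration of the new knob, a user-space program can flip the policy through procfs at runtime, provided printk.devkmsg= was not given on the kernel command line (in which case the setting is locked, as noted above). A minimal sketch:

#include <stdio.h>

int main(void)
{
	/* Accepted values: "on", "off", "ratelimit" (the default). */
	FILE *f = fopen("/proc/sys/kernel/printk_devkmsg", "w");

	if (!f) {
		perror("printk_devkmsg");
		return 1;
	}
	fputs("off\n", f);	/* disable user-space writes to /dev/kmsg */
	return fclose(f) ? 1 : 0;
}
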
+ 11 - 2
MAINTAINERS

@@ -778,6 +778,11 @@ W:	http://ez.analog.com/community/linux-device-drivers
 S:	Supported
 F:	drivers/dma/dma-axi-dmac.c
 
+ANDROID CONFIG FRAGMENTS
+M:	Rob Herring <robh@kernel.org>
+S:	Supported
+F:	kernel/configs/android*
+
 ANDROID DRIVERS
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:	Arve Hjønnevåg <arve@android.com>
@@ -2346,7 +2351,10 @@ S:	Supported
 F:	drivers/media/platform/sti/bdisp
 
 BEFS FILE SYSTEM
-S:	Orphan
+M:	Luis de Bethencourt <luisbg@osg.samsung.com>
+M:	Salah Triki <salah.triki@gmail.com>
+S:	Maintained
+T:	git git://github.com/luisbg/linux-befs.git
 F:	Documentation/filesystems/befs.txt
 F:	fs/befs/
 
@@ -8264,8 +8272,9 @@ T:	git git://github.com/konis/nilfs2.git
 S:	Supported
 F:	Documentation/filesystems/nilfs2.txt
 F:	fs/nilfs2/
-F:	include/linux/nilfs2_fs.h
 F:	include/trace/events/nilfs2.h
+F:	include/uapi/linux/nilfs2_api.h
+F:	include/uapi/linux/nilfs2_ondisk.h
 
 NINJA SCSI-3 / NINJA SCSI-32Bi (16bit/CardBus) PCMCIA SCSI HOST ADAPTER DRIVER
 M:	YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>

+ 0 - 27
arch/alpha/include/asm/thread_info.h

@@ -86,33 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define TS_UAC_NOPRINT		0x0001	/* ! Preserve the following three */
 #define TS_UAC_NOFIX		0x0002	/* ! flags as they match          */
 #define TS_UAC_SIGBUS		0x0004	/* ! userspace part of 'osf_sysinfo' */
-#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
-#endif
 
 #define SET_UNALIGN_CTL(task,value)	({				\
 	__u32 status = task_thread_info(task)->status & ~UAC_BITMASK;	\

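This hunk, and the matching ones in the other architecture headers below (ia64, microblaze, powerpc, sh, sparc, tile, x86), deletes per-arch copies of the same restore_sigmask helpers; the series consolidates them into one generic implementation. A sketch of that shared form, modeled directly on the alpha copy deleted above (the exact guards in include/linux/thread_info.h may differ):

#ifdef TS_RESTORE_SIGMASK	/* assumed guard; the arch provides the flag */
static inline void set_restore_sigmask(void)
{
	current_thread_info()->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();

	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}
#endif
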
+ 1 - 1
arch/alpha/kernel/machvec_impl.h

@@ -137,7 +137,7 @@
 #define __initmv __initdata
 #define ALIAS_MV(x)
 #else
-#define __initmv __initdata_refok
+#define __initmv __refdata
 
 /* GCC actually has a syntax for defining aliases, but is under some
    delusion that you shouldn't be able to declare it extern somewhere

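The hunk above is the first of many below that replace __init_refok with __ref. Both annotations serve the same purpose: they tell modpost that a reference from ordinary code into an __init/__initdata section is deliberate, so no section-mismatch warning should be emitted; this series retires the redundant __init_refok spelling. A minimal sketch of the pattern (function and variable names are illustrative):

#include <linux/init.h>
#include <linux/slab.h>

static char early_pool[64] __initdata;	/* reclaimed after boot */
static bool booting = true;

/*
 * Deliberately references __initdata, but only on the early-boot
 * path; __ref suppresses the section-mismatch warning for it.
 */
static void * __ref maybe_early_alloc(size_t size)
{
	if (booting)
		return early_pool;	/* reference into .init.data */
	return kmalloc(size, GFP_KERNEL);
}
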
+ 1 - 1
arch/arc/mm/init.c

@@ -220,7 +220,7 @@ void __init mem_init(void)
 /*
  * free_initmem: Free all the __init memory.
  */
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
 {
 	free_initmem_default(-1);
 }

+ 8 - 0
arch/arm/boot/dts/keystone.dtsi

@@ -70,6 +70,14 @@
 		cpu_on		= <0x84000003>;
 	};
 
+	psci {
+		compatible	= "arm,psci";
+		method		= "smc";
+		cpu_suspend	= <0x84000001>;
+		cpu_off		= <0x84000002>;
+		cpu_on		= <0x84000003>;
+	};
+
 	soc {
 		#address-cells = <1>;
 		#size-cells = <1>;

+ 24 - 0
arch/arm/include/asm/kexec.h

@@ -53,6 +53,30 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 /* Function pointer to optional machine-specific reinitialization */
 extern void (*kexec_reinit)(void);
 
+static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
+{
+	return phys_to_idmap(phys);
+}
+#define phys_to_boot_phys phys_to_boot_phys
+
+static inline phys_addr_t boot_phys_to_phys(unsigned long entry)
+{
+	return idmap_to_phys(entry);
+}
+#define boot_phys_to_phys boot_phys_to_phys
+
+static inline unsigned long page_to_boot_pfn(struct page *page)
+{
+	return page_to_pfn(page) + (arch_phys_to_idmap_offset >> PAGE_SHIFT);
+}
+#define page_to_boot_pfn page_to_boot_pfn
+
+static inline struct page *boot_pfn_to_page(unsigned long boot_pfn)
+{
+	return pfn_to_page(boot_pfn - (arch_phys_to_idmap_offset >> PAGE_SHIFT));
+}
+#define boot_pfn_to_page boot_pfn_to_page
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* CONFIG_KEXEC */

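The '#define name name' lines after each helper above suggest that the generic kexec code gained ifndef-guarded fallbacks, used when an architecture's boot-time view of RAM equals its normal physical map. A sketch of that assumed fallback form (the real definitions would live in include/linux/kexec.h):

/* Assumed generic fallbacks: boot view == normal physical view. */
#ifndef phys_to_boot_phys
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
	return phys;
}
#endif

#ifndef boot_phys_to_phys
static inline phys_addr_t boot_phys_to_phys(unsigned long entry)
{
	return entry;
}
#endif

#ifndef page_to_boot_pfn
static inline unsigned long page_to_boot_pfn(struct page *page)
{
	return page_to_pfn(page);
}
#endif
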
+ 1 - 1
arch/arm/kernel/machine_kexec.c

@@ -57,7 +57,7 @@ int machine_kexec_prepare(struct kimage *image)
 	for (i = 0; i < image->nr_segments; i++) {
 		current_segment = &image->segment[i];
 
-		if (!memblock_is_region_memory(current_segment->mem,
+		if (!memblock_is_region_memory(idmap_to_phys(current_segment->mem),
 					       current_segment->memsz))
 			return -EINVAL;
 

+ 37 - 2
arch/arm/kernel/setup.c

@@ -848,10 +848,29 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 	kernel_data.end     = virt_to_phys(_end - 1);
 
 	for_each_memblock(memory, region) {
+		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+		unsigned long boot_alias_start;
+
+		/*
+		 * Some systems have a special memory alias which is only
+		 * used for booting.  We need to advertise this region to
+		 * kexec-tools so they know where bootable RAM is located.
+		 */
+		boot_alias_start = phys_to_idmap(start);
+		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
+			res = memblock_virt_alloc(sizeof(*res), 0);
+			res->name = "System RAM (boot alias)";
+			res->start = boot_alias_start;
+			res->end = phys_to_idmap(end);
+			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+			request_resource(&iomem_resource, res);
+		}
+
 		res = memblock_virt_alloc(sizeof(*res), 0);
 		res->name  = "System RAM";
-		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
-		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+		res->start = start;
+		res->end = end;
 		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
 		request_resource(&iomem_resource, res);
@@ -1000,9 +1019,25 @@ static void __init reserve_crashkernel(void)
 		(unsigned long)(crash_base >> 20),
 		(unsigned long)(total_mem >> 20));
 
+	/* The crashk resource must always be located in normal mem */
 	crashk_res.start = crash_base;
 	crashk_res.end = crash_base + crash_size - 1;
 	insert_resource(&iomem_resource, &crashk_res);
+
+	if (arm_has_idmap_alias()) {
+		/*
+		 * If we have a special RAM alias for use at boot, we
+		 * need to advertise to kexec tools where the alias is.
+		 */
+		static struct resource crashk_boot_res = {
+			.name = "Crash kernel (boot alias)",
+			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+		};
+
+		crashk_boot_res.start = phys_to_idmap(crash_base);
+		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
+		insert_resource(&iomem_resource, &crashk_boot_res);
+	}
 }
 #else
 static inline void reserve_crashkernel(void) {}

+ 2 - 2
arch/arm/mach-integrator/impd1.c

@@ -320,11 +320,11 @@ static struct impd1_device impd1_devs[] = {
 #define IMPD1_VALID_IRQS 0x00000bffU
 
 /*
- * As this module is bool, it is OK to have this as __init_refok() - no
+ * As this module is bool, it is OK to have this as __ref() - no
  * probe calls will be done after the initial system bootup, as devices
  * are discovered as part of the machine startup.
  */
-static int __init_refok impd1_probe(struct lm_device *dev)
+static int __ref impd1_probe(struct lm_device *dev)
 {
 	struct impd1_module *impd1;
 	int irq_base;

+ 1 - 1
arch/arm/mach-mv78xx0/common.c

@@ -343,7 +343,7 @@ void __init mv78xx0_init_early(void)
 				DDR_WINDOW_CPU1_BASE, DDR_WINDOW_CPU_SZ);
 }
 
-void __init_refok mv78xx0_timer_init(void)
+void __ref mv78xx0_timer_init(void)
 {
 	orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR,
 			IRQ_MV78XX0_TIMER_1, get_tclk());

+ 1 - 1
arch/blackfin/mm/init.c

@@ -112,7 +112,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
 {
 #if defined CONFIG_RAMKERNEL && !defined CONFIG_MPU
 	free_initmem_default(-1);

+ 1 - 1
arch/hexagon/mm/init.c

@@ -93,7 +93,7 @@ void __init mem_init(void)
  * Todo:  free pages between __init_begin and __init_end; possibly
  * some devtree related stuff as well.
  */
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
 {
 }
 

+ 0 - 28
arch/ia64/include/asm/thread_info.h

@@ -121,32 +121,4 @@ struct thread_info {
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
-#define TS_RESTORE_SIGMASK	2	/* restore signal mask in do_signal() */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
-#endif	/* !__ASSEMBLY__ */
-
 #endif /* _ASM_IA64_THREAD_INFO_H */

+ 1 - 1
arch/ia64/kernel/machine_kexec.c

@@ -163,7 +163,7 @@ void arch_crash_save_vmcoreinfo(void)
 #endif
 }
 
-unsigned long paddr_vmcoreinfo_note(void)
+phys_addr_t paddr_vmcoreinfo_note(void)
 {
 	return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note);
 }

+ 1 - 1
arch/ia64/kernel/mca.c

@@ -1831,7 +1831,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 }
 
 /* Caller prevents this from being called after init */
-static void * __init_refok mca_bootmem(void)
+static void * __ref mca_bootmem(void)
 {
 	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
 	                    KERNEL_STACK_SIZE, 0);

+ 0 - 27
arch/microblaze/include/asm/thread_info.h

@@ -148,33 +148,6 @@ static inline struct thread_info *current_thread_info(void)
  */
 /* FPU was used by this task this quantum (SMP) */
 #define TS_USEDFPU		0x0001
-#define TS_RESTORE_SIGMASK	0x0002
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
-#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_MICROBLAZE_THREAD_INFO_H */

+ 2 - 2
arch/microblaze/mm/init.c

@@ -414,7 +414,7 @@ void __init *early_get_page(void)
 
 #endif /* CONFIG_MMU */
 
-void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
+void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	if (mem_init_done)
 		return kmalloc(size, mask);
@@ -422,7 +422,7 @@ void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask)
 		return alloc_bootmem(size);
 }
 
-void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
+void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
 

+ 1 - 1
arch/microblaze/mm/pgtable.c

@@ -234,7 +234,7 @@ unsigned long iopa(unsigned long addr)
 	return pa;
 }
 
-__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 		unsigned long address)
 {
 	pte_t *pte;

+ 1 - 1
arch/mips/mm/init.c

@@ -504,7 +504,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
 void (*free_init_pages_eva)(void *begin, void *end) = NULL;
 
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
 {
 	prom_free_prom_memory();
 	/*

+ 1 - 1
arch/mips/txx9/generic/pci.c

@@ -268,7 +268,7 @@ static int txx9_i8259_irq_setup(int irq)
 	return err;
 }
 
-static void __init_refok quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __ref quirk_slc90e66_bridge(struct pci_dev *dev)
 {
 	int irq;	/* PCI/ISA Bridge interrupt */
 	u8 reg_64;

+ 1 - 1
arch/nios2/mm/init.c

@@ -89,7 +89,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
 {
 	free_initmem_default(-1);
 }

+ 2 - 2
arch/openrisc/mm/ioremap.c

@@ -38,7 +38,7 @@ static unsigned int fixmaps_used __initdata;
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__init_refok
+void __iomem *__ref
 __ioremap(phys_addr_t addr, unsigned long size, pgprot_t prot)
 {
 	phys_addr_t p;
@@ -116,7 +116,7 @@ void iounmap(void *addr)
  * the memblock infrastructure.
  */
 
-pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
+pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm,
 					 unsigned long address)
 {
 	pte_t *pte;

+ 4 - 4
arch/powerpc/include/asm/mman.h

@@ -31,13 +31,13 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 }
 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
 
-static inline int arch_validate_prot(unsigned long prot)
+static inline bool arch_validate_prot(unsigned long prot)
 {
 	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
-		return 0;
+		return false;
 	if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 #define arch_validate_prot(prot) arch_validate_prot(prot)
 

+ 0 - 25
arch/powerpc/include/asm/thread_info.h

@@ -138,40 +138,15 @@ static inline struct thread_info *current_thread_info(void)
 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
 #define TLF_NAPPING		0	/* idle thread enabled NAP mode */
 #define TLF_SLEEPING		1	/* suspend code enabled SLEEP mode */
-#define TLF_RESTORE_SIGMASK	2	/* Restore signal mask in do_signal */
 #define TLF_LAZY_MMU		3	/* tlb_batch is active */
 #define TLF_RUNLATCH		4	/* Is the runlatch enabled? */
 
 #define _TLF_NAPPING		(1 << TLF_NAPPING)
 #define _TLF_SLEEPING		(1 << TLF_SLEEPING)
-#define _TLF_RESTORE_SIGMASK	(1 << TLF_RESTORE_SIGMASK)
 #define _TLF_LAZY_MMU		(1 << TLF_LAZY_MMU)
 #define _TLF_RUNLATCH		(1 << TLF_RUNLATCH)
 
 #ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->local_flags |= _TLF_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
-		return false;
-	ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
-	return true;
-}
 
 static inline bool test_thread_local_flags(unsigned int flags)
 {

+ 1 - 1
arch/powerpc/lib/alloc.c

@@ -6,7 +6,7 @@
 #include <asm/setup.h>
 
 
-void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
+void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
 {
 	void *p;
 

+ 1 - 1
arch/powerpc/mm/pgtable_32.c

@@ -79,7 +79,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
 	pte_t *pte;
 

+ 2 - 2
arch/powerpc/platforms/powermac/setup.c

@@ -353,12 +353,12 @@ static int pmac_late_init(void)
 machine_late_initcall(powermac, pmac_late_init);
 
 /*
- * This is __init_refok because we check for "initializing" before
+ * This is __ref because we check for "initializing" before
  * touching any of the __init sensitive things and "initializing"
  * will be false after __init time. This can't be __init because it
  * can be called whenever a disk is first accessed.
  */
-void __init_refok note_bootable_part(dev_t dev, int part, int goodness)
+void __ref note_bootable_part(dev_t dev, int part, int goodness)
 {
 	char *p;
 

+ 1 - 1
arch/powerpc/platforms/ps3/device-init.c

@@ -189,7 +189,7 @@ fail_malloc:
 	return result;
 }
 
-static int __init_refok ps3_setup_uhc_device(
+static int __ref ps3_setup_uhc_device(
 	const struct ps3_repository_device *repo, enum ps3_match_id match_id,
 	enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type)
 {

+ 7 - 17
arch/powerpc/sysdev/fsl_rio.c

@@ -289,7 +289,7 @@ static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
 }
 
 int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
-	u64 rstart, u32 size, u32 flags)
+	u64 rstart, u64 size, u32 flags)
 {
 	struct rio_priv *priv = mport->priv;
 	u32 base_size;
@@ -298,7 +298,7 @@ int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
 	u32 riwar;
 	int i;
 
-	if ((size & (size - 1)) != 0)
+	if ((size & (size - 1)) != 0 || size > 0x400000000ULL)
 		return -EINVAL;
 
 	base_size_log = ilog2(size);
@@ -643,19 +643,11 @@ int fsl_rio_setup(struct platform_device *dev)
 		port->ops = ops;
 		port->priv = priv;
 		port->phys_efptr = 0x100;
+		port->phys_rmap = 1;
 		priv->regs_win = rio_regs_win;
 
-		/* Probe the master port phy type */
 		ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20);
-		port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
-		if (port->phy_type == RIO_PHY_PARALLEL) {
-			dev_err(&dev->dev, "RIO: Parallel PHY type, unsupported port type!\n");
-			release_resource(&port->iores);
-			kfree(priv);
-			kfree(port);
-			continue;
-		}
-		dev_info(&dev->dev, "RapidIO PHY type: Serial\n");
+
 		/* Checking the port training status */
 		if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) {
 			dev_err(&dev->dev, "Port %d is not ready. "
@@ -705,11 +697,9 @@ int fsl_rio_setup(struct platform_device *dev)
 			((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET :
 			RIO_INB_ATMU_REGS_PORT2_OFFSET));
 
-
-		/* Set to receive any dist ID for serial RapidIO controller. */
-		if (port->phy_type == RIO_PHY_SERIAL)
-			out_be32((priv->regs_win
-				+ RIO_ISR_AACR + i*0x80), RIO_ISR_AACR_AA);
+		/* Set to receive packets with any dest ID */
+		out_be32((priv->regs_win + RIO_ISR_AACR + i*0x80),
+			 RIO_ISR_AACR_AA);
 
 		/* Configure maintenance transaction window */
 		out_be32(&priv->maint_atmu_regs->rowbar,

+ 1 - 1
arch/powerpc/sysdev/msi_bitmap.c

@@ -112,7 +112,7 @@ int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp)
 	return 0;
 }
 
-int __init_refok msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
+int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
 		     struct device_node *of_node)
 {
 	int size;

+ 1 - 1
arch/score/mm/init.c

@@ -91,7 +91,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
 {
 	free_initmem_default(POISON_FREE_INITMEM);
 }

+ 2 - 2
arch/sh/drivers/pci/pci.c

@@ -221,7 +221,7 @@ pcibios_bus_report_status_early(struct pci_channel *hose,
  * We can't use pci_find_device() here since we are
  * called from interrupt context.
  */
-static void __init_refok
+static void __ref
 pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
 			  int warn)
 {
@@ -256,7 +256,7 @@ pcibios_bus_report_status(struct pci_bus *bus, unsigned int status_mask,
 			pcibios_bus_report_status(dev->subordinate, status_mask, warn);
 }
 
-void __init_refok pcibios_report_status(unsigned int status_mask, int warn)
+void __ref pcibios_report_status(unsigned int status_mask, int warn)
 {
 	struct pci_channel *hose;
 

+ 0 - 26
arch/sh/include/asm/thread_info.h

@@ -151,19 +151,10 @@ extern void init_thread_xstate(void);
  * ever touches our thread-synchronous status, so we don't
  * have to worry about atomic accesses.
  */
-#define TS_RESTORE_SIGMASK	0x0001	/* restore signal mask in do_signal() */
 #define TS_USEDFPU		0x0002	/* FPU used by this task this quantum */
 
 #ifndef __ASSEMBLY__
 
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-
 #define TI_FLAG_FAULT_CODE_SHIFT	24
 
 /*
@@ -182,23 +173,6 @@ static inline unsigned int get_thread_fault_code(void)
 	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
 }
 
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
-
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */

+ 1 - 1
arch/sh/mm/ioremap.c

@@ -34,7 +34,7 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __init_refok
+void __iomem * __ref
 __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 		 pgprot_t pgprot, void *caller)
 {

+ 0 - 24
arch/sparc/include/asm/thread_info_64.h

@@ -222,32 +222,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
  *
  * Note that there are only 8 bits available.
  */
-#define TS_RESTORE_SIGMASK	0x0001	/* restore signal mask in do_signal() */
 
 #ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
 
 #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
 #define test_thread_64bit_stack(__SP) \

+ 0 - 27
arch/tile/include/asm/thread_info.h

@@ -166,32 +166,5 @@ extern void _cpu_idle(void);
 #ifdef __tilegx__
 #define TS_COMPAT		0x0001	/* 32-bit compatibility mode */
 #endif
-#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
-#endif	/* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_THREAD_INFO_H */

+ 0 - 24
arch/x86/include/asm/thread_info.h

@@ -219,32 +219,8 @@ static inline unsigned long current_stack_pointer(void)
  * have to worry about atomic accesses.
  */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
-#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal() */
 
 #ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK	1
-static inline void set_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	ti->status |= TS_RESTORE_SIGMASK;
-	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
-	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
-	return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
-	struct thread_info *ti = current_thread_info();
-	if (!(ti->status & TS_RESTORE_SIGMASK))
-		return false;
-	ti->status &= ~TS_RESTORE_SIGMASK;
-	return true;
-}
 
 static inline bool in_ia32_syscall(void)
 {

+ 2 - 2
arch/x86/mm/init.c

@@ -208,7 +208,7 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
  * adjust the page_size_mask for small range to go with
  *	big page size instead small one if nearby are ram too.
  */
-static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
+static void __ref adjust_range_page_size_mask(struct map_range *mr,
 							 int nr_range)
 {
 	int i;
@@ -396,7 +396,7 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
  * This runs before bootmem is initialized and gets pages directly from
  * the physical memory. To access them they are temporarily mapped.
  */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
+unsigned long __ref init_memory_mapping(unsigned long start,
 					       unsigned long end)
 {
 	struct map_range mr[NR_RANGE_MR];

+ 2 - 2
arch/x86/platform/efi/early_printk.c

@@ -44,7 +44,7 @@ early_initcall(early_efi_map_fb);
  * In case earlyprintk=efi,keep we have the whole framebuffer mapped already
  * so just return the offset efi_fb + start.
  */
-static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
+static __ref void *early_efi_map(unsigned long start, unsigned long len)
 {
 	unsigned long base;
 
@@ -56,7 +56,7 @@ static __init_refok void *early_efi_map(unsigned long start, unsigned long len)
 		return early_ioremap(base + start, len);
 }
 
-static __init_refok void early_efi_unmap(void *addr, unsigned long len)
+static __ref void early_efi_unmap(void *addr, unsigned long len)
 {
 	if (!efi_fb)
 		early_iounmap(addr, len);

+ 2 - 3
arch/x86/xen/enlighten.c

@@ -34,9 +34,7 @@
 #include <linux/edd.h>
 #include <linux/frame.h>
 
-#ifdef CONFIG_KEXEC_CORE
 #include <linux/kexec.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/events.h>
@@ -1334,7 +1332,8 @@ static void xen_crash_shutdown(struct pt_regs *regs)
 static int
 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-	xen_reboot(SHUTDOWN_crash);
+	if (!kexec_crash_loaded())
+		xen_reboot(SHUTDOWN_crash);
 	return NOTIFY_DONE;
 }
 

+ 2 - 3
drivers/acpi/osl.c

@@ -309,7 +309,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
  * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
  * routine simply calls __acpi_map_table() to get the job done.
  */
-void __iomem *__init_refok
+void __iomem *__ref
 acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
 	struct acpi_ioremap *map;
@@ -362,8 +362,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
 
-void *__init_refok
-acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
 	return (void *)acpi_os_map_iomem(phys, size);
 }

+ 127 - 56
drivers/base/firmware_class.c

@@ -46,7 +46,8 @@ MODULE_LICENSE("GPL");
 extern struct builtin_fw __start_builtin_fw[];
 extern struct builtin_fw __end_builtin_fw[];
 
-static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+static bool fw_get_builtin_firmware(struct firmware *fw, const char *name,
+				    void *buf, size_t size)
 {
 	struct builtin_fw *b_fw;
 
@@ -54,6 +55,9 @@ static bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
 		if (strcmp(name, b_fw->name) == 0) {
 			fw->size = b_fw->size;
 			fw->data = b_fw->data;
+
+			if (buf && fw->size <= size)
+				memcpy(buf, fw->data, fw->size);
 			return true;
 		}
 	}
@@ -74,7 +78,9 @@ static bool fw_is_builtin_firmware(const struct firmware *fw)
 
 #else /* Module case - no builtin firmware support */
 
-static inline bool fw_get_builtin_firmware(struct firmware *fw, const char *name)
+static inline bool fw_get_builtin_firmware(struct firmware *fw,
+					   const char *name, void *buf,
+					   size_t size)
 {
 	return false;
 }
@@ -112,6 +118,7 @@ static inline long firmware_loading_timeout(void)
 #define FW_OPT_FALLBACK		0
 #endif
 #define FW_OPT_NO_WARN	(1U << 3)
+#define FW_OPT_NOCACHE	(1U << 4)
 
 struct firmware_cache {
 	/* firmware_buf instance will be added into the below list */
@@ -143,6 +150,7 @@ struct firmware_buf {
 	unsigned long status;
 	void *data;
 	size_t size;
+	size_t allocated_size;
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 	bool is_paged_buf;
 	bool need_uevent;
@@ -178,7 +186,8 @@ static DEFINE_MUTEX(fw_lock);
 static struct firmware_cache fw_cache;
 
 static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
-					      struct firmware_cache *fwc)
+					      struct firmware_cache *fwc,
+					      void *dbuf, size_t size)
 {
 	struct firmware_buf *buf;
 
@@ -194,6 +203,8 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
 
 	kref_init(&buf->ref);
 	buf->fwc = fwc;
+	buf->data = dbuf;
+	buf->allocated_size = size;
 	init_completion(&buf->completion);
 #ifdef CONFIG_FW_LOADER_USER_HELPER
 	INIT_LIST_HEAD(&buf->pending_list);
@@ -217,7 +228,8 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
 
 static int fw_lookup_and_allocate_buf(const char *fw_name,
 				      struct firmware_cache *fwc,
-				      struct firmware_buf **buf)
+				      struct firmware_buf **buf, void *dbuf,
+				      size_t size)
 {
 	struct firmware_buf *tmp;
 
@@ -229,7 +241,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
 		*buf = tmp;
 		return 1;
 	}
-	tmp = __allocate_fw_buf(fw_name, fwc);
+	tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
 	if (tmp)
 		list_add(&tmp->list, &fwc->head);
 	spin_unlock(&fwc->lock);
@@ -261,6 +273,7 @@ static void __fw_free_buf(struct kref *ref)
 		vfree(buf->pages);
 	} else
 #endif
+	if (!buf->allocated_size)
 		vfree(buf->data);
 	kfree_const(buf->fw_id);
 	kfree(buf);
@@ -301,13 +314,21 @@ static void fw_finish_direct_load(struct device *device,
 	mutex_unlock(&fw_lock);
 }
 
-static int fw_get_filesystem_firmware(struct device *device,
-				       struct firmware_buf *buf)
+static int
+fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
 {
 	loff_t size;
 	int i, len;
 	int rc = -ENOENT;
 	char *path;
+	enum kernel_read_file_id id = READING_FIRMWARE;
+	size_t msize = INT_MAX;
+
+	/* Already populated data member means we're loading into a buffer */
+	if (buf->data) {
+		id = READING_FIRMWARE_PREALLOC_BUFFER;
+		msize = buf->allocated_size;
+	}
 
 	path = __getname();
 	if (!path)
@@ -326,8 +347,8 @@ static int fw_get_filesystem_firmware(struct device *device,
 		}
 
 		buf->size = 0;
-		rc = kernel_read_file_from_path(path, &buf->data, &size,
-						INT_MAX, READING_FIRMWARE);
+		rc = kernel_read_file_from_path(path, &buf->data, &size, msize,
+						id);
 		if (rc) {
 			if (rc == -ENOENT)
 				dev_dbg(device, "loading %s failed with error %d\n",
@@ -691,6 +712,38 @@ out:
 
 static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);
 
+static void firmware_rw_buf(struct firmware_buf *buf, char *buffer,
+			   loff_t offset, size_t count, bool read)
+{
+	if (read)
+		memcpy(buffer, buf->data + offset, count);
+	else
+		memcpy(buf->data + offset, buffer, count);
+}
+
+static void firmware_rw(struct firmware_buf *buf, char *buffer,
+			loff_t offset, size_t count, bool read)
+{
+	while (count) {
+		void *page_data;
+		int page_nr = offset >> PAGE_SHIFT;
+		int page_ofs = offset & (PAGE_SIZE-1);
+		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
+
+		page_data = kmap(buf->pages[page_nr]);
+
+		if (read)
+			memcpy(buffer, page_data + page_ofs, page_cnt);
+		else
+			memcpy(page_data + page_ofs, buffer, page_cnt);
+
+		kunmap(buf->pages[page_nr]);
+		buffer += page_cnt;
+		offset += page_cnt;
+		count -= page_cnt;
+	}
+}
+
 static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
 				  struct bin_attribute *bin_attr,
 				  char *buffer, loff_t offset, size_t count)
@@ -715,21 +768,11 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
 
 	ret_count = count;
 
-	while (count) {
-		void *page_data;
-		int page_nr = offset >> PAGE_SHIFT;
-		int page_ofs = offset & (PAGE_SIZE-1);
-		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
-		page_data = kmap(buf->pages[page_nr]);
-
-		memcpy(buffer, page_data + page_ofs, page_cnt);
+	if (buf->data)
+		firmware_rw_buf(buf, buffer, offset, count, true);
+	else
+		firmware_rw(buf, buffer, offset, count, true);
 
-		kunmap(buf->pages[page_nr]);
-		buffer += page_cnt;
-		offset += page_cnt;
-		count -= page_cnt;
-	}
 out:
 	mutex_unlock(&fw_lock);
 	return ret_count;
@@ -804,29 +847,23 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
 		goto out;
 	}
 
-	retval = fw_realloc_buffer(fw_priv, offset + count);
-	if (retval)
-		goto out;
-
-	retval = count;
-
-	while (count) {
-		void *page_data;
-		int page_nr = offset >> PAGE_SHIFT;
-		int page_ofs = offset & (PAGE_SIZE - 1);
-		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
-
-		page_data = kmap(buf->pages[page_nr]);
-
-		memcpy(page_data + page_ofs, buffer, page_cnt);
+	if (buf->data) {
+		if (offset + count > buf->allocated_size) {
+			retval = -ENOMEM;
+			goto out;
+		}
+		firmware_rw_buf(buf, buffer, offset, count, false);
+		retval = count;
+	} else {
+		retval = fw_realloc_buffer(fw_priv, offset + count);
+		if (retval)
+			goto out;
 
-		kunmap(buf->pages[page_nr]);
-		buffer += page_cnt;
-		offset += page_cnt;
-		count -= page_cnt;
+		retval = count;
+		firmware_rw(buf, buffer, offset, count, false);
 	}
 
-	buf->size = max_t(size_t, offset, buf->size);
+	buf->size = max_t(size_t, offset + count, buf->size);
 out:
 	mutex_unlock(&fw_lock);
 	return retval;
@@ -894,7 +931,8 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
 	struct firmware_buf *buf = fw_priv->buf;
 
 	/* fall back on userspace loading */
-	buf->is_paged_buf = true;
+	if (!buf->data)
+		buf->is_paged_buf = true;
 
 	dev_set_uevent_suppress(f_dev, true);
 
@@ -929,7 +967,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
 
 	if (is_fw_load_aborted(buf))
 		retval = -EAGAIN;
-	else if (!buf->data)
+	else if (buf->is_paged_buf && !buf->data)
 		retval = -ENOMEM;
 
 	device_del(f_dev);
@@ -1012,7 +1050,7 @@ static int sync_cached_firmware_buf(struct firmware_buf *buf)
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-			  struct device *device)
+			  struct device *device, void *dbuf, size_t size)
 {
 	struct firmware *firmware;
 	struct firmware_buf *buf;
@@ -1025,12 +1063,12 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
 		return -ENOMEM;
 	}
 
-	if (fw_get_builtin_firmware(firmware, name)) {
+	if (fw_get_builtin_firmware(firmware, name, dbuf, size)) {
 		dev_dbg(device, "using built-in %s\n", name);
 		return 0; /* assigned */
 	}
 
-	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf, dbuf, size);
 
 	/*
 	 * bind with 'buf' now to avoid warning in failure path
@@ -1070,14 +1108,16 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
 	 * should be fixed in devres or driver core.
 	 */
 	/* don't cache firmware handled without uevent */
-	if (device && (opt_flags & FW_OPT_UEVENT))
+	if (device && (opt_flags & FW_OPT_UEVENT) &&
+	    !(opt_flags & FW_OPT_NOCACHE))
 		fw_add_devm_name(device, buf->fw_id);
 
 	/*
 	 * After caching firmware image is started, let it piggyback
 	 * on request firmware.
 	 */
-	if (buf->fwc->state == FW_LOADER_START_CACHE) {
+	if (!(opt_flags & FW_OPT_NOCACHE) &&
+	    buf->fwc->state == FW_LOADER_START_CACHE) {
 		if (fw_cache_piggyback_on_request(buf->fw_id))
 			kref_get(&buf->ref);
 	}
@@ -1091,7 +1131,8 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
 /* called from request_firmware() and request_firmware_work_func() */
 static int
 _request_firmware(const struct firmware **firmware_p, const char *name,
-		  struct device *device, unsigned int opt_flags)
+		  struct device *device, void *buf, size_t size,
+		  unsigned int opt_flags)
 {
 	struct firmware *fw = NULL;
 	long timeout;
@@ -1105,7 +1146,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
 		goto out;
 	}
 
-	ret = _request_firmware_prepare(&fw, name, device);
+	ret = _request_firmware_prepare(&fw, name, device, buf, size);
 	if (ret <= 0) /* error or already assigned */
 		goto out;
 
@@ -1184,7 +1225,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
 
 	/* Need to pin this module until return */
 	__module_get(THIS_MODULE);
-	ret = _request_firmware(firmware_p, name, device,
+	ret = _request_firmware(firmware_p, name, device, NULL, 0,
 				FW_OPT_UEVENT | FW_OPT_FALLBACK);
 	module_put(THIS_MODULE);
 	return ret;
@@ -1208,13 +1249,43 @@ int request_firmware_direct(const struct firmware **firmware_p,
 	int ret;
 
 	__module_get(THIS_MODULE);
-	ret = _request_firmware(firmware_p, name, device,
+	ret = _request_firmware(firmware_p, name, device, NULL, 0,
 				FW_OPT_UEVENT | FW_OPT_NO_WARN);
 	module_put(THIS_MODULE);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(request_firmware_direct);
 
+/**
+ * request_firmware_into_buf - load firmware into a previously allocated buffer
+ * @firmware_p: pointer to firmware image
+ * @name: name of firmware file
+ * @device: device for which firmware is being loaded and DMA region allocated
+ * @buf: address of buffer to load firmware into
+ * @size: size of buffer
+ *
+ * This function works pretty much like request_firmware(), but it doesn't
+ * allocate a buffer to hold the firmware data. Instead, the firmware
+ * is loaded directly into the buffer pointed to by @buf and the @firmware_p
+ * data member is pointed at @buf.
+ *
+ * This function doesn't cache firmware either.
+ */
+int
+request_firmware_into_buf(const struct firmware **firmware_p, const char *name,
+			  struct device *device, void *buf, size_t size)
+{
+	int ret;
+
+	__module_get(THIS_MODULE);
+	ret = _request_firmware(firmware_p, name, device, buf, size,
+				FW_OPT_UEVENT | FW_OPT_FALLBACK |
+				FW_OPT_NOCACHE);
+	module_put(THIS_MODULE);
+	return ret;
+}
+EXPORT_SYMBOL(request_firmware_into_buf);
+
 /**
  * release_firmware: - release the resource associated with a firmware image
  * @fw: firmware resource to release
@@ -1247,7 +1318,7 @@ static void request_firmware_work_func(struct work_struct *work)
 
 	fw_work = container_of(work, struct firmware_work, work);
 
-	_request_firmware(&fw, fw_work->name, fw_work->device,
+	_request_firmware(&fw, fw_work->name, fw_work->device, NULL, 0,
 			  fw_work->opt_flags);
 	fw_work->cont(fw, fw_work->context);
 	put_device(fw_work->device); /* taken in request_firmware_nowait() */
@@ -1380,7 +1451,7 @@ static int uncache_firmware(const char *fw_name)
 
 	pr_debug("%s: %s\n", __func__, fw_name);
 
-	if (fw_get_builtin_firmware(&fw, fw_name))
+	if (fw_get_builtin_firmware(&fw, fw_name, NULL, 0))
 		return 0;
 
 	buf = fw_lookup_buf(fw_name);
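
For context, a minimal usage sketch of the new entry point (hypothetical caller; "example-fw.bin", my_dev and MY_FW_AREA_SIZE are illustrative, not part of this patch):

	const struct firmware *fw;
	int err;

	/* Load the image into a caller-owned, preallocated area;
	 * the result is not cached by the firmware loader.
	 */
	err = request_firmware_into_buf(&fw, "example-fw.bin",
					&my_dev->dev, my_dev->fw_area,
					MY_FW_AREA_SIZE);
	if (err)
		return err;

	/* fw->data now points at my_dev->fw_area; fw->size is the image size */
	/* ... consume the image, then: */
	release_firmware(fw);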

+ 1 - 1
drivers/base/node.c

@@ -370,7 +370,7 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 #define page_initialized(page)  (page->lru.next)
 
-static int __init_refok get_nid_for_pfn(unsigned long pfn)
+static int __ref get_nid_for_pfn(unsigned long pfn)
 {
 	struct page *page;
 

+ 0 - 1
drivers/block/drbd/drbd_actlog.c

@@ -27,7 +27,6 @@
 #include <linux/crc32c.h>
 #include <linux/drbd.h>
 #include <linux/drbd_limits.h>
-#include <linux/dynamic_debug.h>
 #include "drbd_int.h"
 
 

+ 1 - 0
drivers/block/drbd/drbd_int.h

@@ -41,6 +41,7 @@
 #include <linux/backing-dev.h>
 #include <linux/genhd.h>
 #include <linux/idr.h>
+#include <linux/dynamic_debug.h>
 #include <net/tcp.h>
 #include <linux/lru_cache.h>
 #include <linux/prefetch.h>

+ 2 - 2
drivers/clk/clkdev.c

@@ -250,7 +250,7 @@ struct clk_lookup_alloc {
 	char	con_id[MAX_CON_ID];
 };
 
-static struct clk_lookup * __init_refok
+static struct clk_lookup * __ref
 vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
 	va_list ap)
 {
@@ -287,7 +287,7 @@ vclkdev_create(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
 	return cl;
 }
 
-struct clk_lookup * __init_refok
+struct clk_lookup * __ref
 clkdev_alloc(struct clk *clk, const char *con_id, const char *dev_fmt, ...)
 {
 	struct clk_lookup *cl;

+ 2 - 15
drivers/memstick/core/ms_block.c

@@ -2338,23 +2338,11 @@ static struct memstick_driver msb_driver = {
 	.resume   = msb_resume
 };
 
-static int major;
-
 static int __init msb_init(void)
 {
-	int rc = register_blkdev(0, DRIVER_NAME);
-
-	if (rc < 0) {
-		pr_err("failed to register major (error %d)\n", rc);
-		return rc;
-	}
-
-	major = rc;
-	rc = memstick_register_driver(&msb_driver);
-	if (rc) {
-		unregister_blkdev(major, DRIVER_NAME);
+	int rc = memstick_register_driver(&msb_driver);
+	if (rc)
 		pr_err("failed to register memstick driver (error %d)\n", rc);
-	}
 
 	return rc;
 }
@@ -2362,7 +2350,6 @@ static int __init msb_init(void)
 static void __exit msb_exit(void)
 {
 	memstick_unregister_driver(&msb_driver);
-	unregister_blkdev(major, DRIVER_NAME);
 	idr_destroy(&msb_disk_idr);
 }
 

+ 1 - 1
drivers/pci/xen-pcifront.c

@@ -1086,7 +1086,7 @@ out:
 	return err;
 }
 
-static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
+static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
 						  enum xenbus_state be_state)
 {
 	struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);

+ 9 - 0
drivers/rapidio/Kconfig

@@ -67,6 +67,15 @@ config RAPIDIO_ENUM_BASIC
 
 endchoice
 
+config RAPIDIO_CHMAN
+	tristate "RapidIO Channelized Messaging driver"
+	depends on RAPIDIO
+	help
+	  This option includes the RapidIO channelized messaging driver,
+	  which provides a socket-like interface allowing a single RapidIO
+	  messaging mailbox to be shared between multiple user-space
+	  applications.
+	  See "Documentation/rapidio/rio_cm.txt" for the driver description.
+
 config RAPIDIO_MPORT_CDEV
 	tristate "RapidIO /dev mport device driver"
 	depends on RAPIDIO
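
For reference, enabling the new driver as a module would look like this in a config fragment (RAPIDIO support itself must already be selected):

	CONFIG_RAPIDIO=y
	CONFIG_RAPIDIO_CHMAN=m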

+ 1 - 0
drivers/rapidio/Makefile

@@ -5,6 +5,7 @@ obj-$(CONFIG_RAPIDIO) += rapidio.o
 rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o
 
 obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
+obj-$(CONFIG_RAPIDIO_CHMAN)	+= rio_cm.o
 
 obj-$(CONFIG_RAPIDIO)		+= switches/
 obj-$(CONFIG_RAPIDIO)		+= devices/

+ 3 - 3
drivers/rapidio/devices/rio_mport_cdev.c

@@ -1813,7 +1813,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
 	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
 		rdev->efptr = rval & 0xffff;
 		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
-							 hopcount);
+						hopcount, &rdev->phys_rmap);
 
 		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
 						hopcount, RIO_EFB_ERR_MGMNT);
@@ -2242,7 +2242,7 @@ static void mport_mm_open(struct vm_area_struct *vma)
 {
 	struct rio_mport_mapping *map = vma->vm_private_data;
 
-rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+	rmcd_debug(MMAP, "%pad", &map->phys_addr);
 	kref_get(&map->ref);
 }
 
@@ -2250,7 +2250,7 @@ static void mport_mm_close(struct vm_area_struct *vma)
 {
 	struct rio_mport_mapping *map = vma->vm_private_data;
 
-rmcd_debug(MMAP, "0x%pad", &map->phys_addr);
+	rmcd_debug(MMAP, "%pad", &map->phys_addr);
 	mutex_lock(&map->md->buf_mutex);
 	kref_put(&map->ref, mport_release_mapping);
 	mutex_unlock(&map->md->buf_mutex);

+ 45 - 12
drivers/rapidio/devices/tsi721.c

@@ -37,11 +37,20 @@
 #include "tsi721.h"
 
 #ifdef DEBUG
-u32 dbg_level = DBG_INIT | DBG_EXIT;
+u32 dbg_level;
 module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
 #endif
 
+static int pcie_mrrs = -1;
+module_param(pcie_mrrs, int, S_IRUGO);
+MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");
+
+static u8 mbox_sel = 0x0f;
+module_param(mbox_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(mbox_sel,
+		 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
+
 static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
 static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
 
@@ -1081,7 +1090,7 @@ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
  * from rstart to lstart.
  */
 static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
-		u64 rstart, u32 size, u32 flags)
+		u64 rstart, u64 size, u32 flags)
 {
 	struct tsi721_device *priv = mport->priv;
 	int i, avail = -1;
@@ -1094,6 +1103,10 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
 	struct tsi721_ib_win_mapping *map = NULL;
 	int ret = -EBUSY;
 
+	/* Max IBW size supported by HW is 16GB */
+	if (size > 0x400000000UL)
+		return -EINVAL;
+
 	if (direct) {
 		/* Calculate minimal acceptable window size and base address */
 
@@ -1101,15 +1114,15 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
 		ibw_start = lstart & ~(ibw_size - 1);
 
 		tsi_debug(IBW, &priv->pdev->dev,
-			"Direct (RIO_0x%llx -> PCIe_0x%pad), size=0x%x, ibw_start = 0x%llx",
+			"Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
 			rstart, &lstart, size, ibw_start);
 
 		while ((lstart + size) > (ibw_start + ibw_size)) {
 			ibw_size *= 2;
 			ibw_start = lstart & ~(ibw_size - 1);
-			if (ibw_size > 0x80000000) { /* Limit max size to 2GB */
+			/* Check for crossing IBW max size 16GB */
+			if (ibw_size > 0x400000000UL)
 				return -EBUSY;
-			}
 		}
 
 		loc_start = ibw_start;
@@ -1120,7 +1133,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
 
 	} else {
 		tsi_debug(IBW, &priv->pdev->dev,
-			"Translated (RIO_0x%llx -> PCIe_0x%pad), size=0x%x",
+			"Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
 			rstart, &lstart, size);
 
 		if (!is_power_of_2(size) || size < 0x1000 ||
@@ -1215,7 +1228,7 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
 	priv->ibwin_cnt--;
 
 	tsi_debug(IBW, &priv->pdev->dev,
-		"Configured IBWIN%d (RIO_0x%llx -> PCIe_0x%pad), size=0x%llx",
+		"Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
 		i, ibw_start, &loc_start, ibw_size);
 
 	return 0;
@@ -1237,7 +1250,7 @@ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
 	int i;
 
 	tsi_debug(IBW, &priv->pdev->dev,
-		"Unmap IBW mapped to PCIe_0x%pad", &lstart);
+		"Unmap IBW mapped to PCIe_%pad", &lstart);
 
 	/* Search for matching active inbound translation window */
 	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
@@ -1877,6 +1890,11 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
 		goto out;
 	}
 
+	if ((mbox_sel & (1 << mbox)) == 0) {
+		rc = -ENODEV;
+		goto out;
+	}
+
 	priv->omsg_ring[mbox].dev_id = dev_id;
 	priv->omsg_ring[mbox].size = entries;
 	priv->omsg_ring[mbox].sts_rdptr = 0;
@@ -2161,6 +2179,11 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
 		goto out;
 	}
 
+	if ((mbox_sel & (1 << mbox)) == 0) {
+		rc = -ENODEV;
+		goto out;
+	}
+
 	/* Initialize IB Messaging Ring */
 	priv->imsg_ring[mbox].dev_id = dev_id;
 	priv->imsg_ring[mbox].size = entries;
@@ -2532,11 +2555,11 @@ static int tsi721_query_mport(struct rio_mport *mport,
 	struct tsi721_device *priv = mport->priv;
 	u32 rval;
 
-	rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_ERR_STS_CSR(0)));
+	rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
 	if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
-		rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL2_CSR(0)));
+		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
 		attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
-		rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL_CSR(0)));
+		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
 		attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
 	} else
 		attr->link_speed = RIO_LINK_DOWN;
@@ -2650,9 +2673,9 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
 	mport->ops = &tsi721_rio_ops;
 	mport->index = 0;
 	mport->sys_size = 0; /* small system */
-	mport->phy_type = RIO_PHY_SERIAL;
 	mport->priv = (void *)priv;
 	mport->phys_efptr = 0x100;
+	mport->phys_rmap = 1;
 	mport->dev.parent = &pdev->dev;
 	mport->dev.release = tsi721_mport_release;
 
@@ -2840,6 +2863,16 @@ static int tsi721_probe(struct pci_dev *pdev,
 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
 		PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
 
+	/* Override PCIe Maximum Read Request Size setting if requested */
+	if (pcie_mrrs >= 0) {
+		if (pcie_mrrs <= 5)
+			pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
+					PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
+		else
+			tsi_info(&pdev->dev,
+				 "Invalid MRRS override value %d", pcie_mrrs);
+	}
+
 	/* Adjust PCIe completion timeout. */
 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2);
 

+ 1 - 1
drivers/rapidio/devices/tsi721.h

@@ -661,7 +661,7 @@ enum dma_rtype {
  */
 #define TSI721_DMA_CHNUM	TSI721_DMA_MAXCH
 
-#define TSI721_DMACH_MAINT	0	/* DMA channel for maint requests */
+#define TSI721_DMACH_MAINT	7	/* DMA channel for maint requests */
 #define TSI721_DMACH_MAINT_NBD	32	/* Number of BDs for maint requests */
 
 #define TSI721_DMACH_DMA	1	/* DMA channel for data transfers */

+ 18 - 9
drivers/rapidio/devices/tsi721_dma.c

@@ -36,18 +36,26 @@
 
 #include "tsi721.h"
 
-#define TSI721_DMA_TX_QUEUE_SZ	16	/* number of transaction descriptors */
-
 #ifdef CONFIG_PCI_MSI
 static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
 #endif
 static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
 
 static unsigned int dma_desc_per_channel = 128;
-module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
+module_param(dma_desc_per_channel, uint, S_IRUGO);
 MODULE_PARM_DESC(dma_desc_per_channel,
 		 "Number of DMA descriptors per channel (default: 128)");
 
+static unsigned int dma_txqueue_sz = 16;
+module_param(dma_txqueue_sz, uint, S_IRUGO);
+MODULE_PARM_DESC(dma_txqueue_sz,
+		 "DMA Transactions Queue Size (default: 16)");
+
+static u8 dma_sel = 0x7f;
+module_param(dma_sel, byte, S_IRUGO);
+MODULE_PARM_DESC(dma_sel,
+		 "DMA Channel Selection Mask (default: 0x7f = all)");
+
 static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
 {
 	return container_of(chan, struct tsi721_bdma_chan, dchan);
@@ -718,6 +726,7 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
 	cookie = dma_cookie_assign(txd);
 	desc->status = DMA_IN_PROGRESS;
 	list_add_tail(&desc->desc_node, &bdma_chan->queue);
+	tsi721_advance_work(bdma_chan, NULL);
 
 	spin_unlock_bh(&bdma_chan->lock);
 	return cookie;
@@ -732,7 +741,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
 
 	if (bdma_chan->bd_base)
-		return TSI721_DMA_TX_QUEUE_SZ;
+		return dma_txqueue_sz;
 
 	/* Initialize BDMA channel */
 	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
@@ -742,7 +751,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 	}
 
 	/* Allocate queue of transaction descriptors */
-	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
+	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
 			GFP_ATOMIC);
 	if (!desc) {
 		tsi_err(&dchan->dev->device,
@@ -754,7 +763,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 
 	bdma_chan->tx_desc = desc;
 
-	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
+	for (i = 0; i < dma_txqueue_sz; i++) {
 		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
 		desc[i].txd.tx_submit = tsi721_tx_submit;
 		desc[i].txd.flags = DMA_CTRL_ACK;
@@ -766,7 +775,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 	bdma_chan->active = true;
 	tsi721_bdma_interrupt_enable(bdma_chan, 1);
 
-	return TSI721_DMA_TX_QUEUE_SZ;
+	return dma_txqueue_sz;
 }
 
 static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
@@ -962,7 +971,7 @@ void tsi721_dma_stop_all(struct tsi721_device *priv)
 	int i;
 
 	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
-		if (i != TSI721_DMACH_MAINT)
+		if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
 			tsi721_dma_stop(&priv->bdma[i]);
 	}
 }
@@ -979,7 +988,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
 		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
 
-		if (i == TSI721_DMACH_MAINT)
+		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
 			continue;
 
 		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
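
A matching load-time sketch for the new DMA parameters (module name tsi721_mport assumed; note that channel 7 is now reserved for maintenance requests per the tsi721.h change below, so dma_sel only affects channels 0-6):

	# Use BDMA channels 0-5 with 32-entry transaction queues
	modprobe tsi721_mport dma_sel=0x3f dma_txqueue_sz=32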

+ 18 - 56
drivers/rapidio/rio-scan.c

@@ -49,15 +49,6 @@ struct rio_id_table {
 static int next_destid = 0;
 static int next_comptag = 1;
 
-static int rio_mport_phys_table[] = {
-	RIO_EFB_PAR_EP_ID,
-	RIO_EFB_PAR_EP_REC_ID,
-	RIO_EFB_SER_EP_ID,
-	RIO_EFB_SER_EP_REC_ID,
-	-1,
-};
-
-
 /**
  * rio_destid_alloc - Allocate next available destID for given network
  * @net: RIO network
@@ -380,10 +371,15 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
 	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
 		rdev->efptr = result & 0xffff;
 		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
-							 hopcount);
+						hopcount, &rdev->phys_rmap);
+		pr_debug("RIO: %s Register Map %d device\n",
+			 __func__, rdev->phys_rmap);
 
 		rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
 						hopcount, RIO_EFB_ERR_MGMNT);
+		if (!rdev->em_efptr)
+			rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
+						hopcount, RIO_EFB_ERR_MGMNT_HS);
 	}
 
 	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
@@ -445,7 +441,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
 			rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0);
 	} else {
 		if (do_enum)
-			/*Enable Input Output Port (transmitter reviever)*/
+			/*Enable Input Output Port (transmitter receiver)*/
 			rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
 
 		dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
@@ -481,10 +477,8 @@ cleanup:
 
 /**
  * rio_sport_is_active- Tests if a switch port has an active connection.
- * @port: Master port to send transaction
- * @destid: Associated destination ID for switch
- * @hopcount: Hopcount to reach switch
- * @sport: Switch port number
+ * @rdev: RapidIO device object
+ * @sp: Switch port number
  *
  * Reads the port error status CSR for a particular switch port to
  * determine if the port has an active link.  Returns
@@ -492,31 +486,12 @@ cleanup:
  * inactive.
  */
 static int
-rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
+rio_sport_is_active(struct rio_dev *rdev, int sp)
 {
 	u32 result = 0;
-	u32 ext_ftr_ptr;
 
-	ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
-
-	while (ext_ftr_ptr) {
-		rio_mport_read_config_32(port, destid, hopcount,
-					 ext_ftr_ptr, &result);
-		result = RIO_GET_BLOCK_ID(result);
-		if ((result == RIO_EFB_SER_EP_FREE_ID) ||
-		    (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
-		    (result == RIO_EFB_SER_EP_FREC_ID))
-			break;
-
-		ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
-						ext_ftr_ptr);
-	}
-
-	if (ext_ftr_ptr)
-		rio_mport_read_config_32(port, destid, hopcount,
-					 ext_ftr_ptr +
-					 RIO_PORT_N_ERR_STS_CSR(sport),
-					 &result);
+	rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, sp),
+			   &result);
 
 	return result & RIO_PORT_N_ERR_STS_PORT_OK;
 }
@@ -655,9 +630,7 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
 
 			cur_destid = next_destid;
 
-			if (rio_sport_is_active
-			    (port, RIO_ANY_DESTID(port->sys_size), hopcount,
-			     port_num)) {
+			if (rio_sport_is_active(rdev, port_num)) {
 				pr_debug(
 				    "RIO: scanning device on port %d\n",
 				    port_num);
@@ -785,8 +758,7 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
 			if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num)
 				continue;
 
-			if (rio_sport_is_active
-			    (port, destid, hopcount, port_num)) {
+			if (rio_sport_is_active(rdev, port_num)) {
 				pr_debug(
 				    "RIO: scanning device on port %d\n",
 				    port_num);
@@ -831,21 +803,11 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
 static int rio_mport_is_active(struct rio_mport *port)
 {
 	u32 result = 0;
-	u32 ext_ftr_ptr;
-	int *entry = rio_mport_phys_table;
-
-	do {
-		if ((ext_ftr_ptr =
-		     rio_mport_get_feature(port, 1, 0, 0, *entry)))
-			break;
-	} while (*++entry >= 0);
-
-	if (ext_ftr_ptr)
-		rio_local_read_config_32(port,
-					 ext_ftr_ptr +
-					 RIO_PORT_N_ERR_STS_CSR(port->index),
-					 &result);
 
+	rio_local_read_config_32(port,
+		port->phys_efptr +
+			RIO_PORT_N_ERR_STS_CSR(port->index, port->phys_rmap),
+		&result);
 	return result & RIO_PORT_N_ERR_STS_PORT_OK;
 }
 

+ 125 - 87
drivers/rapidio/rio.c

@@ -268,6 +268,12 @@ int rio_request_inb_mbox(struct rio_mport *mport,
 		mport->inb_msg[mbox].mcback = minb;
 
 		rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);
+		if (rc) {
+			mport->inb_msg[mbox].mcback = NULL;
+			mport->inb_msg[mbox].res = NULL;
+			release_resource(res);
+			kfree(res);
+		}
 	} else
 		rc = -ENOMEM;
 
@@ -285,13 +291,22 @@ int rio_request_inb_mbox(struct rio_mport *mport,
  */
 int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
 {
-	if (mport->ops->close_inb_mbox) {
-		mport->ops->close_inb_mbox(mport, mbox);
+	int rc;
 
-		/* Release the mailbox resource */
-		return release_resource(mport->inb_msg[mbox].res);
-	} else
-		return -ENOSYS;
+	if (!mport->ops->close_inb_mbox || !mport->inb_msg[mbox].res)
+		return -EINVAL;
+
+	mport->ops->close_inb_mbox(mport, mbox);
+	mport->inb_msg[mbox].mcback = NULL;
+
+	rc = release_resource(mport->inb_msg[mbox].res);
+	if (rc)
+		return rc;
+
+	kfree(mport->inb_msg[mbox].res);
+	mport->inb_msg[mbox].res = NULL;
+
+	return 0;
 }
 
 /**
@@ -336,6 +351,12 @@ int rio_request_outb_mbox(struct rio_mport *mport,
 		mport->outb_msg[mbox].mcback = moutb;
 
 		rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);
+		if (rc) {
+			mport->outb_msg[mbox].mcback = NULL;
+			mport->outb_msg[mbox].res = NULL;
+			release_resource(res);
+			kfree(res);
+		}
 	} else
 		rc = -ENOMEM;
 
@@ -353,13 +374,22 @@ int rio_request_outb_mbox(struct rio_mport *mport,
  */
 int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
 {
-	if (mport->ops->close_outb_mbox) {
-		mport->ops->close_outb_mbox(mport, mbox);
+	int rc;
 
-		/* Release the mailbox resource */
-		return release_resource(mport->outb_msg[mbox].res);
-	} else
-		return -ENOSYS;
+	if (!mport->ops->close_outb_mbox || !mport->outb_msg[mbox].res)
+		return -EINVAL;
+
+	mport->ops->close_outb_mbox(mport, mbox);
+	mport->outb_msg[mbox].mcback = NULL;
+
+	rc = release_resource(mport->outb_msg[mbox].res);
+	if (rc)
+		return rc;
+
+	kfree(mport->outb_msg[mbox].res);
+	mport->outb_msg[mbox].res = NULL;
+
+	return 0;
 }
 
 /**
@@ -756,10 +786,11 @@ EXPORT_SYMBOL_GPL(rio_unmap_outb_region);
  * @local: Indicate a local master port or remote device access
  * @destid: Destination ID of the device
  * @hopcount: Number of switch hops to the device
+ * @rmap: pointer to location to store register map type info
  */
 u32
 rio_mport_get_physefb(struct rio_mport *port, int local,
-		      u16 destid, u8 hopcount)
+		      u16 destid, u8 hopcount, u32 *rmap)
 {
 	u32 ext_ftr_ptr;
 	u32 ftr_header;
@@ -777,14 +808,21 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
 		ftr_header = RIO_GET_BLOCK_ID(ftr_header);
 		switch (ftr_header) {
 
-		case RIO_EFB_SER_EP_ID_V13P:
-		case RIO_EFB_SER_EP_REC_ID_V13P:
-		case RIO_EFB_SER_EP_FREE_ID_V13P:
 		case RIO_EFB_SER_EP_ID:
 		case RIO_EFB_SER_EP_REC_ID:
 		case RIO_EFB_SER_EP_FREE_ID:
-		case RIO_EFB_SER_EP_FREC_ID:
+		case RIO_EFB_SER_EP_M1_ID:
+		case RIO_EFB_SER_EP_SW_M1_ID:
+		case RIO_EFB_SER_EPF_M1_ID:
+		case RIO_EFB_SER_EPF_SW_M1_ID:
+			*rmap = 1;
+			return ext_ftr_ptr;
 
+		case RIO_EFB_SER_EP_M2_ID:
+		case RIO_EFB_SER_EP_SW_M2_ID:
+		case RIO_EFB_SER_EPF_M2_ID:
+		case RIO_EFB_SER_EPF_SW_M2_ID:
+			*rmap = 2;
 			return ext_ftr_ptr;
 
 		default:
@@ -843,16 +881,16 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
 	u32 regval;
 
 	rio_read_config_32(rdev,
-				 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
-				 &regval);
+		RIO_DEV_PORT_N_CTL_CSR(rdev, pnum),
+		&regval);
 	if (lock)
 		regval |= RIO_PORT_N_CTL_LOCKOUT;
 	else
 		regval &= ~RIO_PORT_N_CTL_LOCKOUT;
 
 	rio_write_config_32(rdev,
-				  rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
-				  regval);
+		RIO_DEV_PORT_N_CTL_CSR(rdev, pnum),
+		regval);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rio_set_port_lockout);
@@ -876,6 +914,7 @@ int rio_enable_rx_tx_port(struct rio_mport *port,
 #ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
 	u32 regval;
 	u32 ext_ftr_ptr;
+	u32 rmap;
 
 	/*
 	* enable rx input tx output port
@@ -883,34 +922,29 @@ int rio_enable_rx_tx_port(struct rio_mport *port,
 	pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
 		 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
 
-	ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
+	ext_ftr_ptr = rio_mport_get_physefb(port, local, destid,
+					    hopcount, &rmap);
 
 	if (local) {
-		rio_local_read_config_32(port, ext_ftr_ptr +
-				RIO_PORT_N_CTL_CSR(0),
+		rio_local_read_config_32(port,
+				ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap),
 				&regval);
 	} else {
 		if (rio_mport_read_config_32(port, destid, hopcount,
-		ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
+			ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap),
+				&regval) < 0)
 			return -EIO;
 	}
 
-	if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
-		/* serial */
-		regval = regval | RIO_PORT_N_CTL_EN_RX_SER
-				| RIO_PORT_N_CTL_EN_TX_SER;
-	} else {
-		/* parallel */
-		regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
-				| RIO_PORT_N_CTL_EN_TX_PAR;
-	}
+	regval = regval | RIO_PORT_N_CTL_EN_RX | RIO_PORT_N_CTL_EN_TX;
 
 	if (local) {
-		rio_local_write_config_32(port, ext_ftr_ptr +
-					  RIO_PORT_N_CTL_CSR(0), regval);
+		rio_local_write_config_32(port,
+			ext_ftr_ptr + RIO_PORT_N_CTL_CSR(0, rmap), regval);
 	} else {
 		if (rio_mport_write_config_32(port, destid, hopcount,
-		    ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
+			ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num, rmap),
+				regval) < 0)
 			return -EIO;
 	}
 #endif
@@ -1012,14 +1046,14 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 		/* Read from link maintenance response register
 		 * to clear valid bit */
 		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+			RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
 			&regval);
 		udelay(50);
 	}
 
 	/* Issue Input-status command */
 	rio_write_config_32(rdev,
-		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
+		RIO_DEV_PORT_N_MNT_REQ_CSR(rdev, pnum),
 		RIO_MNT_REQ_CMD_IS);
 
 	/* Exit if the response is not expected */
@@ -1030,7 +1064,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 	while (checkcount--) {
 		udelay(50);
 		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
+			RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, pnum),
 			&regval);
 		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
 			*lnkresp = regval;
@@ -1046,6 +1080,13 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
  * @rdev: Pointer to RIO device control structure
  * @pnum: Switch port number to clear errors
  * @err_status: port error status (if 0 reads register from device)
+ *
+ * TODO: Currently this routine is not compatible with recovery process
+ * specified for idt_gen3 RapidIO switch devices. It has to be reviewed
+ * to implement a universal recovery process that is compatible with the
+ * full range of available devices.
+ * IDT gen3 switch driver now implements HW-specific error handler that
+ * issues soft port reset to the port to reset ERR_STOP bits and ackIDs.
  */
 static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 {
@@ -1055,10 +1096,10 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 
 	if (err_status == 0)
 		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
+			RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
 			&err_status);
 
-	if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) {
+	if (err_status & RIO_PORT_N_ERR_STS_OUT_ES) {
 		pr_debug("RIO_EM: servicing Output Error-Stopped state\n");
 		/*
 		 * Send a Link-Request/Input-Status control symbol
@@ -1073,7 +1114,7 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
 		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
 		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+			RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
 			&regval);
 		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
 		near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24;
@@ -1091,43 +1132,43 @@ static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 			 * far inbound.
 			 */
 			rio_write_config_32(rdev,
-				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
+				RIO_DEV_PORT_N_ACK_STS_CSR(rdev, pnum),
 				(near_ackid << 24) |
 					(far_ackid << 8) | far_ackid);
 			/* Align far outstanding/outbound ackIDs with
 			 * near inbound.
 			 */
 			far_ackid++;
-			if (nextdev)
-				rio_write_config_32(nextdev,
-					nextdev->phys_efptr +
-					RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)),
-					(far_ackid << 24) |
-					(near_ackid << 8) | near_ackid);
-			else
-				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
+			if (!nextdev) {
+				pr_debug("RIO_EM: nextdev pointer == NULL\n");
+				goto rd_err;
+			}
+
+			rio_write_config_32(nextdev,
+				RIO_DEV_PORT_N_ACK_STS_CSR(nextdev,
+					RIO_GET_PORT_NUM(nextdev->swpinfo)),
+				(far_ackid << 24) |
+				(near_ackid << 8) | near_ackid);
 		}
 rd_err:
-		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
-			&err_status);
+		rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+				   &err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
 	}
 
-	if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) {
+	if ((err_status & RIO_PORT_N_ERR_STS_INP_ES) && nextdev) {
 		pr_debug("RIO_EM: servicing Input Error-Stopped state\n");
 		rio_get_input_status(nextdev,
 				     RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
 		udelay(50);
 
-		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
-			&err_status);
+		rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+				   &err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
 	}
 
-	return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
-			      RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0;
+	return (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+			      RIO_PORT_N_ERR_STS_INP_ES)) ? 1 : 0;
 }
 
 /**
@@ -1227,9 +1268,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
 	if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle)
 		rdev->rswitch->ops->em_handle(rdev, portnum);
 
-	rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
-			&err_status);
+	rio_read_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
+			   &err_status);
 	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
 
 	if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) {
@@ -1246,8 +1286,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
 		 * Depending on the link partner state, two attempts
 		 * may be needed for successful recovery.
 		 */
-		if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
-				  RIO_PORT_N_ERR_STS_PW_INP_ES)) {
+		if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+				  RIO_PORT_N_ERR_STS_INP_ES)) {
 			if (rio_clr_err_stopped(rdev, portnum, err_status))
 				rio_clr_err_stopped(rdev, portnum, 0);
 		}
@@ -1257,10 +1297,18 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
 			rdev->rswitch->port_ok &= ~(1 << portnum);
 			rio_set_port_lockout(rdev, portnum, 1);
 
+			if (rdev->phys_rmap == 1) {
 			rio_write_config_32(rdev,
-				rdev->phys_efptr +
-					RIO_PORT_N_ACK_STS_CSR(portnum),
+				RIO_DEV_PORT_N_ACK_STS_CSR(rdev, portnum),
 				RIO_PORT_N_ACK_CLEAR);
+			} else {
+				rio_write_config_32(rdev,
+					RIO_DEV_PORT_N_OB_ACK_CSR(rdev, portnum),
+					RIO_PORT_N_OB_ACK_CLEAR);
+				rio_write_config_32(rdev,
+					RIO_DEV_PORT_N_IB_ACK_CSR(rdev, portnum),
+					0);
+			}
 
 			/* Schedule Extraction Service */
 			pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n",
@@ -1289,9 +1337,8 @@ int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg)
 	}
 
 	/* Clear remaining error bits and Port-Write Pending bit */
-	rio_write_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
-			err_status);
+	rio_write_config_32(rdev, RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
+			    err_status);
 
 	return 0;
 }
@@ -1342,20 +1389,7 @@ EXPORT_SYMBOL_GPL(rio_mport_get_efb);
  * Tell if a device supports a given RapidIO capability.
  * Returns the offset of the requested extended feature
  * block within the device's RIO configuration space or
- * 0 in case the device does not support it.  Possible
- * values for @ftr:
- *
- * %RIO_EFB_PAR_EP_ID		LP/LVDS EP Devices
- *
- * %RIO_EFB_PAR_EP_REC_ID	LP/LVDS EP Recovery Devices
- *
- * %RIO_EFB_PAR_EP_FREE_ID	LP/LVDS EP Free Devices
- *
- * %RIO_EFB_SER_EP_ID		LP/Serial EP Devices
- *
- * %RIO_EFB_SER_EP_REC_ID	LP/Serial EP Recovery Devices
- *
- * %RIO_EFB_SER_EP_FREE_ID	LP/Serial EP Free Devices
+ * 0 in case the device does not support it.
  */
 u32
 rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
@@ -1848,7 +1882,9 @@ EXPORT_SYMBOL_GPL(rio_release_dma);
  * Initializes RapidIO capable DMA channel for the specified data transfer.
  * Uses DMA channel private extension to pass information related to remote
  * target RIO device.
- * Returns pointer to DMA transaction descriptor or NULL if failed.
+ *
+ * Returns: pointer to DMA transaction descriptor if successful,
+ *          error-valued pointer or NULL if failed.
  */
 struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
 	u16 destid, struct rio_dma_data *data,
@@ -1883,7 +1919,9 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
  * Initializes RapidIO capable DMA channel for the specified data transfer.
  * Uses DMA channel private extension to pass information related to remote
  * target RIO device.
- * Returns pointer to DMA transaction descriptor or NULL if failed.
+ *
+ * Returns: pointer to DMA transaction descriptor if successful,
+ *          error-valued pointer or NULL if failed.
  */
 struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
 	struct dma_chan *dchan, struct rio_dma_data *data,
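
To illustrate the register-map-aware convention used throughout these changes, a sketch of a caller (mport, destid and hopcount are assumed to be in scope):

	u32 rmap;	/* receives register map type (1 or 2, per rev.3 spec) */
	u32 efptr;
	u32 err_sts;

	efptr = rio_mport_get_physefb(mport, 0, destid, hopcount, &rmap);
	if (efptr)
		/* Port CSR offset macros now take the register map type */
		rio_mport_read_config_32(mport, destid, hopcount,
				efptr + RIO_PORT_N_ERR_STS_CSR(0, rmap),
				&err_sts);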

+ 1 - 1
drivers/rapidio/rio.h

@@ -22,7 +22,7 @@
 extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
 				 u8 hopcount, int ftr);
 extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
-				 u16 destid, u8 hopcount);
+				 u16 destid, u8 hopcount, u32 *rmap);
 extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
 			     u8 hopcount, u32 from);
 extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,

+ 2366 - 0
drivers/rapidio/rio_cm.c

@@ -0,0 +1,2366 @@
+/*
+ * rio_cm - RapidIO Channelized Messaging Driver
+ *
+ * Copyright 2013-2016 Integrated Device Technology, Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, RapidIO Trade Association
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * THIS PROGRAM IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL,
+ * BUT WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED WARRANTY OF
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.  SEE THE
+ * GNU GENERAL PUBLIC LICENSE FOR MORE DETAILS.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/reboot.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/rio_cm_cdev.h>
+
+#define DRV_NAME        "rio_cm"
+#define DRV_VERSION     "1.0.0"
+#define DRV_AUTHOR      "Alexandre Bounine <alexandre.bounine@idt.com>"
+#define DRV_DESC        "RapidIO Channelized Messaging Driver"
+#define DEV_NAME	"rio_cm"
+
+/* Debug output filtering masks */
+enum {
+	DBG_NONE	= 0,
+	DBG_INIT	= BIT(0), /* driver init */
+	DBG_EXIT	= BIT(1), /* driver exit */
+	DBG_MPORT	= BIT(2), /* mport add/remove */
+	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
+	DBG_CHOP	= BIT(4), /* channel operations */
+	DBG_WAIT	= BIT(5), /* waiting for events */
+	DBG_TX		= BIT(6), /* message TX */
+	DBG_TX_EVENT	= BIT(7), /* message TX event */
+	DBG_RX_DATA	= BIT(8), /* inbound data messages */
+	DBG_RX_CMD	= BIT(9), /* inbound REQ/ACK/NACK messages */
+	DBG_ALL		= ~0,
+};
+
+#ifdef DEBUG
+#define riocm_debug(level, fmt, arg...) \
+	do { \
+		if (DBG_##level & dbg_level) \
+			pr_debug(DRV_NAME ": %s " fmt "\n", \
+				__func__, ##arg); \
+	} while (0)
+#else
+#define riocm_debug(level, fmt, arg...) \
+		no_printk(KERN_DEBUG pr_fmt(DRV_NAME fmt "\n"), ##arg)
+#endif
+
+#define riocm_warn(fmt, arg...) \
+	pr_warn(DRV_NAME ": %s WARNING " fmt "\n", __func__, ##arg)
+
+#define riocm_error(fmt, arg...) \
+	pr_err(DRV_NAME ": %s ERROR " fmt "\n", __func__, ##arg)
+
+
+static int cmbox = 1;
+module_param(cmbox, int, S_IRUGO);
+MODULE_PARM_DESC(cmbox, "RapidIO Mailbox number (default 1)");
+
+static int chstart = 256;
+module_param(chstart, int, S_IRUGO);
+MODULE_PARM_DESC(chstart,
+		 "Start channel number for dynamic allocation (default 256)");
+
+#ifdef DEBUG
+static u32 dbg_level = DBG_NONE;
+module_param(dbg_level, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
+#endif
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define RIOCM_TX_RING_SIZE	128
+#define RIOCM_RX_RING_SIZE	128
+#define RIOCM_CONNECT_TO	3 /* connect response timeout (in sec) */
+
+#define RIOCM_MAX_CHNUM		0xffff /* Use full range of u16 field */
+#define RIOCM_CHNUM_AUTO	0
+#define RIOCM_MAX_EP_COUNT	0x10000 /* Max number of endpoints */
+
+enum rio_cm_state {
+	RIO_CM_IDLE,
+	RIO_CM_CONNECT,
+	RIO_CM_CONNECTED,
+	RIO_CM_DISCONNECT,
+	RIO_CM_CHAN_BOUND,
+	RIO_CM_LISTEN,
+	RIO_CM_DESTROYING,
+};
+
+enum rio_cm_pkt_type {
+	RIO_CM_SYS	= 0xaa,
+	RIO_CM_CHAN	= 0x55,
+};
+
+enum rio_cm_chop {
+	CM_CONN_REQ,
+	CM_CONN_ACK,
+	CM_CONN_CLOSE,
+	CM_DATA_MSG,
+};
+
+struct rio_ch_base_bhdr {
+	u32 src_id;
+	u32 dst_id;
+#define RIO_HDR_LETTER_MASK 0xffff0000
+#define RIO_HDR_MBOX_MASK   0x0000ffff
+	u8  src_mbox;
+	u8  dst_mbox;
+	u8  type;
+} __attribute__((__packed__));
+
+struct rio_ch_chan_hdr {
+	struct rio_ch_base_bhdr bhdr;
+	u8 ch_op;
+	u16 dst_ch;
+	u16 src_ch;
+	u16 msg_len;
+	u16 rsrvd;
+} __attribute__((__packed__));
+
+struct tx_req {
+	struct list_head node;
+	struct rio_dev   *rdev;
+	void		 *buffer;
+	size_t		 len;
+};
+
+struct cm_dev {
+	struct list_head	list;
+	struct rio_mport	*mport;
+	void			*rx_buf[RIOCM_RX_RING_SIZE];
+	int			rx_slots;
+	struct mutex		rx_lock;
+
+	void			*tx_buf[RIOCM_TX_RING_SIZE];
+	int			tx_slot;
+	int			tx_cnt;
+	int			tx_ack_slot;
+	struct list_head	tx_reqs;
+	spinlock_t		tx_lock;
+
+	struct list_head	peers;
+	u32			npeers;
+	struct workqueue_struct *rx_wq;
+	struct work_struct	rx_work;
+};
+
+struct chan_rx_ring {
+	void	*buf[RIOCM_RX_RING_SIZE];
+	int	head;
+	int	tail;
+	int	count;
+
+	/* Tracking RX buffers reported to upper level */
+	void	*inuse[RIOCM_RX_RING_SIZE];
+	int	inuse_cnt;
+};
+
+struct rio_channel {
+	u16			id;	/* local channel ID */
+	struct kref		ref;	/* channel refcount */
+	struct file		*filp;
+	struct cm_dev		*cmdev;	/* associated CM device object */
+	struct rio_dev		*rdev;	/* remote RapidIO device */
+	enum rio_cm_state	state;
+	int			error;
+	spinlock_t		lock;
+	void			*context;
+	u32			loc_destid;	/* local destID */
+	u32			rem_destid;	/* remote destID */
+	u16			rem_channel;	/* remote channel ID */
+	struct list_head	accept_queue;
+	struct list_head	ch_node;
+	struct completion	comp;
+	struct completion	comp_close;
+	struct chan_rx_ring	rx_ring;
+};
+
+struct cm_peer {
+	struct list_head node;
+	struct rio_dev *rdev;
+};
+
+struct rio_cm_work {
+	struct work_struct work;
+	struct cm_dev *cm;
+	void *data;
+};
+
+struct conn_req {
+	struct list_head node;
+	u32 destid;	/* requester destID */
+	u16 chan;	/* requester channel ID */
+	struct cm_dev *cmdev;
+};
+
+/*
+ * A channel_dev structure represents a CM_CDEV
+ * @cdev	Character device
+ * @dev		Associated device object
+ */
+struct channel_dev {
+	struct cdev	cdev;
+	struct device	*dev;
+};
+
+static struct rio_channel *riocm_ch_alloc(u16 ch_num);
+static void riocm_ch_free(struct kref *ref);
+static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
+			   void *buffer, size_t len);
+static int riocm_ch_close(struct rio_channel *ch);
+
+static DEFINE_SPINLOCK(idr_lock);
+static DEFINE_IDR(ch_idr);
+
+static LIST_HEAD(cm_dev_list);
+static DECLARE_RWSEM(rdev_sem);
+
+static struct class *dev_class;
+static unsigned int dev_major;
+static unsigned int dev_minor_base;
+static dev_t dev_number;
+static struct channel_dev riocm_cdev;
+
+#define is_msg_capable(src_ops, dst_ops)			\
+			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
+			 (dst_ops & RIO_DST_OPS_DATA_MSG))
+#define dev_cm_capable(dev) \
+	is_msg_capable(dev->src_ops, dev->dst_ops)
+
+static int riocm_cmp(struct rio_channel *ch, enum rio_cm_state cmp)
+{
+	int ret;
+
+	spin_lock_bh(&ch->lock);
+	ret = (ch->state == cmp);
+	spin_unlock_bh(&ch->lock);
+	return ret;
+}
+
+static int riocm_cmp_exch(struct rio_channel *ch,
+			   enum rio_cm_state cmp, enum rio_cm_state exch)
+{
+	int ret;
+
+	spin_lock_bh(&ch->lock);
+	ret = (ch->state == cmp);
+	if (ret)
+		ch->state = exch;
+	spin_unlock_bh(&ch->lock);
+	return ret;
+}
+
+static enum rio_cm_state riocm_exch(struct rio_channel *ch,
+				    enum rio_cm_state exch)
+{
+	enum rio_cm_state old;
+
+	spin_lock_bh(&ch->lock);
+	old = ch->state;
+	ch->state = exch;
+	spin_unlock_bh(&ch->lock);
+	return old;
+}
+
+static struct rio_channel *riocm_get_channel(u16 nr)
+{
+	struct rio_channel *ch;
+
+	spin_lock_bh(&idr_lock);
+	ch = idr_find(&ch_idr, nr);
+	if (ch)
+		kref_get(&ch->ref);
+	spin_unlock_bh(&idr_lock);
+	return ch;
+}
+
+static void riocm_put_channel(struct rio_channel *ch)
+{
+	kref_put(&ch->ref, riocm_ch_free);
+}
+
+static void *riocm_rx_get_msg(struct cm_dev *cm)
+{
+	void *msg;
+	int i;
+
+	msg = rio_get_inb_message(cm->mport, cmbox);
+	if (msg) {
+		for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+			if (cm->rx_buf[i] == msg) {
+				cm->rx_buf[i] = NULL;
+				cm->rx_slots++;
+				break;
+			}
+		}
+
+		if (i == RIOCM_RX_RING_SIZE)
+			riocm_warn("no record for buffer 0x%p", msg);
+	}
+
+	return msg;
+}
+
+/*
+ * riocm_rx_fill - fills a ring of receive buffers for a given cm device
+ * @cm: cm_dev object
+ * @nent: max number of entries to fill
+ *
+ * Returns: none
+ */
+static void riocm_rx_fill(struct cm_dev *cm, int nent)
+{
+	int i;
+
+	if (cm->rx_slots == 0)
+		return;
+
+	for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {
+		if (cm->rx_buf[i] == NULL) {
+			cm->rx_buf[i] = kmalloc(RIO_MAX_MSG_SIZE, GFP_KERNEL);
+			if (cm->rx_buf[i] == NULL)
+				break;
+			rio_add_inb_buffer(cm->mport, cmbox, cm->rx_buf[i]);
+			cm->rx_slots--;
+			nent--;
+		}
+	}
+}
+
+/*
+ * riocm_rx_free - frees all receive buffers associated with a given cm device
+ * @cm: cm_dev object
+ *
+ * Returns: none
+ */
+static void riocm_rx_free(struct cm_dev *cm)
+{
+	int i;
+
+	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+		if (cm->rx_buf[i] != NULL) {
+			kfree(cm->rx_buf[i]);
+			cm->rx_buf[i] = NULL;
+		}
+	}
+}
+
+/*
+ * riocm_req_handler - connection request handler
+ * @cm: cm_dev object
+ * @req_data: pointer to the request packet
+ *
+ * Returns: 0 if success, or
+ *          -EINVAL if channel is not in correct state,
+ *          -ENODEV if cannot find a channel with specified ID,
+ *          -ENOMEM if unable to allocate memory to store the request
+ */
+static int riocm_req_handler(struct cm_dev *cm, void *req_data)
+{
+	struct rio_channel *ch;
+	struct conn_req *req;
+	struct rio_ch_chan_hdr *hh = req_data;
+	u16 chnum;
+
+	chnum = ntohs(hh->dst_ch);
+
+	ch = riocm_get_channel(chnum);
+
+	if (!ch)
+		return -ENODEV;
+
+	if (ch->state != RIO_CM_LISTEN) {
+		riocm_debug(RX_CMD, "channel %d is not in listen state", chnum);
+		riocm_put_channel(ch);
+		return -EINVAL;
+	}
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req) {
+		riocm_put_channel(ch);
+		return -ENOMEM;
+	}
+
+	req->destid = ntohl(hh->bhdr.src_id);
+	req->chan = ntohs(hh->src_ch);
+	req->cmdev = cm;
+
+	spin_lock_bh(&ch->lock);
+	list_add_tail(&req->node, &ch->accept_queue);
+	spin_unlock_bh(&ch->lock);
+	complete(&ch->comp);
+	riocm_put_channel(ch);
+
+	return 0;
+}
+
+/*
+ * riocm_resp_handler - response to connection request handler
+ * @resp_data: pointer to the response packet
+ *
+ * Returns: 0 if success, or
+ *          -EINVAL if channel is not in correct state,
+ *          -ENODEV if cannot find a channel with specified ID,
+ */
+static int riocm_resp_handler(void *resp_data)
+{
+	struct rio_channel *ch;
+	struct rio_ch_chan_hdr *hh = resp_data;
+	u16 chnum;
+
+	chnum = ntohs(hh->dst_ch);
+	ch = riocm_get_channel(chnum);
+	if (!ch)
+		return -ENODEV;
+
+	if (ch->state != RIO_CM_CONNECT) {
+		riocm_put_channel(ch);
+		return -EINVAL;
+	}
+
+	riocm_exch(ch, RIO_CM_CONNECTED);
+	ch->rem_channel = ntohs(hh->src_ch);
+	complete(&ch->comp);
+	riocm_put_channel(ch);
+
+	return 0;
+}
+
+/*
+ * riocm_close_handler - channel close request handler
+ * @data: pointer to the close request packet
+ *
+ * Returns: 0 if success, or
+ *          -ENODEV if cannot find a channel with specified ID
+ *          (errors from riocm_ch_close() are logged but not returned).
+ */
+static int riocm_close_handler(void *data)
+{
+	struct rio_channel *ch;
+	struct rio_ch_chan_hdr *hh = data;
+	int ret;
+
+	riocm_debug(RX_CMD, "for ch=%d", ntohs(hh->dst_ch));
+
+	spin_lock_bh(&idr_lock);
+	ch = idr_find(&ch_idr, ntohs(hh->dst_ch));
+	if (!ch) {
+		spin_unlock_bh(&idr_lock);
+		return -ENODEV;
+	}
+	idr_remove(&ch_idr, ch->id);
+	spin_unlock_bh(&idr_lock);
+
+	riocm_exch(ch, RIO_CM_DISCONNECT);
+
+	ret = riocm_ch_close(ch);
+	if (ret)
+		riocm_debug(RX_CMD, "riocm_ch_close() returned %d", ret);
+
+	return 0;
+}
+
+/*
+ * rio_cm_handler - function that services request (non-data) packets
+ * @cm: cm_dev object
+ * @data: pointer to the packet
+ */
+static void rio_cm_handler(struct cm_dev *cm, void *data)
+{
+	struct rio_ch_chan_hdr *hdr;
+
+	if (!rio_mport_is_running(cm->mport))
+		goto out;
+
+	hdr = data;
+
+	riocm_debug(RX_CMD, "OP=%x for ch=%d from %d",
+		    hdr->ch_op, ntohs(hdr->dst_ch), ntohs(hdr->src_ch));
+
+	switch (hdr->ch_op) {
+	case CM_CONN_REQ:
+		riocm_req_handler(cm, data);
+		break;
+	case CM_CONN_ACK:
+		riocm_resp_handler(data);
+		break;
+	case CM_CONN_CLOSE:
+		riocm_close_handler(data);
+		break;
+	default:
+		riocm_error("Invalid packet header");
+		break;
+	}
+out:
+	kfree(data);
+}
+
+/*
+ * rio_rx_data_handler - received data packet handler
+ * @cm: cm_dev object
+ * @buf: data packet
+ *
+ * Returns: 0 if success, or
+ *          -ENODEV if cannot find a channel with specified ID,
+ *          -EIO if channel is not in CONNECTED state,
+ *          -ENOMEM if channel RX queue is full (packet discarded)
+ */
+static int rio_rx_data_handler(struct cm_dev *cm, void *buf)
+{
+	struct rio_ch_chan_hdr *hdr;
+	struct rio_channel *ch;
+
+	hdr = buf;
+
+	riocm_debug(RX_DATA, "for ch=%d", ntohs(hdr->dst_ch));
+
+	ch = riocm_get_channel(ntohs(hdr->dst_ch));
+	if (!ch) {
+		/* Discard data message for non-existing channel */
+		kfree(buf);
+		return -ENODEV;
+	}
+
+	/* Place pointer to the buffer into channel's RX queue */
+	spin_lock(&ch->lock);
+
+	if (ch->state != RIO_CM_CONNECTED) {
+		/* Channel is not ready to receive data, discard a packet */
+		riocm_debug(RX_DATA, "ch=%d is in wrong state=%d",
+			    ch->id, ch->state);
+		spin_unlock(&ch->lock);
+		kfree(buf);
+		riocm_put_channel(ch);
+		return -EIO;
+	}
+
+	if (ch->rx_ring.count == RIOCM_RX_RING_SIZE) {
+		/* If RX ring is full, discard a packet */
+		riocm_debug(RX_DATA, "ch=%d is full", ch->id);
+		spin_unlock(&ch->lock);
+		kfree(buf);
+		riocm_put_channel(ch);
+		return -ENOMEM;
+	}
+
+	ch->rx_ring.buf[ch->rx_ring.head] = buf;
+	ch->rx_ring.head++;
+	ch->rx_ring.count++;
+	ch->rx_ring.head %= RIOCM_RX_RING_SIZE;
+
+	complete(&ch->comp);
+
+	spin_unlock(&ch->lock);
+	riocm_put_channel(ch);
+
+	return 0;
+}
+
+/*
+ * rio_ibmsg_handler - inbound message packet handler
+ */
+static void rio_ibmsg_handler(struct work_struct *work)
+{
+	struct cm_dev *cm = container_of(work, struct cm_dev, rx_work);
+	void *data;
+	struct rio_ch_chan_hdr *hdr;
+
+	if (!rio_mport_is_running(cm->mport))
+		return;
+
+	while (1) {
+		mutex_lock(&cm->rx_lock);
+		data = riocm_rx_get_msg(cm);
+		if (data)
+			riocm_rx_fill(cm, 1);
+		mutex_unlock(&cm->rx_lock);
+
+		if (data == NULL)
+			break;
+
+		hdr = data;
+
+		if (hdr->bhdr.type != RIO_CM_CHAN) {
+			/* For now simply discard packets other than channel */
+			riocm_error("Unsupported TYPE code (0x%x). Msg dropped",
+				    hdr->bhdr.type);
+			kfree(data);
+			continue;
+		}
+
+		/* Process a channel message */
+		if (hdr->ch_op == CM_DATA_MSG)
+			rio_rx_data_handler(cm, data);
+		else
+			rio_cm_handler(cm, data);
+	}
+}
+
+static void riocm_inb_msg_event(struct rio_mport *mport, void *dev_id,
+				int mbox, int slot)
+{
+	struct cm_dev *cm = dev_id;
+
+	if (rio_mport_is_running(cm->mport) && !work_pending(&cm->rx_work))
+		queue_work(cm->rx_wq, &cm->rx_work);
+}
+
+/*
+ * rio_txcq_handler - TX completion handler
+ * @cm: cm_dev object
+ * @slot: TX queue slot
+ *
+ * The TX completion handler also ensures that pending request packets are
+ * placed into the transmit queue as soon as a free slot becomes available.
+ * This is done to give higher priority to request packets during
+ * high-intensity data flow.
+ */
+static void rio_txcq_handler(struct cm_dev *cm, int slot)
+{
+	int ack_slot;
+
+	/* ATTN: Add TX completion notification if/when direct buffer
+	 * transfer is implemented. At this moment only correct tracking
+	 * of tx_count is important.
+	 */
+	riocm_debug(TX_EVENT, "for mport_%d slot %d tx_cnt %d",
+		    cm->mport->id, slot, cm->tx_cnt);
+
+	spin_lock(&cm->tx_lock);
+	ack_slot = cm->tx_ack_slot;
+
+	if (ack_slot == slot)
+		riocm_debug(TX_EVENT, "slot == ack_slot");
+
+	while (cm->tx_cnt && ((ack_slot != slot) ||
+	       (cm->tx_cnt == RIOCM_TX_RING_SIZE))) {
+
+		cm->tx_buf[ack_slot] = NULL;
+		++ack_slot;
+		ack_slot &= (RIOCM_TX_RING_SIZE - 1);
+		cm->tx_cnt--;
+	}
+
+	if (cm->tx_cnt < 0 || cm->tx_cnt > RIOCM_TX_RING_SIZE)
+		riocm_error("tx_cnt %d out of sync", cm->tx_cnt);
+
+	WARN_ON((cm->tx_cnt < 0) || (cm->tx_cnt > RIOCM_TX_RING_SIZE));
+
+	cm->tx_ack_slot = ack_slot;
+
+	/*
+	 * If there are pending requests, insert them into transmit queue
+	 */
+	if (!list_empty(&cm->tx_reqs) && (cm->tx_cnt < RIOCM_TX_RING_SIZE)) {
+		struct tx_req *req, *_req;
+		int rc;
+
+		list_for_each_entry_safe(req, _req, &cm->tx_reqs, node) {
+			list_del(&req->node);
+			cm->tx_buf[cm->tx_slot] = req->buffer;
+			rc = rio_add_outb_message(cm->mport, req->rdev, cmbox,
+						  req->buffer, req->len);
+			kfree(req->buffer);
+			kfree(req);
+
+			++cm->tx_cnt;
+			++cm->tx_slot;
+			cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
+			if (cm->tx_cnt == RIOCM_TX_RING_SIZE)
+				break;
+		}
+	}
+
+	spin_unlock(&cm->tx_lock);
+}
+
+static void riocm_outb_msg_event(struct rio_mport *mport, void *dev_id,
+				 int mbox, int slot)
+{
+	struct cm_dev *cm = dev_id;
+
+	if (cm && rio_mport_is_running(cm->mport))
+		rio_txcq_handler(cm, slot);
+}
+
+static int riocm_queue_req(struct cm_dev *cm, struct rio_dev *rdev,
+			   void *buffer, size_t len)
+{
+	unsigned long flags;
+	struct tx_req *treq;
+
+	treq = kzalloc(sizeof(*treq), GFP_KERNEL);
+	if (treq == NULL)
+		return -ENOMEM;
+
+	treq->rdev = rdev;
+	treq->buffer = buffer;
+	treq->len = len;
+
+	spin_lock_irqsave(&cm->tx_lock, flags);
+	list_add_tail(&treq->node, &cm->tx_reqs);
+	spin_unlock_irqrestore(&cm->tx_lock, flags);
+	return 0;
+}
+
+/*
+ * riocm_post_send - helper function that places packet into msg TX queue
+ * @cm: cm_dev object
+ * @rdev: target RapidIO device object (required by outbound msg interface)
+ * @buffer: pointer to a packet buffer to send
+ * @len: length of data to transfer
+ *
+ * Returns: 0 if success, or error code otherwise.
+ */
+static int riocm_post_send(struct cm_dev *cm, struct rio_dev *rdev,
+			   void *buffer, size_t len)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cm->tx_lock, flags);
+
+	if (cm->mport == NULL) {
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+	if (cm->tx_cnt == RIOCM_TX_RING_SIZE) {
+		riocm_debug(TX, "Tx Queue is full");
+		rc = -EBUSY;
+		goto err_out;
+	}
+
+	cm->tx_buf[cm->tx_slot] = buffer;
+	rc = rio_add_outb_message(cm->mport, rdev, cmbox, buffer, len);
+
+	riocm_debug(TX, "Add buf@%p destid=%x tx_slot=%d tx_cnt=%d",
+		 buffer, rdev->destid, cm->tx_slot, cm->tx_cnt);
+
+	++cm->tx_cnt;
+	++cm->tx_slot;
+	cm->tx_slot &= (RIOCM_TX_RING_SIZE - 1);
+
+err_out:
+	spin_unlock_irqrestore(&cm->tx_lock, flags);
+	return rc;
+}
+
+/*
+ * riocm_ch_send - sends a data packet to a remote device
+ * @ch_id: local channel ID
+ * @buf: pointer to a data buffer to send (including CM header)
+ * @len: length of data to transfer (including CM header)
+ *
+ * ATTN: ASSUMES THAT THE HEADER SPACE IS A RESERVED PART OF THE DATA PACKET
+ *
+ * Returns: 0 if success, or
+ *          -EINVAL if one or more input parameters is/are not valid,
+ *          -ENODEV if cannot find a channel with specified ID,
+ *          -EAGAIN if a channel is not in CONNECTED state,
+ *	    + error codes returned by HW send routine.
+ */
+static int riocm_ch_send(u16 ch_id, void *buf, int len)
+{
+	struct rio_channel *ch;
+	struct rio_ch_chan_hdr *hdr;
+	int ret;
+
+	if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
+		return -EINVAL;
+
+	ch = riocm_get_channel(ch_id);
+	if (!ch) {
+		riocm_error("%s(%d) ch_%d not found", current->comm,
+			    task_pid_nr(current), ch_id);
+		return -ENODEV;
+	}
+
+	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
+		ret = -EAGAIN;
+		goto err_out;
+	}
+
+	/*
+	 * Fill buffer header section with corresponding channel data
+	 */
+	hdr = buf;
+
+	hdr->bhdr.src_id = htonl(ch->loc_destid);
+	hdr->bhdr.dst_id = htonl(ch->rem_destid);
+	hdr->bhdr.src_mbox = cmbox;
+	hdr->bhdr.dst_mbox = cmbox;
+	hdr->bhdr.type = RIO_CM_CHAN;
+	hdr->ch_op = CM_DATA_MSG;
+	hdr->dst_ch = htons(ch->rem_channel);
+	hdr->src_ch = htons(ch->id);
+	hdr->msg_len = htons((u16)len);
+
+	/* ATTN: the function call below relies on the fact that underlying
+	 * HW-specific add_outb_message() routine copies TX data into its own
+	 * internal transfer buffer (true for all RIONET compatible mport
+	 * drivers). Must be reviewed if mport driver uses the buffer directly.
+	 */
+
+	ret = riocm_post_send(ch->cmdev, ch->rdev, buf, len);
+	if (ret)
+		riocm_debug(TX, "ch %d send_err=%d", ch->id, ret);
+err_out:
+	riocm_put_channel(ch);
+	return ret;
+}
+
+static int riocm_ch_free_rxbuf(struct rio_channel *ch, void *buf)
+{
+	int i, ret = -EINVAL;
+
+	spin_lock_bh(&ch->lock);
+
+	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+		if (ch->rx_ring.inuse[i] == buf) {
+			ch->rx_ring.inuse[i] = NULL;
+			ch->rx_ring.inuse_cnt--;
+			ret = 0;
+			break;
+		}
+	}
+
+	spin_unlock_bh(&ch->lock);
+
+	if (!ret)
+		kfree(buf);
+
+	return ret;
+}
+
+/*
+ * riocm_ch_receive - fetch a data packet received for the specified channel
+ * @ch: local channel object
+ * @buf: pointer to a packet buffer
+ * @timeout: timeout to wait for incoming packet (in jiffies)
+ *
+ * Returns: 0 and valid buffer pointer if success, or NULL pointer and one of:
+ *          -EAGAIN if a channel is not in CONNECTED state,
+ *          -ENOMEM if in-use tracking queue is full,
+ *          -ETIME if wait timeout expired,
+ *          -EINTR if wait was interrupted,
+ *          -ECONNRESET if connection was lost while waiting.
+ */
+static int riocm_ch_receive(struct rio_channel *ch, void **buf, long timeout)
+{
+	void *rxmsg = NULL;
+	int i, ret = 0;
+	long wret;
+
+	if (!riocm_cmp(ch, RIO_CM_CONNECTED)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	if (ch->rx_ring.inuse_cnt == RIOCM_RX_RING_SIZE) {
+		/* If we do not have entries to track buffers given to upper
+		 * layer, reject request.
+		 */
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	wret = wait_for_completion_interruptible_timeout(&ch->comp, timeout);
+
+	riocm_debug(WAIT, "wait on %d returned %ld", ch->id, wret);
+
+	if (!wret)
+		ret = -ETIME;
+	else if (wret == -ERESTARTSYS)
+		ret = -EINTR;
+	else
+		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -ECONNRESET;
+
+	if (ret)
+		goto out;
+
+	spin_lock_bh(&ch->lock);
+
+	rxmsg = ch->rx_ring.buf[ch->rx_ring.tail];
+	ch->rx_ring.buf[ch->rx_ring.tail] = NULL;
+	ch->rx_ring.count--;
+	ch->rx_ring.tail++;
+	ch->rx_ring.tail %= RIOCM_RX_RING_SIZE;
+	ret = -ENOMEM;
+
+	for (i = 0; i < RIOCM_RX_RING_SIZE; i++) {
+		if (ch->rx_ring.inuse[i] == NULL) {
+			ch->rx_ring.inuse[i] = rxmsg;
+			ch->rx_ring.inuse_cnt++;
+			ret = 0;
+			break;
+		}
+	}
+
+	if (ret) {
+		/* We have no entry to store pending message: drop it */
+		kfree(rxmsg);
+		rxmsg = NULL;
+	}
+
+	spin_unlock_bh(&ch->lock);
+out:
+	*buf = rxmsg;
+	return ret;
+}
+
+/*
+ * riocm_ch_connect - sends a connect request to a remote device
+ * @loc_ch: local channel ID
+ * @cm: CM device to send connect request
+ * @peer: target RapidIO device
+ * @rem_ch: remote channel ID
+ *
+ * Returns: 0 if success, or
+ *          -EINVAL if the channel is not in IDLE state,
+ *          -ENODEV if cannot find a channel with specified ID,
+ *          -ENOMEM if unable to allocate a request packet,
+ *          -ETIME if ACK response timeout expired,
+ *          -EINTR if wait for response was interrupted.
+ */
+static int riocm_ch_connect(u16 loc_ch, struct cm_dev *cm,
+			    struct cm_peer *peer, u16 rem_ch)
+{
+	struct rio_channel *ch = NULL;
+	struct rio_ch_chan_hdr *hdr;
+	int ret;
+	long wret;
+
+	ch = riocm_get_channel(loc_ch);
+	if (!ch)
+		return -ENODEV;
+
+	if (!riocm_cmp_exch(ch, RIO_CM_IDLE, RIO_CM_CONNECT)) {
+		ret = -EINVAL;
+		goto conn_done;
+	}
+
+	ch->cmdev = cm;
+	ch->rdev = peer->rdev;
+	ch->context = NULL;
+	ch->loc_destid = cm->mport->host_deviceid;
+	ch->rem_channel = rem_ch;
+
+	/*
+	 * Send connect request to the remote RapidIO device
+	 */
+
+	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+	if (hdr == NULL) {
+		ret = -ENOMEM;
+		goto conn_done;
+	}
+
+	hdr->bhdr.src_id = htonl(ch->loc_destid);
+	hdr->bhdr.dst_id = htonl(peer->rdev->destid);
+	hdr->bhdr.src_mbox = cmbox;
+	hdr->bhdr.dst_mbox = cmbox;
+	hdr->bhdr.type = RIO_CM_CHAN;
+	hdr->ch_op = CM_CONN_REQ;
+	hdr->dst_ch = htons(rem_ch);
+	hdr->src_ch = htons(loc_ch);
+
+	/* ATTN: the function call below relies on the fact that underlying
+	 * HW-specific add_outb_message() routine copies TX data into its
+	 * internal transfer buffer. Must be reviewed if mport driver uses
+	 * this buffer directly.
+	 */
+	ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
+
+	if (ret != -EBUSY) {
+		kfree(hdr);
+	} else {
+		ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
+		if (ret)
+			kfree(hdr);
+	}
+
+	if (ret) {
+		riocm_cmp_exch(ch, RIO_CM_CONNECT, RIO_CM_IDLE);
+		goto conn_done;
+	}
+
+	/* Wait for connect response from the remote device */
+	wret = wait_for_completion_interruptible_timeout(&ch->comp,
+							 RIOCM_CONNECT_TO * HZ);
+	riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
+
+	if (!wret)
+		ret = -ETIME;
+	else if (wret == -ERESTARTSYS)
+		ret = -EINTR;
+	else
+		ret = riocm_cmp(ch, RIO_CM_CONNECTED) ? 0 : -1;
+
+conn_done:
+	riocm_put_channel(ch);
+	return ret;
+}
+
+static int riocm_send_ack(struct rio_channel *ch)
+{
+	struct rio_ch_chan_hdr *hdr;
+	int ret;
+
+	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+	if (hdr == NULL)
+		return -ENOMEM;
+
+	hdr->bhdr.src_id = htonl(ch->loc_destid);
+	hdr->bhdr.dst_id = htonl(ch->rem_destid);
+	hdr->dst_ch = htons(ch->rem_channel);
+	hdr->src_ch = htons(ch->id);
+	hdr->bhdr.src_mbox = cmbox;
+	hdr->bhdr.dst_mbox = cmbox;
+	hdr->bhdr.type = RIO_CM_CHAN;
+	hdr->ch_op = CM_CONN_ACK;
+
+	/* ATTN: the function call below relies on the fact that underlying
+	 * add_outb_message() routine copies TX data into its internal transfer
+	 * buffer. Review if switching to direct buffer version.
+	 */
+	ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
+
+	if (ret == -EBUSY && !riocm_queue_req(ch->cmdev,
+					      ch->rdev, hdr, sizeof(*hdr)))
+		return 0;
+	kfree(hdr);
+
+	if (ret)
+		riocm_error("send ACK to ch_%d on %s failed (ret=%d)",
+			    ch->id, rio_name(ch->rdev), ret);
+	return ret;
+}
+
+/*
+ * riocm_ch_accept - accept incoming connection request
+ * @ch_id: listening channel ID
+ * @new_ch_id: pointer for returning the ID of the accepted channel
+ * @timeout: wait timeout in jiffies (if 0, non-blocking call: do not wait if
+ *           a connection request is not available).
+ *
+ * Returns: pointer to new channel struct if success, or error-valued pointer:
+ *          -ENODEV - cannot find specified channel or mport,
+ *          -EINVAL - the channel is not in IDLE state,
+ *          -EAGAIN - no connection request available immediately (timeout=0),
+ *          -ENOMEM - unable to allocate new channel,
+ *          -ETIME - wait timeout expired,
+ *          -EINTR - wait was interrupted.
+ */
+static struct rio_channel *riocm_ch_accept(u16 ch_id, u16 *new_ch_id,
+					   long timeout)
+{
+	struct rio_channel *ch = NULL;
+	struct rio_channel *new_ch = NULL;
+	struct conn_req *req;
+	struct cm_peer *peer;
+	int found = 0;
+	int err = 0;
+	long wret;
+
+	ch = riocm_get_channel(ch_id);
+	if (!ch)
+		return ERR_PTR(-EINVAL);
+
+	if (!riocm_cmp(ch, RIO_CM_LISTEN)) {
+		err = -EINVAL;
+		goto err_put;
+	}
+
+	/* Don't sleep if this is a non-blocking call */
+	if (!timeout) {
+		if (!try_wait_for_completion(&ch->comp)) {
+			err = -EAGAIN;
+			goto err_put;
+		}
+	} else {
+		riocm_debug(WAIT, "on %d", ch->id);
+
+		wret = wait_for_completion_interruptible_timeout(&ch->comp,
+								 timeout);
+		if (!wret) {
+			err = -ETIME;
+			goto err_put;
+		} else if (wret == -ERESTARTSYS) {
+			err = -EINTR;
+			goto err_put;
+		}
+	}
+
+	spin_lock_bh(&ch->lock);
+
+	if (ch->state != RIO_CM_LISTEN) {
+		err = -ECANCELED;
+	} else if (list_empty(&ch->accept_queue)) {
+		riocm_debug(WAIT, "on %d accept_queue is empty on completion",
+			    ch->id);
+		err = -EIO;
+	}
+
+	spin_unlock_bh(&ch->lock);
+
+	if (err) {
+		riocm_debug(WAIT, "on %d returns %d", ch->id, err);
+		goto err_put;
+	}
+
+	/* Create new channel for this connection */
+	new_ch = riocm_ch_alloc(RIOCM_CHNUM_AUTO);
+
+	if (IS_ERR(new_ch)) {
+		riocm_error("failed to get channel for new req (%ld)",
+			PTR_ERR(new_ch));
+		err = -ENOMEM;
+		goto err_put;
+	}
+
+	spin_lock_bh(&ch->lock);
+
+	req = list_first_entry(&ch->accept_queue, struct conn_req, node);
+	list_del(&req->node);
+	new_ch->cmdev = ch->cmdev;
+	new_ch->loc_destid = ch->loc_destid;
+	new_ch->rem_destid = req->destid;
+	new_ch->rem_channel = req->chan;
+
+	spin_unlock_bh(&ch->lock);
+	riocm_put_channel(ch);
+	kfree(req);
+
+	down_read(&rdev_sem);
+	/* Find requester's device object */
+	list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
+		if (peer->rdev->destid == new_ch->rem_destid) {
+			riocm_debug(RX_CMD, "found matching device(%s)",
+				    rio_name(peer->rdev));
+			found = 1;
+			break;
+		}
+	}
+	up_read(&rdev_sem);
+
+	if (!found) {
+		/* If the peer device object is not found, simply ignore the request */
+		err = -ENODEV;
+		goto err_nodev;
+	}
+
+	new_ch->rdev = peer->rdev;
+	new_ch->state = RIO_CM_CONNECTED;
+	spin_lock_init(&new_ch->lock);
+
+	/* Acknowledge the connection request. */
+	riocm_send_ack(new_ch);
+
+	*new_ch_id = new_ch->id;
+	return new_ch;
+err_put:
+	riocm_put_channel(ch);
+err_nodev:
+	if (new_ch) {
+		spin_lock_bh(&idr_lock);
+		idr_remove(&ch_idr, new_ch->id);
+		spin_unlock_bh(&idr_lock);
+		riocm_put_channel(new_ch);
+	}
+	*new_ch_id = 0;
+	return ERR_PTR(err);
+}
+
+/*
+ * riocm_ch_listen - puts a channel into LISTEN state
+ * @ch_id: channel ID
+ *
+ * Returns: 0 if success, or
+ *          -EINVAL if the specified channel does not exist or
+ *                  is not in CHAN_BOUND state.
+ */
+static int riocm_ch_listen(u16 ch_id)
+{
+	struct rio_channel *ch = NULL;
+	int ret = 0;
+
+	riocm_debug(CHOP, "(ch_%d)", ch_id);
+
+	ch = riocm_get_channel(ch_id);
+	if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
+		ret = -EINVAL;
+	riocm_put_channel(ch);
+	return ret;
+}
+
+/*
+ * riocm_ch_bind - associate a channel object and an mport device
+ * @ch_id: channel ID
+ * @mport_id: local mport device ID
+ * @context: pointer to the additional caller's context
+ *
+ * Returns: 0 if success, or
+ *          -ENODEV if cannot find specified mport,
+ *          -EINVAL if the specified channel does not exist or
+ *                  is not in IDLE state.
+ */
+static int riocm_ch_bind(u16 ch_id, u8 mport_id, void *context)
+{
+	struct rio_channel *ch = NULL;
+	struct cm_dev *cm;
+	int rc = -ENODEV;
+
+	riocm_debug(CHOP, "ch_%d to mport_%d", ch_id, mport_id);
+
+	/* Find matching cm_dev object */
+	down_read(&rdev_sem);
+	list_for_each_entry(cm, &cm_dev_list, list) {
+		if ((cm->mport->id == mport_id) &&
+		     rio_mport_is_running(cm->mport)) {
+			rc = 0;
+			break;
+		}
+	}
+
+	if (rc)
+		goto exit;
+
+	ch = riocm_get_channel(ch_id);
+	if (!ch) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	spin_lock_bh(&ch->lock);
+	if (ch->state != RIO_CM_IDLE) {
+		spin_unlock_bh(&ch->lock);
+		rc = -EINVAL;
+		goto err_put;
+	}
+
+	ch->cmdev = cm;
+	ch->loc_destid = cm->mport->host_deviceid;
+	ch->context = context;
+	ch->state = RIO_CM_CHAN_BOUND;
+	spin_unlock_bh(&ch->lock);
+err_put:
+	riocm_put_channel(ch);
+exit:
+	up_read(&rdev_sem);
+	return rc;
+}
+
+/*
+ * riocm_ch_alloc - channel object allocation helper routine
+ * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
+ *
+ * Return value: pointer to newly created channel object,
+ *               or error-valued pointer
+ */
+static struct rio_channel *riocm_ch_alloc(u16 ch_num)
+{
+	int id;
+	int start, end;
+	struct rio_channel *ch;
+
+	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+	if (!ch)
+		return ERR_PTR(-ENOMEM);
+
+	if (ch_num) {
+		/* If requested, try to obtain the specified channel ID */
+		start = ch_num;
+		end = ch_num + 1;
+	} else {
+		/* Obtain channel ID from the dynamic allocation range */
+		start = chstart;
+		end = RIOCM_MAX_CHNUM + 1;
+	}
+
+	idr_preload(GFP_KERNEL);
+	spin_lock_bh(&idr_lock);
+	id = idr_alloc_cyclic(&ch_idr, ch, start, end, GFP_NOWAIT);
+	spin_unlock_bh(&idr_lock);
+	idr_preload_end();
+
+	if (id < 0) {
+		kfree(ch);
+		return ERR_PTR(id == -ENOSPC ? -EBUSY : id);
+	}
+
+	ch->id = (u16)id;
+	ch->state = RIO_CM_IDLE;
+	spin_lock_init(&ch->lock);
+	INIT_LIST_HEAD(&ch->accept_queue);
+	INIT_LIST_HEAD(&ch->ch_node);
+	init_completion(&ch->comp);
+	init_completion(&ch->comp_close);
+	kref_init(&ch->ref);
+	ch->rx_ring.head = 0;
+	ch->rx_ring.tail = 0;
+	ch->rx_ring.count = 0;
+	ch->rx_ring.inuse_cnt = 0;
+
+	return ch;
+}
+
+/*
+ * riocm_ch_create - creates a new channel object and allocates ID for it
+ * @ch_num: channel ID (1 ... RIOCM_MAX_CHNUM, 0 = automatic)
+ *
+ * Allocates and initializes a new channel object. If the parameter ch_num > 0
+ * and is within the valid range, riocm_ch_create tries to allocate the
+ * specified ID for the new channel. If ch_num = 0, channel ID will be assigned
+ * automatically from the range (chstart ... RIOCM_MAX_CHNUM).
+ * Module parameter 'chstart' defines start of an ID range available for dynamic
+ * allocation. Range below 'chstart' is reserved for pre-defined ID numbers.
+ * Available channel numbers are limited by the 16-bit size of the channel
+ * number field used in the packet header.
+ *
+ * Return value: pointer to rio_channel structure if successful (with the
+ *               channel number updated via pointer) or error-valued pointer.
+ */
+static struct rio_channel *riocm_ch_create(u16 *ch_num)
+{
+	struct rio_channel *ch = NULL;
+
+	ch = riocm_ch_alloc(*ch_num);
+
+	if (IS_ERR(ch))
+		riocm_debug(CHOP, "Failed to allocate channel %d (err=%ld)",
+			    *ch_num, PTR_ERR(ch));
+	else
+		*ch_num = ch->id;
+
+	return ch;
+}
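
As an illustration of the two allocation modes described above, a minimal in-kernel usage sketch (hypothetical caller code, not part of this patch; channel ID 5 is a made-up example of a pre-defined ID below 'chstart'):

	u16 ch_num;
	struct rio_channel *ch;

	/* Request a specific pre-defined channel ID (reserved range) */
	ch_num = 5;
	ch = riocm_ch_create(&ch_num);	/* ERR_PTR(-EBUSY) if ID 5 is taken */

	/* Or let the core pick one from (chstart ... RIOCM_MAX_CHNUM) */
	ch_num = 0;
	ch = riocm_ch_create(&ch_num);
	if (!IS_ERR(ch))
		pr_info("allocated channel %u\n", ch_num);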
+
+/*
+ * riocm_ch_free - channel object release routine
+ * @ref: pointer to a channel's kref structure
+ */
+static void riocm_ch_free(struct kref *ref)
+{
+	struct rio_channel *ch = container_of(ref, struct rio_channel, ref);
+	int i;
+
+	riocm_debug(CHOP, "(ch_%d)", ch->id);
+
+	if (ch->rx_ring.inuse_cnt) {
+		for (i = 0;
+		     i < RIOCM_RX_RING_SIZE && ch->rx_ring.inuse_cnt; i++) {
+			if (ch->rx_ring.inuse[i] != NULL) {
+				kfree(ch->rx_ring.inuse[i]);
+				ch->rx_ring.inuse_cnt--;
+			}
+		}
+	}
+
+	if (ch->rx_ring.count)
+		for (i = 0; i < RIOCM_RX_RING_SIZE && ch->rx_ring.count; i++) {
+			if (ch->rx_ring.buf[i] != NULL) {
+				kfree(ch->rx_ring.buf[i]);
+				ch->rx_ring.count--;
+			}
+		}
+
+	complete(&ch->comp_close);
+}
+
+static int riocm_send_close(struct rio_channel *ch)
+{
+	struct rio_ch_chan_hdr *hdr;
+	int ret;
+
+	/*
+	 * Send CH_CLOSE notification to the remote RapidIO device
+	 */
+
+	hdr = kzalloc(sizeof(*hdr), GFP_KERNEL);
+	if (hdr == NULL)
+		return -ENOMEM;
+
+	hdr->bhdr.src_id = htonl(ch->loc_destid);
+	hdr->bhdr.dst_id = htonl(ch->rem_destid);
+	hdr->bhdr.src_mbox = cmbox;
+	hdr->bhdr.dst_mbox = cmbox;
+	hdr->bhdr.type = RIO_CM_CHAN;
+	hdr->ch_op = CM_CONN_CLOSE;
+	hdr->dst_ch = htons(ch->rem_channel);
+	hdr->src_ch = htons(ch->id);
+
+	/* ATTN: the function call below relies on the fact that underlying
+	 * add_outb_message() routine copies TX data into its internal transfer
+	 * buffer. Needs to be reviewed if switched to direct buffer mode.
+	 */
+	ret = riocm_post_send(ch->cmdev, ch->rdev, hdr, sizeof(*hdr));
+
+	if (ret == -EBUSY && !riocm_queue_req(ch->cmdev, ch->rdev,
+					      hdr, sizeof(*hdr)))
+		return 0;
+	kfree(hdr);
+
+	if (ret)
+		riocm_error("ch(%d) send CLOSE failed (ret=%d)", ch->id, ret);
+
+	return ret;
+}
+
+/*
+ * riocm_ch_close - closes a channel object with specified ID (by local request)
+ * @ch: channel to be closed
+ */
+static int riocm_ch_close(struct rio_channel *ch)
+{
+	unsigned long tmo = msecs_to_jiffies(3000);
+	enum rio_cm_state state;
+	long wret;
+	int ret = 0;
+
+	riocm_debug(CHOP, "ch_%d by %s(%d)",
+		    ch->id, current->comm, task_pid_nr(current));
+
+	state = riocm_exch(ch, RIO_CM_DESTROYING);
+	if (state == RIO_CM_CONNECTED)
+		riocm_send_close(ch);
+
+	complete_all(&ch->comp);
+
+	riocm_put_channel(ch);
+	wret = wait_for_completion_interruptible_timeout(&ch->comp_close, tmo);
+
+	riocm_debug(WAIT, "wait on %d returns %ld", ch->id, wret);
+
+	if (wret == 0) {
+		/* Timeout on wait occurred */
+		riocm_debug(CHOP, "%s(%d) timed out waiting for ch %d",
+		       current->comm, task_pid_nr(current), ch->id);
+		ret = -ETIMEDOUT;
+	} else if (wret == -ERESTARTSYS) {
+		/* Wait_for_completion was interrupted by a signal */
+		riocm_debug(CHOP, "%s(%d) wait for ch %d was interrupted",
+			current->comm, task_pid_nr(current), ch->id);
+		ret = -EINTR;
+	}
+
+	if (!ret) {
+		riocm_debug(CHOP, "ch_%d resources released", ch->id);
+		kfree(ch);
+	} else {
+		riocm_debug(CHOP, "failed to release ch_%d resources", ch->id);
+	}
+
+	return ret;
+}
+
+/*
+ * riocm_cdev_open() - Open character device
+ */
+static int riocm_cdev_open(struct inode *inode, struct file *filp)
+{
+	riocm_debug(INIT, "by %s(%d) filp=%p ",
+		    current->comm, task_pid_nr(current), filp);
+
+	if (list_empty(&cm_dev_list))
+		return -ENODEV;
+
+	return 0;
+}
+
+/*
+ * riocm_cdev_release() - Release character device
+ */
+static int riocm_cdev_release(struct inode *inode, struct file *filp)
+{
+	struct rio_channel *ch, *_c;
+	unsigned int i;
+	LIST_HEAD(list);
+
+	riocm_debug(EXIT, "by %s(%d) filp=%p",
+		    current->comm, task_pid_nr(current), filp);
+
+	/* Check if there are channels associated with this file descriptor */
+	spin_lock_bh(&idr_lock);
+	idr_for_each_entry(&ch_idr, ch, i) {
+		if (ch && ch->filp == filp) {
+			riocm_debug(EXIT, "ch_%d not released by %s(%d)",
+				    ch->id, current->comm,
+				    task_pid_nr(current));
+			idr_remove(&ch_idr, ch->id);
+			list_add(&ch->ch_node, &list);
+		}
+	}
+	spin_unlock_bh(&idr_lock);
+
+	if (!list_empty(&list)) {
+		list_for_each_entry_safe(ch, _c, &list, ch_node) {
+			list_del(&ch->ch_node);
+			riocm_ch_close(ch);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cm_ep_get_list_size() - Reports number of endpoints in the network
+ */
+static int cm_ep_get_list_size(void __user *arg)
+{
+	u32 __user *p = arg;
+	u32 mport_id;
+	u32 count = 0;
+	struct cm_dev *cm;
+
+	if (get_user(mport_id, p))
+		return -EFAULT;
+	if (mport_id >= RIO_MAX_MPORTS)
+		return -EINVAL;
+
+	/* Find a matching cm_dev object */
+	down_read(&rdev_sem);
+	list_for_each_entry(cm, &cm_dev_list, list) {
+		if (cm->mport->id == mport_id) {
+			count = cm->npeers;
+			up_read(&rdev_sem);
+			if (copy_to_user(arg, &count, sizeof(u32)))
+				return -EFAULT;
+			return 0;
+		}
+	}
+	up_read(&rdev_sem);
+
+	return -ENODEV;
+}
+
+/*
+ * cm_ep_get_list() - Returns list of attached endpoints
+ */
+static int cm_ep_get_list(void __user *arg)
+{
+	struct cm_dev *cm;
+	struct cm_peer *peer;
+	u32 info[2];
+	void *buf;
+	u32 nent;
+	u32 *entry_ptr;
+	u32 i = 0;
+	int ret = 0;
+
+	if (copy_from_user(&info, arg, sizeof(info)))
+		return -EFAULT;
+
+	if (info[1] >= RIO_MAX_MPORTS || info[0] > RIOCM_MAX_EP_COUNT)
+		return -EINVAL;
+
+	/* Find a matching cm_dev object */
+	down_read(&rdev_sem);
+	list_for_each_entry(cm, &cm_dev_list, list)
+		if (cm->mport->id == (u8)info[1])
+			goto found;
+
+	up_read(&rdev_sem);
+	return -ENODEV;
+
+found:
+	nent = min(info[0], cm->npeers);
+	buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);
+	if (!buf) {
+		up_read(&rdev_sem);
+		return -ENOMEM;
+	}
+
+	entry_ptr = (u32 *)((uintptr_t)buf + 2*sizeof(u32));
+
+	list_for_each_entry(peer, &cm->peers, node) {
+		*entry_ptr = (u32)peer->rdev->destid;
+		entry_ptr++;
+		if (++i == nent)
+			break;
+	}
+	up_read(&rdev_sem);
+
+	((u32 *)buf)[0] = i; /* report an updated number of entries */
+	((u32 *)buf)[1] = info[1]; /* put back an mport ID */
+	if (copy_to_user(arg, buf, sizeof(u32) * (info[0] + 2)))
+		ret = -EFAULT;
+
+	kfree(buf);
+	return ret;
+}
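
The user buffer exchanged above carries two header words followed by the endpoint destination IDs. A hedged userspace sketch of that layout (assuming an already-opened device descriptor 'fd' and the RIO_CM_EP_GET_LIST ioctl code from this series' UAPI header):

	/* in:  word 0 = max entries accepted, word 1 = mport ID
	 * out: word 0 = number of entries returned, word 1 = mport ID,
	 *      words 2.. = destination IDs of attached endpoints
	 */
	uint32_t buf[2 + 16] = { 16, 0 };	/* up to 16 endpoints on mport 0 */
	uint32_t i;

	if (ioctl(fd, RIO_CM_EP_GET_LIST, buf) == 0)
		for (i = 0; i < buf[0]; i++)
			printf("endpoint destid 0x%x\n", buf[2 + i]);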
+
+/*
+ * cm_mport_get_list() - Returns list of available local mport devices
+ */
+static int cm_mport_get_list(void __user *arg)
+{
+	int ret = 0;
+	u32 entries;
+	void *buf;
+	struct cm_dev *cm;
+	u32 *entry_ptr;
+	int count = 0;
+
+	if (copy_from_user(&entries, arg, sizeof(entries)))
+		return -EFAULT;
+	if (entries == 0 || entries > RIO_MAX_MPORTS)
+		return -EINVAL;
+	buf = kcalloc(entries + 1, sizeof(u32), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Scan all registered cm_dev objects */
+	entry_ptr = (u32 *)((uintptr_t)buf + sizeof(u32));
+	down_read(&rdev_sem);
+	list_for_each_entry(cm, &cm_dev_list, list) {
+		if (count++ < entries) {
+			*entry_ptr = (cm->mport->id << 16) |
+				      cm->mport->host_deviceid;
+			entry_ptr++;
+		}
+	}
+	up_read(&rdev_sem);
+
+	*((u32 *)buf) = count; /* report the real number of entries */
+	if (copy_to_user(arg, buf, sizeof(u32) * (count + 1)))
+		ret = -EFAULT;
+
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * cm_chan_create() - Create a message exchange channel
+ */
+static int cm_chan_create(struct file *filp, void __user *arg)
+{
+	u16 __user *p = arg;
+	u16 ch_num;
+	struct rio_channel *ch;
+
+	if (get_user(ch_num, p))
+		return -EFAULT;
+
+	riocm_debug(CHOP, "ch_%d requested by %s(%d)",
+		    ch_num, current->comm, task_pid_nr(current));
+	ch = riocm_ch_create(&ch_num);
+	if (IS_ERR(ch))
+		return PTR_ERR(ch);
+
+	ch->filp = filp;
+	riocm_debug(CHOP, "ch_%d created by %s(%d)",
+		    ch_num, current->comm, task_pid_nr(current));
+	return put_user(ch_num, p);
+}
+
+/*
+ * cm_chan_close() - Close channel
+ * @filp:	Pointer to file object
+ * @arg:	Channel to close
+ */
+static int cm_chan_close(struct file *filp, void __user *arg)
+{
+	u16 __user *p = arg;
+	u16 ch_num;
+	struct rio_channel *ch;
+
+	if (get_user(ch_num, p))
+		return -EFAULT;
+
+	riocm_debug(CHOP, "ch_%d by %s(%d)",
+		    ch_num, current->comm, task_pid_nr(current));
+
+	spin_lock_bh(&idr_lock);
+	ch = idr_find(&ch_idr, ch_num);
+	if (!ch) {
+		spin_unlock_bh(&idr_lock);
+		return 0;
+	}
+	if (ch->filp != filp) {
+		spin_unlock_bh(&idr_lock);
+		return -EINVAL;
+	}
+	idr_remove(&ch_idr, ch->id);
+	spin_unlock_bh(&idr_lock);
+
+	return riocm_ch_close(ch);
+}
+
+/*
+ * cm_chan_bind() - Bind channel
+ * @arg:	Channel number
+ */
+static int cm_chan_bind(void __user *arg)
+{
+	struct rio_cm_channel chan;
+
+	if (copy_from_user(&chan, arg, sizeof(chan)))
+		return -EFAULT;
+	if (chan.mport_id >= RIO_MAX_MPORTS)
+		return -EINVAL;
+
+	return riocm_ch_bind(chan.id, chan.mport_id, NULL);
+}
+
+/*
+ * cm_chan_listen() - Listen on channel
+ * @arg:	Channel number
+ */
+static int cm_chan_listen(void __user *arg)
+{
+	u16 __user *p = arg;
+	u16 ch_num;
+
+	if (get_user(ch_num, p))
+		return -EFAULT;
+
+	return riocm_ch_listen(ch_num);
+}
+
+/*
+ * cm_chan_accept() - Accept incoming connection
+ * @filp:	Pointer to file object
+ * @arg:	Channel number
+ */
+static int cm_chan_accept(struct file *filp, void __user *arg)
+{
+	struct rio_cm_accept param;
+	long accept_to;
+	struct rio_channel *ch;
+
+	if (copy_from_user(&param, arg, sizeof(param)))
+		return -EFAULT;
+
+	riocm_debug(CHOP, "on ch_%d by %s(%d)",
+		    param.ch_num, current->comm, task_pid_nr(current));
+
+	accept_to = param.wait_to ?
+			msecs_to_jiffies(param.wait_to) : 0;
+
+	ch = riocm_ch_accept(param.ch_num, &param.ch_num, accept_to);
+	if (IS_ERR(ch))
+		return PTR_ERR(ch);
+	ch->filp = filp;
+
+	riocm_debug(CHOP, "new ch_%d for %s(%d)",
+		    ch->id, current->comm, task_pid_nr(current));
+
+	if (copy_to_user(arg, &param, sizeof(param)))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * cm_chan_connect() - Connect on channel
+ * @arg:	Channel information
+ */
+static int cm_chan_connect(void __user *arg)
+{
+	struct rio_cm_channel chan;
+	struct cm_dev *cm;
+	struct cm_peer *peer;
+	int ret = -ENODEV;
+
+	if (copy_from_user(&chan, arg, sizeof(chan)))
+		return -EFAULT;
+	if (chan.mport_id >= RIO_MAX_MPORTS)
+		return -EINVAL;
+
+	down_read(&rdev_sem);
+
+	/* Find matching cm_dev object */
+	list_for_each_entry(cm, &cm_dev_list, list) {
+		if (cm->mport->id == chan.mport_id) {
+			ret = 0;
+			break;
+		}
+	}
+
+	if (ret)
+		goto err_out;
+
+	if (chan.remote_destid >= RIO_ANY_DESTID(cm->mport->sys_size)) {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	/* Find corresponding RapidIO endpoint device object */
+	ret = -ENODEV;
+
+	list_for_each_entry(peer, &cm->peers, node) {
+		if (peer->rdev->destid == chan.remote_destid) {
+			ret = 0;
+			break;
+		}
+	}
+
+	if (ret)
+		goto err_out;
+
+	up_read(&rdev_sem);
+
+	return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
+err_out:
+	up_read(&rdev_sem);
+	return ret;
+}
+
+/*
+ * cm_chan_msg_send() - Send a message through channel
+ * @arg:	Outbound message information
+ */
+static int cm_chan_msg_send(void __user *arg)
+{
+	struct rio_cm_msg msg;
+	void *buf;
+	int ret = 0;
+
+	if (copy_from_user(&msg, arg, sizeof(msg)))
+		return -EFAULT;
+	if (msg.size > RIO_MAX_MSG_SIZE)
+		return -EINVAL;
+
+	buf = kmalloc(msg.size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = riocm_ch_send(msg.ch_num, buf, msg.size);
+out:
+	kfree(buf);
+	return ret;
+}
+
+/*
+ * cm_chan_msg_rcv() - Receive a message through channel
+ * @arg:	Inbound message information
+ */
+static int cm_chan_msg_rcv(void __user *arg)
+{
+	struct rio_cm_msg msg;
+	struct rio_channel *ch;
+	void *buf;
+	long rxto;
+	int ret = 0, msg_size;
+
+	if (copy_from_user(&msg, arg, sizeof(msg)))
+		return -EFAULT;
+
+	if (msg.ch_num == 0 || msg.size == 0)
+		return -EINVAL;
+
+	ch = riocm_get_channel(msg.ch_num);
+	if (!ch)
+		return -ENODEV;
+
+	rxto = msg.rxto ? msecs_to_jiffies(msg.rxto) : MAX_SCHEDULE_TIMEOUT;
+
+	ret = riocm_ch_receive(ch, &buf, rxto);
+	if (ret)
+		goto out;
+
+	msg_size = min(msg.size, (u16)(RIO_MAX_MSG_SIZE));
+
+	if (copy_to_user((void __user *)(uintptr_t)msg.msg, buf, msg_size))
+		ret = -EFAULT;
+
+	riocm_ch_free_rxbuf(ch, buf);
+out:
+	riocm_put_channel(ch);
+	return ret;
+}
+
+/*
+ * riocm_cdev_ioctl() - IOCTL requests handler
+ */
+static long
+riocm_cdev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case RIO_CM_EP_GET_LIST_SIZE:
+		return cm_ep_get_list_size((void __user *)arg);
+	case RIO_CM_EP_GET_LIST:
+		return cm_ep_get_list((void __user *)arg);
+	case RIO_CM_CHAN_CREATE:
+		return cm_chan_create(filp, (void __user *)arg);
+	case RIO_CM_CHAN_CLOSE:
+		return cm_chan_close(filp, (void __user *)arg);
+	case RIO_CM_CHAN_BIND:
+		return cm_chan_bind((void __user *)arg);
+	case RIO_CM_CHAN_LISTEN:
+		return cm_chan_listen((void __user *)arg);
+	case RIO_CM_CHAN_ACCEPT:
+		return cm_chan_accept(filp, (void __user *)arg);
+	case RIO_CM_CHAN_CONNECT:
+		return cm_chan_connect((void __user *)arg);
+	case RIO_CM_CHAN_SEND:
+		return cm_chan_msg_send((void __user *)arg);
+	case RIO_CM_CHAN_RECEIVE:
+		return cm_chan_msg_rcv((void __user *)arg);
+	case RIO_CM_MPORT_GET_LIST:
+		return cm_mport_get_list((void __user *)arg);
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+static const struct file_operations riocm_cdev_fops = {
+	.owner		= THIS_MODULE,
+	.open		= riocm_cdev_open,
+	.release	= riocm_cdev_release,
+	.unlocked_ioctl = riocm_cdev_ioctl,
+};
+
+/*
+ * riocm_add_dev - add new remote RapidIO device into channel management core
+ * @dev: device object associated with RapidIO device
+ * @sif: subsystem interface
+ *
+ * Adds the specified RapidIO device (if applicable) into the peers list of
+ * the corresponding channel management device (cm_dev).
+ */
+static int riocm_add_dev(struct device *dev, struct subsys_interface *sif)
+{
+	struct cm_peer *peer;
+	struct rio_dev *rdev = to_rio_dev(dev);
+	struct cm_dev *cm;
+
+	/* Check if the remote device has capabilities required to support CM */
+	if (!dev_cm_capable(rdev))
+		return 0;
+
+	riocm_debug(RDEV, "(%s)", rio_name(rdev));
+
+	peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+	if (!peer)
+		return -ENOMEM;
+
+	/* Find a corresponding cm_dev object */
+	down_write(&rdev_sem);
+	list_for_each_entry(cm, &cm_dev_list, list) {
+		if (cm->mport == rdev->net->hport)
+			goto found;
+	}
+
+	up_write(&rdev_sem);
+	kfree(peer);
+	return -ENODEV;
+
+found:
+	peer->rdev = rdev;
+	list_add_tail(&peer->node, &cm->peers);
+	cm->npeers++;
+
+	up_write(&rdev_sem);
+	return 0;
+}
+
+/*
+ * riocm_remove_dev - remove remote RapidIO device from channel management core
+ * @dev: device object associated with RapidIO device
+ * @sif: subsystem interface
+ *
+ * Removes the specified RapidIO device (if applicable) from the peers list of
+ * the corresponding channel management device (cm_dev).
+ */
+static void riocm_remove_dev(struct device *dev, struct subsys_interface *sif)
+{
+	struct rio_dev *rdev = to_rio_dev(dev);
+	struct cm_dev *cm;
+	struct cm_peer *peer;
+	struct rio_channel *ch, *_c;
+	unsigned int i;
+	bool found = false;
+	LIST_HEAD(list);
+
+	/* Check if the remote device has capabilities required to support CM */
+	if (!dev_cm_capable(rdev))
+		return;
+
+	riocm_debug(RDEV, "(%s)", rio_name(rdev));
+
+	/* Find matching cm_dev object */
+	down_write(&rdev_sem);
+	list_for_each_entry(cm, &cm_dev_list, list) {
+		if (cm->mport == rdev->net->hport) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		up_write(&rdev_sem);
+		return;
+	}
+
+	/* Remove remote device from the list of peers */
+	found = false;
+	list_for_each_entry(peer, &cm->peers, node) {
+		if (peer->rdev == rdev) {
+			riocm_debug(RDEV, "removing peer %s", rio_name(rdev));
+			found = true;
+			list_del(&peer->node);
+			cm->npeers--;
+			kfree(peer);
+			break;
+		}
+	}
+
+	up_write(&rdev_sem);
+
+	if (!found)
+		return;
+
+	/*
+	 * Release channels associated with this peer
+	 */
+
+	spin_lock_bh(&idr_lock);
+	idr_for_each_entry(&ch_idr, ch, i) {
+		if (ch && ch->rdev == rdev) {
+			if (atomic_read(&rdev->state) != RIO_DEVICE_SHUTDOWN)
+				riocm_exch(ch, RIO_CM_DISCONNECT);
+			idr_remove(&ch_idr, ch->id);
+			list_add(&ch->ch_node, &list);
+		}
+	}
+	spin_unlock_bh(&idr_lock);
+
+	if (!list_empty(&list)) {
+		list_for_each_entry_safe(ch, _c, &list, ch_node) {
+			list_del(&ch->ch_node);
+			riocm_ch_close(ch);
+		}
+	}
+}
+
+/*
+ * riocm_cdev_add() - Create rio_cm char device
+ * @devno: device number assigned to device (MAJ + MIN)
+ */
+static int riocm_cdev_add(dev_t devno)
+{
+	int ret;
+
+	cdev_init(&riocm_cdev.cdev, &riocm_cdev_fops);
+	riocm_cdev.cdev.owner = THIS_MODULE;
+	ret = cdev_add(&riocm_cdev.cdev, devno, 1);
+	if (ret < 0) {
+		riocm_error("Cannot register a device with error %d", ret);
+		return ret;
+	}
+
+	riocm_cdev.dev = device_create(dev_class, NULL, devno, NULL, DEV_NAME);
+	if (IS_ERR(riocm_cdev.dev)) {
+		cdev_del(&riocm_cdev.cdev);
+		return PTR_ERR(riocm_cdev.dev);
+	}
+
+	riocm_debug(MPORT, "Added %s cdev(%d:%d)",
+		    DEV_NAME, MAJOR(devno), MINOR(devno));
+
+	return 0;
+}
+
+/*
+ * riocm_add_mport - add new local mport device into channel management core
+ * @dev: device object associated with mport
+ * @class_intf: class interface
+ *
+ * When a new mport device is added, CM immediately reserves inbound and
+ * outbound RapidIO mailboxes that will be used.
+ */
+static int riocm_add_mport(struct device *dev,
+			   struct class_interface *class_intf)
+{
+	int rc;
+	int i;
+	struct cm_dev *cm;
+	struct rio_mport *mport = to_rio_mport(dev);
+
+	riocm_debug(MPORT, "add mport %s", mport->name);
+
+	cm = kzalloc(sizeof(*cm), GFP_KERNEL);
+	if (!cm)
+		return -ENOMEM;
+
+	cm->mport = mport;
+
+	rc = rio_request_outb_mbox(mport, cm, cmbox,
+				   RIOCM_TX_RING_SIZE, riocm_outb_msg_event);
+	if (rc) {
+		riocm_error("failed to allocate OBMBOX_%d on %s",
+			    cmbox, mport->name);
+		kfree(cm);
+		return -ENODEV;
+	}
+
+	rc = rio_request_inb_mbox(mport, cm, cmbox,
+				  RIOCM_RX_RING_SIZE, riocm_inb_msg_event);
+	if (rc) {
+		riocm_error("failed to allocate IBMBOX_%d on %s",
+			    cmbox, mport->name);
+		rio_release_outb_mbox(mport, cmbox);
+		kfree(cm);
+		return -ENODEV;
+	}
+
+	/*
+	 * Allocate and register inbound messaging buffers to be ready
+	 * to receive channel and system management requests
+	 */
+	for (i = 0; i < RIOCM_RX_RING_SIZE; i++)
+		cm->rx_buf[i] = NULL;
+
+	cm->rx_slots = RIOCM_RX_RING_SIZE;
+	mutex_init(&cm->rx_lock);
+	riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
+	cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+	INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
+
+	cm->tx_slot = 0;
+	cm->tx_cnt = 0;
+	cm->tx_ack_slot = 0;
+	spin_lock_init(&cm->tx_lock);
+
+	INIT_LIST_HEAD(&cm->peers);
+	cm->npeers = 0;
+	INIT_LIST_HEAD(&cm->tx_reqs);
+
+	down_write(&rdev_sem);
+	list_add_tail(&cm->list, &cm_dev_list);
+	up_write(&rdev_sem);
+
+	return 0;
+}
+
+/*
+ * riocm_remove_mport - remove local mport device from channel management core
+ * @dev: device object associated with mport
+ * @class_intf: class interface
+ *
+ * Removes a local mport device from the list of registered devices that
+ * provide channel management services. If the specified mport is not
+ * registered with the CM core, the call is silently ignored.
+ */
+static void riocm_remove_mport(struct device *dev,
+			       struct class_interface *class_intf)
+{
+	struct rio_mport *mport = to_rio_mport(dev);
+	struct cm_dev *cm;
+	struct cm_peer *peer, *temp;
+	struct rio_channel *ch, *_c;
+	unsigned int i;
+	bool found = false;
+	LIST_HEAD(list);
+
+	riocm_debug(MPORT, "%s", mport->name);
+
+	/* Find a matching cm_dev object */
+	down_write(&rdev_sem);
+	list_for_each_entry(cm, &cm_dev_list, list) {
+		if (cm->mport == mport) {
+			list_del(&cm->list);
+			found = true;
+			break;
+		}
+	}
+	up_write(&rdev_sem);
+	if (!found)
+		return;
+
+	flush_workqueue(cm->rx_wq);
+	destroy_workqueue(cm->rx_wq);
+
+	/* Release channels bound to this mport */
+	spin_lock_bh(&idr_lock);
+	idr_for_each_entry(&ch_idr, ch, i) {
+		if (ch->cmdev == cm) {
+			riocm_debug(RDEV, "%s drop ch_%d",
+				    mport->name, ch->id);
+			idr_remove(&ch_idr, ch->id);
+			list_add(&ch->ch_node, &list);
+		}
+	}
+	spin_unlock_bh(&idr_lock);
+
+	if (!list_empty(&list)) {
+		list_for_each_entry_safe(ch, _c, &list, ch_node) {
+			list_del(&ch->ch_node);
+			riocm_ch_close(ch);
+		}
+	}
+
+	rio_release_inb_mbox(mport, cmbox);
+	rio_release_outb_mbox(mport, cmbox);
+
+	/* Remove and free peer entries */
+	if (!list_empty(&cm->peers))
+		riocm_debug(RDEV, "ATTN: peer list not empty");
+	list_for_each_entry_safe(peer, temp, &cm->peers, node) {
+		riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
+		list_del(&peer->node);
+		kfree(peer);
+	}
+
+	riocm_rx_free(cm);
+	kfree(cm);
+	riocm_debug(MPORT, "%s done", mport->name);
+}
+
+static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
+	void *unused)
+{
+	struct rio_channel *ch;
+	unsigned int i;
+
+	riocm_debug(EXIT, ".");
+
+	spin_lock_bh(&idr_lock);
+	idr_for_each_entry(&ch_idr, ch, i) {
+		riocm_debug(EXIT, "close ch %d", ch->id);
+		if (ch->state == RIO_CM_CONNECTED)
+			riocm_send_close(ch);
+	}
+	spin_unlock_bh(&idr_lock);
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * riocm_interface handles addition/removal of remote RapidIO devices
+ */
+static struct subsys_interface riocm_interface = {
+	.name		= "rio_cm",
+	.subsys		= &rio_bus_type,
+	.add_dev	= riocm_add_dev,
+	.remove_dev	= riocm_remove_dev,
+};
+
+/*
+ * rio_mport_interface handles addition/removal of local mport devices
+ */
+static struct class_interface rio_mport_interface __refdata = {
+	.class = &rio_mport_class,
+	.add_dev = riocm_add_mport,
+	.remove_dev = riocm_remove_mport,
+};
+
+static struct notifier_block rio_cm_notifier = {
+	.notifier_call = rio_cm_shutdown,
+};
+
+static int __init riocm_init(void)
+{
+	int ret;
+
+	/* Create device class needed by udev */
+	dev_class = class_create(THIS_MODULE, DRV_NAME);
+	if (IS_ERR(dev_class)) {
+		riocm_error("Cannot create " DRV_NAME " class");
+		return PTR_ERR(dev_class);
+	}
+
+	ret = alloc_chrdev_region(&dev_number, 0, 1, DRV_NAME);
+	if (ret) {
+		class_destroy(dev_class);
+		return ret;
+	}
+
+	dev_major = MAJOR(dev_number);
+	dev_minor_base = MINOR(dev_number);
+	riocm_debug(INIT, "Registered class with %d major", dev_major);
+
+	/*
+	 * Register as rapidio_port class interface to get notifications about
+	 * mport additions and removals.
+	 */
+	ret = class_interface_register(&rio_mport_interface);
+	if (ret) {
+		riocm_error("class_interface_register error: %d", ret);
+		goto err_reg;
+	}
+
+	/*
+	 * Register as RapidIO bus interface to get notifications about
+	 * addition/removal of remote RapidIO devices.
+	 */
+	ret = subsys_interface_register(&riocm_interface);
+	if (ret) {
+		riocm_error("subsys_interface_register error: %d", ret);
+		goto err_cl;
+	}
+
+	ret = register_reboot_notifier(&rio_cm_notifier);
+	if (ret) {
+		riocm_error("failed to register reboot notifier (err=%d)", ret);
+		goto err_sif;
+	}
+
+	ret = riocm_cdev_add(dev_number);
+	if (ret) {
+		unregister_reboot_notifier(&rio_cm_notifier);
+		ret = -ENODEV;
+		goto err_sif;
+	}
+
+	return 0;
+err_sif:
+	subsys_interface_unregister(&riocm_interface);
+err_cl:
+	class_interface_unregister(&rio_mport_interface);
+err_reg:
+	unregister_chrdev_region(dev_number, 1);
+	class_destroy(dev_class);
+	return ret;
+}
+
+static void __exit riocm_exit(void)
+{
+	riocm_debug(EXIT, "enter");
+	unregister_reboot_notifier(&rio_cm_notifier);
+	subsys_interface_unregister(&riocm_interface);
+	class_interface_unregister(&rio_mport_interface);
+	idr_destroy(&ch_idr);
+
+	device_unregister(riocm_cdev.dev);
+	cdev_del(&(riocm_cdev.cdev));
+
+	class_destroy(dev_class);
+	unregister_chrdev_region(dev_number, 1);
+}
+
+late_initcall(riocm_init);
+module_exit(riocm_exit);

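Taken together, the ioctl handlers above give user space a socket-like client flow: create a channel, connect it to a remote listener, exchange messages, close. Below is a minimal sketch of the active (client) side, assuming the UAPI definitions introduced by this series (linux/rio_cm_cdev.h, and DEV_NAME resolving to a /dev/rio_cm node) and made-up destination/channel numbers; the passive side would use the BIND, LISTEN and ACCEPT ioctls instead.

	#include <stdint.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/rio_cm_cdev.h>

	int main(void)
	{
		struct rio_cm_channel chan;
		struct rio_cm_msg msg;
		char payload[] = "ping";
		uint16_t ch_num = 0;		/* 0 = allocate channel ID automatically */
		int fd = open("/dev/rio_cm", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, RIO_CM_CHAN_CREATE, &ch_num))
			goto fail;

		memset(&chan, 0, sizeof(chan));
		chan.id = ch_num;
		chan.mport_id = 0;		/* first local mport */
		chan.remote_destid = 5;		/* example peer destID */
		chan.remote_channel = 1;	/* channel the peer listens on */
		if (ioctl(fd, RIO_CM_CHAN_CONNECT, &chan))
			goto fail;

		memset(&msg, 0, sizeof(msg));
		msg.ch_num = ch_num;
		msg.size = sizeof(payload);
		msg.msg = (uintptr_t)payload;	/* kernel copies the data out */
		if (ioctl(fd, RIO_CM_CHAN_SEND, &msg))
			goto fail;

		ioctl(fd, RIO_CM_CHAN_CLOSE, &ch_num);
		close(fd);
		return 0;
	fail:
		close(fd);
		return 1;
	}
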
+ 6 - 0
drivers/rapidio/switches/Kconfig

@@ -22,3 +22,9 @@ config RAPIDIO_CPS_GEN2
 	default n
 	---help---
 	  Includes support for ITD CPS Gen.2 serial RapidIO switches.
+
+config RAPIDIO_RXS_GEN3
+	tristate "IDT RXS Gen.3 SRIO switch support"
+	default n
+	---help---
+	  Includes support for IDT RXS Gen.3 serial RapidIO switches.

+ 1 - 0
drivers/rapidio/switches/Makefile

@@ -6,3 +6,4 @@ obj-$(CONFIG_RAPIDIO_TSI57X)	+= tsi57x.o
 obj-$(CONFIG_RAPIDIO_CPS_XX)	+= idtcps.o
 obj-$(CONFIG_RAPIDIO_TSI568)	+= tsi568.o
 obj-$(CONFIG_RAPIDIO_CPS_GEN2)	+= idt_gen2.o
+obj-$(CONFIG_RAPIDIO_RXS_GEN3)	+= idt_gen3.o

+ 3 - 4
drivers/rapidio/switches/idt_gen2.c

@@ -436,10 +436,11 @@ static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id)
 				    RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
 	}
 
+	spin_unlock(&rdev->rswitch->lock);
+
 	/* Create device-specific sysfs attributes */
 	idtg2_sysfs(rdev, true);
 
-	spin_unlock(&rdev->rswitch->lock);
 	return 0;
 }
 
@@ -452,11 +453,9 @@ static void idtg2_remove(struct rio_dev *rdev)
 		return;
 	}
 	rdev->rswitch->ops = NULL;
-
+	spin_unlock(&rdev->rswitch->lock);
 	/* Remove device-specific sysfs attributes */
 	idtg2_sysfs(rdev, false);
-
-	spin_unlock(&rdev->rswitch->lock);
 }
 
 static struct rio_device_id idtg2_id_table[] = {

+ 382 - 0
drivers/rapidio/switches/idt_gen3.c

@@ -0,0 +1,382 @@
+/*
+ * IDT RXS Gen.3 Serial RapidIO switch family support
+ *
+ * Copyright 2016 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+
+#include <asm/page.h>
+#include "../rio.h"
+
+#define RIO_EM_PW_STAT		0x40020
+#define RIO_PW_CTL		0x40204
+#define RIO_PW_CTL_PW_TMR		0xffffff00
+#define RIO_PW_ROUTE		0x40208
+
+#define RIO_EM_DEV_INT_EN	0x40030
+
+#define RIO_PLM_SPx_IMP_SPEC_CTL(x)	(0x10100 + (x)*0x100)
+#define RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST	0x02000000
+
+#define RIO_PLM_SPx_PW_EN(x)	(0x10118 + (x)*0x100)
+#define RIO_PLM_SPx_PW_EN_OK2U	0x40000000
+#define RIO_PLM_SPx_PW_EN_LINIT 0x10000000
+
+#define RIO_BC_L2_Gn_ENTRYx_CSR(n, x)	(0x31000 + (n)*0x400 + (x)*0x4)
+#define RIO_SPx_L2_Gn_ENTRYy_CSR(x, n, y) \
+				(0x51000 + (x)*0x2000 + (n)*0x400 + (y)*0x4)
+
+static int
+idtg3_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 route_port)
+{
+	u32 rval;
+	u32 entry = route_port;
+	int err = 0;
+
+	pr_debug("RIO: %s t=0x%x did_%x to p_%x\n",
+		 __func__, table, route_destid, entry);
+
+	if (route_destid > 0xFF)
+		return -EINVAL;
+
+	if (route_port == RIO_INVALID_ROUTE)
+		entry = RIO_RT_ENTRY_DROP_PKT;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		/* Use broadcast register to update all per-port tables */
+		err = rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_BC_L2_Gn_ENTRYx_CSR(0, route_destid),
+				entry);
+		return err;
+	}
+
+	/*
+	 * Verify that specified port/table number is valid
+	 */
+	err = rio_mport_read_config_32(mport, destid, hopcount,
+				       RIO_SWP_INFO_CAR, &rval);
+	if (err)
+		return err;
+
+	if (table >= RIO_GET_TOTAL_PORTS(rval))
+		return -EINVAL;
+
+	err = rio_mport_write_config_32(mport, destid, hopcount,
+			RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
+			entry);
+	return err;
+}
+
+static int
+idtg3_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table, u16 route_destid, u8 *route_port)
+{
+	u32 rval;
+	int err;
+
+	if (route_destid > 0xFF)
+		return -EINVAL;
+
+	err = rio_mport_read_config_32(mport, destid, hopcount,
+				       RIO_SWP_INFO_CAR, &rval);
+	if (err)
+		return err;
+
+	/*
+	 * This switch device does not have a dedicated global routing table.
+	 * It is substituted by reading the routing table of the port on which
+	 * the maintenance read request arrived.
+	 */
+	if (table == RIO_GLOBAL_TABLE)
+		table = RIO_GET_PORT_NUM(rval);
+	else if (table >= RIO_GET_TOTAL_PORTS(rval))
+		return -EINVAL;
+
+	err = rio_mport_read_config_32(mport, destid, hopcount,
+			RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, route_destid),
+			&rval);
+	if (err)
+		return err;
+
+	if (rval == RIO_RT_ENTRY_DROP_PKT)
+		*route_port = RIO_INVALID_ROUTE;
+	else
+		*route_port = (u8)rval;
+
+	return 0;
+}
+
+static int
+idtg3_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+		       u16 table)
+{
+	u32 i;
+	u32 rval;
+	int err;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		for (i = 0; i <= 0xff; i++) {
+			err = rio_mport_write_config_32(mport, destid, hopcount,
+						RIO_BC_L2_Gn_ENTRYx_CSR(0, i),
+						RIO_RT_ENTRY_DROP_PKT);
+			if (err)
+				break;
+		}
+
+		return err;
+	}
+
+	err = rio_mport_read_config_32(mport, destid, hopcount,
+				       RIO_SWP_INFO_CAR, &rval);
+	if (err)
+		return err;
+
+	if (table >= RIO_GET_TOTAL_PORTS(rval))
+		return -EINVAL;
+
+	for (i = 0; i <= 0xff; i++) {
+		err = rio_mport_write_config_32(mport, destid, hopcount,
+					RIO_SPx_L2_Gn_ENTRYy_CSR(table, 0, i),
+					RIO_RT_ENTRY_DROP_PKT);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+/*
+ * This routine performs device-specific initialization only.
+ * All standard EM configuration should be performed at the upper level.
+ */
+static int
+idtg3_em_init(struct rio_dev *rdev)
+{
+	int i, tmp;
+	u32 rval;
+
+	pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
+
+	/* Disable assertion of interrupt signal */
+	rio_write_config_32(rdev, RIO_EM_DEV_INT_EN, 0);
+
+	/* Disable port-write event notifications during initialization */
+	rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL,
+			    RIO_EM_PW_TX_CTRL_PW_DIS);
+
+	/* Configure Port-Write notifications for hot-swap events */
+	tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+	for (i = 0; i < tmp; i++) {
+
+		rio_read_config_32(rdev,
+			RIO_DEV_PORT_N_ERR_STS_CSR(rdev, i),
+			&rval);
+		if (rval & RIO_PORT_N_ERR_STS_PORT_UA)
+			continue;
+
+		/* Clear events signaled before enabling notification */
+		rio_write_config_32(rdev,
+			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(i), 0);
+
+		/* Enable event notifications */
+		rio_write_config_32(rdev,
+			rdev->em_efptr + RIO_EM_PN_ERRRATE_EN(i),
+			RIO_EM_PN_ERRRATE_EN_OK2U | RIO_EM_PN_ERRRATE_EN_U2OK);
+		/* Enable port-write generation on events */
+		rio_write_config_32(rdev, RIO_PLM_SPx_PW_EN(i),
+			RIO_PLM_SPx_PW_EN_OK2U | RIO_PLM_SPx_PW_EN_LINIT);
+
+	}
+
+	/* Set Port-Write destination port */
+	tmp = RIO_GET_PORT_NUM(rdev->swpinfo);
+	rio_write_config_32(rdev, RIO_PW_ROUTE, 1 << tmp);
+
+
+	/* Enable sending port-write event notifications */
+	rio_write_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0);
+
+	/* set TVAL = ~50us */
+	rio_write_config_32(rdev,
+		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+	return 0;
+}
+
+
+/*
+ * idtg3_em_handler - device-specific error handler
+ *
+ * If the link is down (PORT_UNINIT), does nothing - this is considered
+ * a link partner removal from the port.
+ *
+ * If the link is up (PORT_OK), the situation is handled as a *new* device
+ * insertion. In this case the ERR_STOP bits are cleared by issuing a soft
+ * reset command to the reporting port. Inbound and outbound ackIDs are
+ * cleared by the reset as well. This way the port is synchronized with the
+ * freshly inserted device (assuming it was reset/powered-up on insertion).
+ *
+ * TODO: This is not sufficient in a situation when a link between two devices
+ * was down and up again (e.g. cable disconnect). For that situation full ackID
+ * realignment process has to be implemented.
+ */
+static int
+idtg3_em_handler(struct rio_dev *rdev, u8 pnum)
+{
+	u32 err_status;
+	u32 rval;
+
+	rio_read_config_32(rdev,
+			RIO_DEV_PORT_N_ERR_STS_CSR(rdev, pnum),
+			&err_status);
+
+	/* Do nothing for device/link removal */
+	if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT)
+		return 0;
+
+	/* When the link is OK we have a device insertion.
+	 * Request a port soft reset to clear errors if any are present.
+	 * Inbound and outbound ackIDs will be 0 after the reset.
+	 */
+	if (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+				RIO_PORT_N_ERR_STS_INP_ES)) {
+		rio_read_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), &rval);
+		rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum),
+				    rval | RIO_PLM_SPx_IMP_SPEC_CTL_SOFT_RST);
+		udelay(10);
+		rio_write_config_32(rdev, RIO_PLM_SPx_IMP_SPEC_CTL(pnum), rval);
+		msleep(500);
+	}
+
+	return 0;
+}
+
+static struct rio_switch_ops idtg3_switch_ops = {
+	.owner = THIS_MODULE,
+	.add_entry = idtg3_route_add_entry,
+	.get_entry = idtg3_route_get_entry,
+	.clr_table = idtg3_route_clr_table,
+	.em_init   = idtg3_em_init,
+	.em_handle = idtg3_em_handler,
+};
+
+static int idtg3_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+{
+	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+
+	spin_lock(&rdev->rswitch->lock);
+
+	if (rdev->rswitch->ops) {
+		spin_unlock(&rdev->rswitch->lock);
+		return -EINVAL;
+	}
+
+	rdev->rswitch->ops = &idtg3_switch_ops;
+
+	if (rdev->do_enum) {
+		/* Disable hierarchical routing support: Existing fabric
+		 * enumeration/discovery process (see rio-scan.c) uses 8-bit
+		 * flat destination ID routing only.
+		 */
+		rio_write_config_32(rdev, 0x5000 + RIO_BC_RT_CTL_CSR, 0);
+	}
+
+	spin_unlock(&rdev->rswitch->lock);
+
+	return 0;
+}
+
+static void idtg3_remove(struct rio_dev *rdev)
+{
+	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+	spin_lock(&rdev->rswitch->lock);
+	if (rdev->rswitch->ops == &idtg3_switch_ops)
+		rdev->rswitch->ops = NULL;
+	spin_unlock(&rdev->rswitch->lock);
+}
+
+/*
+ * Gen3 switches repeat sending PW messages until a corresponding event flag
+ * is cleared. Use shutdown notification to disable generation of port-write
+ * messages if their destination node is shut down.
+ */
+static void idtg3_shutdown(struct rio_dev *rdev)
+{
+	int i;
+	u32 rval;
+	u16 destid;
+
+	/* Currently the enumerator node also acts as the PW handler */
+	if (!rdev->do_enum)
+		return;
+
+	pr_debug("RIO: %s(%s)\n", __func__, rio_name(rdev));
+
+	rio_read_config_32(rdev, RIO_PW_ROUTE, &rval);
+	i = RIO_GET_PORT_NUM(rdev->swpinfo);
+
+	/* Check port-write destination port */
+	if (!((1 << i) & rval))
+		return;
+
+	/* Disable sending port-write event notifications if the PW destID
+	 * matches that of the enumerator node
+	 */
+	rio_read_config_32(rdev, rdev->em_efptr + RIO_EM_PW_TGT_DEVID, &rval);
+
+	if (rval & RIO_EM_PW_TGT_DEVID_DEV16)
+		destid = rval >> 16;
+	else
+		destid = ((rval & RIO_EM_PW_TGT_DEVID_D8) >> 16);
+
+	if (rdev->net->hport->host_deviceid == destid) {
+		rio_write_config_32(rdev,
+				    rdev->em_efptr + RIO_EM_PW_TX_CTRL, 0);
+		pr_debug("RIO: %s(%s) PW transmission disabled\n",
+			 __func__, rio_name(rdev));
+	}
+}
+
+static struct rio_device_id idtg3_id_table[] = {
+	{RIO_DEVICE(RIO_DID_IDTRXS1632, RIO_VID_IDT)},
+	{RIO_DEVICE(RIO_DID_IDTRXS2448, RIO_VID_IDT)},
+	{ 0, }	/* terminate list */
+};
+
+static struct rio_driver idtg3_driver = {
+	.name = "idt_gen3",
+	.id_table = idtg3_id_table,
+	.probe = idtg3_probe,
+	.remove = idtg3_remove,
+	.shutdown = idtg3_shutdown,
+};
+
+static int __init idtg3_init(void)
+{
+	return rio_register_driver(&idtg3_driver);
+}
+
+static void __exit idtg3_exit(void)
+{
+	pr_debug("RIO: %s\n", __func__);
+	rio_unregister_driver(&idtg3_driver);
+	pr_debug("RIO: %s done\n", __func__);
+}
+
+device_initcall(idtg3_init);
+module_exit(idtg3_exit);
+
+MODULE_DESCRIPTION("IDT RXS Gen.3 Serial RapidIO switch family driver");
+MODULE_AUTHOR("Integrated Device Technology, Inc.");
+MODULE_LICENSE("GPL");

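A worked example of the offset arithmetic encoded by the routing macros above (illustrative port/destID values): the level-2, group-0 entry for destID 0x05 in port 2's table, and the broadcast alias that updates the same entry in every per-port copy at once:

	RIO_SPx_L2_Gn_ENTRYy_CSR(2, 0, 5)
		= 0x51000 + 2*0x2000 + 0*0x400 + 5*0x4 = 0x55014

	RIO_BC_L2_Gn_ENTRYx_CSR(0, 5)
		= 0x31000 + 0*0x400 + 5*0x4 = 0x31014
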
+ 12 - 14
drivers/rapidio/switches/tsi57x.c

@@ -175,12 +175,10 @@ tsi57x_em_init(struct rio_dev *rdev)
 
 		/* Clear all pending interrupts */
 		rio_read_config_32(rdev,
-				rdev->phys_efptr +
-					RIO_PORT_N_ERR_STS_CSR(portnum),
+				RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
 				&regval);
 		rio_write_config_32(rdev,
-				rdev->phys_efptr +
-					RIO_PORT_N_ERR_STS_CSR(portnum),
+				RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
 				regval & 0x07120214);
 
 		rio_read_config_32(rdev,
@@ -198,7 +196,7 @@ tsi57x_em_init(struct rio_dev *rdev)
 
 		/* Skip next (odd) port if the current port is in x4 mode */
 		rio_read_config_32(rdev,
-				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+				RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
 				&regval);
 		if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
 			portnum++;
@@ -221,23 +219,23 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
 	u32 regval;
 
 	rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+			RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
 			&err_status);
 
 	if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
-	    (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
-			  RIO_PORT_N_ERR_STS_PW_INP_ES))) {
+	    (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
+			  RIO_PORT_N_ERR_STS_INP_ES))) {
 		/* Remove any queued packets by locking/unlocking port */
 		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+			RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
 			&regval);
 		if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
 			rio_write_config_32(rdev,
-				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+				RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
 				regval | RIO_PORT_N_CTL_LOCKOUT);
 			udelay(50);
 			rio_write_config_32(rdev,
-				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+				RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
 				regval);
 		}
 
@@ -245,7 +243,7 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
 		 * valid bit
 		 */
 		rio_read_config_32(rdev,
-			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
+			RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum),
 			&regval);
 
 		/* Send a Packet-Not-Accepted/Link-Request-Input-Status control
@@ -259,8 +257,8 @@ tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
 			while (checkcount--) {
 				udelay(50);
 				rio_read_config_32(rdev,
-					rdev->phys_efptr +
-						RIO_PORT_N_MNT_RSP_CSR(portnum),
+					RIO_DEV_PORT_N_MNT_RSP_CSR(rdev,
+								   portnum),
 					&regval);
 				if (regval & RIO_PORT_N_MNT_RSP_RVAL)
 					goto exit_es;

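The conversion above consistently replaces open-coded rdev->phys_efptr arithmetic with per-device helper macros. A hedged sketch of what such a helper presumably reduces to (the real definition belongs to the rev.3 register-map rework elsewhere in this series and may additionally select between register map types):

	#define RIO_DEV_PORT_N_ERR_STS_CSR(dev, n) \
		((dev)->phys_efptr + RIO_PORT_N_ERR_STS_CSR(n))
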
+ 2 - 0
drivers/video/fbdev/bfin_adv7393fb.c

@@ -10,6 +10,8 @@
  * TODO: Code Cleanup
  */
 
+#define DRIVER_NAME "bfin-adv7393"
+
 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
 
 #include <linux/module.h>

+ 0 - 2
drivers/video/fbdev/bfin_adv7393fb.h

@@ -59,8 +59,6 @@ enum {
 	BLANK_OFF,
 };
 
-#define DRIVER_NAME "bfin-adv7393"
-
 struct adv7393fb_modes {
 	const s8 name[25];	/* Full name */
 	u16 xres;		/* Active Horizonzal Pixels  */

+ 2 - 2
drivers/video/logo/logo.c

@@ -36,11 +36,11 @@ static int __init fb_logo_late_init(void)
 
 late_initcall(fb_logo_late_init);
 
-/* logo's are marked __initdata. Use __init_refok to tell
+/* logos are marked __initdata. Use __ref to tell
  * modpost that it is intended that this function uses data
  * marked __initdata.
  */
-const struct linux_logo * __init_refok fb_find_logo(int depth)
+const struct linux_logo * __ref fb_find_logo(int depth)
 {
 	const struct linux_logo *logo = NULL;
 

+ 0 - 2
drivers/w1/masters/omap_hdq.c

@@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
 		goto out;
 	}
 
-	hdq_data->hdq_irqstatus = 0;
-
 	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
 		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
 			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,

+ 1 - 13
drivers/w1/slaves/w1_ds2406.c

@@ -153,16 +153,4 @@ static struct w1_family w1_family_12 = {
 	.fid = W1_FAMILY_DS2406,
 	.fops = &w1_f12_fops,
 };
-
-static int __init w1_f12_init(void)
-{
-	return w1_register_family(&w1_family_12);
-}
-
-static void __exit w1_f12_exit(void)
-{
-	w1_unregister_family(&w1_family_12);
-}
-
-module_init(w1_f12_init);
-module_exit(w1_f12_exit);
+module_w1_family(w1_family_12);

+ 1 - 13
drivers/w1/slaves/w1_ds2408.c

@@ -351,16 +351,4 @@ static struct w1_family w1_family_29 = {
 	.fid = W1_FAMILY_DS2408,
 	.fops = &w1_f29_fops,
 };
-
-static int __init w1_f29_init(void)
-{
-	return w1_register_family(&w1_family_29);
-}
-
-static void __exit w1_f29_exit(void)
-{
-	w1_unregister_family(&w1_family_29);
-}
-
-module_init(w1_f29_init);
-module_exit(w1_f29_exit);
+module_w1_family(w1_family_29);

+ 1 - 13
drivers/w1/slaves/w1_ds2413.c

@@ -135,16 +135,4 @@ static struct w1_family w1_family_3a = {
 	.fid = W1_FAMILY_DS2413,
 	.fops = &w1_f3a_fops,
 };
-
-static int __init w1_f3a_init(void)
-{
-	return w1_register_family(&w1_family_3a);
-}
-
-static void __exit w1_f3a_exit(void)
-{
-	w1_unregister_family(&w1_family_3a);
-}
-
-module_init(w1_f3a_init);
-module_exit(w1_f3a_exit);
+module_w1_family(w1_family_3a);

+ 1 - 13
drivers/w1/slaves/w1_ds2423.c

@@ -138,19 +138,7 @@ static struct w1_family w1_family_1d = {
 	.fid = W1_COUNTER_DS2423,
 	.fops = &w1_f1d_fops,
 };
-
-static int __init w1_f1d_init(void)
-{
-	return w1_register_family(&w1_family_1d);
-}
-
-static void __exit w1_f1d_exit(void)
-{
-	w1_unregister_family(&w1_family_1d);
-}
-
-module_init(w1_f1d_init);
-module_exit(w1_f1d_exit);
+module_w1_family(w1_family_1d);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");

+ 1 - 13
drivers/w1/slaves/w1_ds2431.c

@@ -288,19 +288,7 @@ static struct w1_family w1_family_2d = {
 	.fid = W1_EEPROM_DS2431,
 	.fops = &w1_f2d_fops,
 };
-
-static int __init w1_f2d_init(void)
-{
-	return w1_register_family(&w1_family_2d);
-}
-
-static void __exit w1_f2d_fini(void)
-{
-	w1_unregister_family(&w1_family_2d);
-}
-
-module_init(w1_f2d_init);
-module_exit(w1_f2d_fini);
+module_w1_family(w1_family_2d);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Bernhard Weirich <bernhard.weirich@riedel.net>");

+ 1 - 13
drivers/w1/slaves/w1_ds2433.c

@@ -305,16 +305,4 @@ static struct w1_family w1_family_23 = {
 	.fid = W1_EEPROM_DS2433,
 	.fops = &w1_f23_fops,
 };
-
-static int __init w1_f23_init(void)
-{
-	return w1_register_family(&w1_family_23);
-}
-
-static void __exit w1_f23_fini(void)
-{
-	w1_unregister_family(&w1_family_23);
-}
-
-module_init(w1_f23_init);
-module_exit(w1_f23_fini);
+module_w1_family(w1_family_23);

+ 6 - 37
drivers/w1/slaves/w1_ds2760.c

@@ -121,25 +121,14 @@ static const struct attribute_group *w1_ds2760_groups[] = {
 	NULL,
 };
 
-static DEFINE_IDA(bat_ida);
-
 static int w1_ds2760_add_slave(struct w1_slave *sl)
 {
 	int ret;
-	int id;
 	struct platform_device *pdev;
 
-	id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
-	if (id < 0) {
-		ret = id;
-		goto noid;
-	}
-
-	pdev = platform_device_alloc("ds2760-battery", id);
-	if (!pdev) {
-		ret = -ENOMEM;
-		goto pdev_alloc_failed;
-	}
+	pdev = platform_device_alloc("ds2760-battery", PLATFORM_DEVID_AUTO);
+	if (!pdev)
+		return -ENOMEM;
 	pdev->dev.parent = &sl->dev;
 
 	ret = platform_device_add(pdev);
@@ -148,24 +137,19 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
 
 	dev_set_drvdata(&sl->dev, pdev);
 
-	goto success;
+	return 0;
 
 pdev_add_failed:
 	platform_device_put(pdev);
-pdev_alloc_failed:
-	ida_simple_remove(&bat_ida, id);
-noid:
-success:
+
 	return ret;
 }
 
 static void w1_ds2760_remove_slave(struct w1_slave *sl)
 {
 	struct platform_device *pdev = dev_get_drvdata(&sl->dev);
-	int id = pdev->id;
 
 	platform_device_unregister(pdev);
-	ida_simple_remove(&bat_ida, id);
 }
 
 static struct w1_family_ops w1_ds2760_fops = {
@@ -178,28 +162,13 @@ static struct w1_family w1_ds2760_family = {
 	.fid = W1_FAMILY_DS2760,
 	.fops = &w1_ds2760_fops,
 };
-
-static int __init w1_ds2760_init(void)
-{
-	pr_info("1-Wire driver for the DS2760 battery monitor chip - (c) 2004-2005, Szabolcs Gyurko\n");
-	ida_init(&bat_ida);
-	return w1_register_family(&w1_ds2760_family);
-}
-
-static void __exit w1_ds2760_exit(void)
-{
-	w1_unregister_family(&w1_ds2760_family);
-	ida_destroy(&bat_ida);
-}
+module_w1_family(w1_ds2760_family);
 
 EXPORT_SYMBOL(w1_ds2760_read);
 EXPORT_SYMBOL(w1_ds2760_write);
 EXPORT_SYMBOL(w1_ds2760_store_eeprom);
 EXPORT_SYMBOL(w1_ds2760_recall_eeprom);
 
-module_init(w1_ds2760_init);
-module_exit(w1_ds2760_exit);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>");
 MODULE_DESCRIPTION("1-wire Driver Dallas 2760 battery monitor chip");

+ 5 - 34
drivers/w1/slaves/w1_ds2780.c

@@ -113,25 +113,14 @@ static const struct attribute_group *w1_ds2780_groups[] = {
 	NULL,
 };
 
-static DEFINE_IDA(bat_ida);
-
 static int w1_ds2780_add_slave(struct w1_slave *sl)
 {
 	int ret;
-	int id;
 	struct platform_device *pdev;
 
-	id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
-	if (id < 0) {
-		ret = id;
-		goto noid;
-	}
-
-	pdev = platform_device_alloc("ds2780-battery", id);
-	if (!pdev) {
-		ret = -ENOMEM;
-		goto pdev_alloc_failed;
-	}
+	pdev = platform_device_alloc("ds2780-battery", PLATFORM_DEVID_AUTO);
+	if (!pdev)
+		return -ENOMEM;
 	pdev->dev.parent = &sl->dev;
 
 	ret = platform_device_add(pdev);
@@ -144,19 +133,15 @@ static int w1_ds2780_add_slave(struct w1_slave *sl)
 
 pdev_add_failed:
 	platform_device_put(pdev);
-pdev_alloc_failed:
-	ida_simple_remove(&bat_ida, id);
-noid:
+
 	return ret;
 }
 
 static void w1_ds2780_remove_slave(struct w1_slave *sl)
 {
 	struct platform_device *pdev = dev_get_drvdata(&sl->dev);
-	int id = pdev->id;
 
 	platform_device_unregister(pdev);
-	ida_simple_remove(&bat_ida, id);
 }
 
 static struct w1_family_ops w1_ds2780_fops = {
@@ -169,21 +154,7 @@ static struct w1_family w1_ds2780_family = {
 	.fid = W1_FAMILY_DS2780,
 	.fops = &w1_ds2780_fops,
 };
-
-static int __init w1_ds2780_init(void)
-{
-	ida_init(&bat_ida);
-	return w1_register_family(&w1_ds2780_family);
-}
-
-static void __exit w1_ds2780_exit(void)
-{
-	w1_unregister_family(&w1_ds2780_family);
-	ida_destroy(&bat_ida);
-}
-
-module_init(w1_ds2780_init);
-module_exit(w1_ds2780_exit);
+module_w1_family(w1_ds2780_family);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");

+ 5 - 35
drivers/w1/slaves/w1_ds2781.c

@@ -17,7 +17,6 @@
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
-#include <linux/idr.h>
 
 #include "../w1.h"
 #include "../w1_int.h"
@@ -111,25 +110,14 @@ static const struct attribute_group *w1_ds2781_groups[] = {
 	NULL,
 };
 
-static DEFINE_IDA(bat_ida);
-
 static int w1_ds2781_add_slave(struct w1_slave *sl)
 {
 	int ret;
-	int id;
 	struct platform_device *pdev;
 
-	id = ida_simple_get(&bat_ida, 0, 0, GFP_KERNEL);
-	if (id < 0) {
-		ret = id;
-		goto noid;
-	}
-
-	pdev = platform_device_alloc("ds2781-battery", id);
-	if (!pdev) {
-		ret = -ENOMEM;
-		goto pdev_alloc_failed;
-	}
+	pdev = platform_device_alloc("ds2781-battery", PLATFORM_DEVID_AUTO);
+	if (!pdev)
+		return -ENOMEM;
 	pdev->dev.parent = &sl->dev;
 
 	ret = platform_device_add(pdev);
@@ -142,19 +130,15 @@ static int w1_ds2781_add_slave(struct w1_slave *sl)
 
 pdev_add_failed:
 	platform_device_put(pdev);
-pdev_alloc_failed:
-	ida_simple_remove(&bat_ida, id);
-noid:
+
 	return ret;
 }
 
 static void w1_ds2781_remove_slave(struct w1_slave *sl)
 {
 	struct platform_device *pdev = dev_get_drvdata(&sl->dev);
-	int id = pdev->id;
 
 	platform_device_unregister(pdev);
-	ida_simple_remove(&bat_ida, id);
 }
 
 static struct w1_family_ops w1_ds2781_fops = {
@@ -167,21 +151,7 @@ static struct w1_family w1_ds2781_family = {
 	.fid = W1_FAMILY_DS2781,
 	.fops = &w1_ds2781_fops,
 };
-
-static int __init w1_ds2781_init(void)
-{
-	ida_init(&bat_ida);
-	return w1_register_family(&w1_ds2781_family);
-}
-
-static void __exit w1_ds2781_exit(void)
-{
-	w1_unregister_family(&w1_ds2781_family);
-	ida_destroy(&bat_ida);
-}
-
-module_init(w1_ds2781_init);
-module_exit(w1_ds2781_exit);
+module_w1_family(w1_ds2781_family);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");

+ 1 - 13
drivers/w1/slaves/w1_ds28e04.c

@@ -427,16 +427,4 @@ static struct w1_family w1_family_1C = {
 	.fid = W1_FAMILY_DS28E04,
 	.fops = &w1_f1C_fops,
 };
-
-static int __init w1_f1C_init(void)
-{
-	return w1_register_family(&w1_family_1C);
-}
-
-static void __exit w1_f1C_fini(void)
-{
-	w1_unregister_family(&w1_family_1C);
-}
-
-module_init(w1_f1C_init);
-module_exit(w1_f1C_fini);
+module_w1_family(w1_family_1C);

+ 12 - 0
drivers/w1/w1_family.h

@@ -88,4 +88,16 @@ struct w1_family * w1_family_registered(u8);
 void w1_unregister_family(struct w1_family *);
 int w1_register_family(struct w1_family *);
 
+/**
+ * module_w1_family() - Helper macro for registering a 1-Wire family
+ * @__w1_family: w1_family struct
+ *
+ * Helper macro for 1-Wire families which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once; calling it replaces module_init() and module_exit().
+ */
+#define module_w1_family(__w1_family) \
+	module_driver(__w1_family, w1_register_family, \
+			w1_unregister_family)
+
 #endif /* __W1_FAMILY_H */
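
The new module_w1_family() macro builds on the kernel's generic module_driver() helper. Roughly (a simplified sketch of the expansion; the real macro lives in include/linux/device.h), module_w1_family(w1_ds2780_family) is equivalent to:

static int __init w1_ds2780_family_init(void)
{
	return w1_register_family(&w1_ds2780_family);
}
module_init(w1_ds2780_family_init);

static void __exit w1_ds2780_family_exit(void)
{
	w1_unregister_family(&w1_ds2780_family);
}
module_exit(w1_ds2780_family_exit);

which is the boilerplate deleted from the slave drivers above, minus their now-unneeded IDA setup.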

+ 18 - 16
fs/binfmt_elf.c

@@ -605,28 +605,30 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 			 * Do the same thing for the memory mapping - between
 			 * elf_bss and last_bss is the bss section.
 			 */
-			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
 			if (k > last_bss)
 				last_bss = k;
 		}
 	}
 
+	/*
+	 * Now fill out the bss section: first pad the last page from
+	 * the file up to the page boundary, and zero it from elf_bss
+	 * up to the end of the page.
+	 */
+	if (padzero(elf_bss)) {
+		error = -EFAULT;
+		goto out;
+	}
+	/*
+	 * Next, align both the file and mem bss up to the page size,
+	 * since this is where elf_bss was just zeroed up to, and where
+	 * last_bss will end after the vm_brk() below.
+	 */
+	elf_bss = ELF_PAGEALIGN(elf_bss);
+	last_bss = ELF_PAGEALIGN(last_bss);
+	/* Finally, if there is still more bss to allocate, do it. */
 	if (last_bss > elf_bss) {
-		/*
-		 * Now fill out the bss section.  First pad the last page up
-		 * to the page boundary, and then perform a mmap to make sure
-		 * that there are zero-mapped pages up to and including the
-		 * last bss page.
-		 */
-		if (padzero(elf_bss)) {
-			error = -EFAULT;
-			goto out;
-		}
-
-		/* What we have mapped so far */
-		elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
-
-		/* Map the last of the bss segment */
 		error = vm_brk(elf_bss, last_bss - elf_bss);
 		if (error)
 			goto out;
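
The reordered bss logic in load_elf_interp() is easier to follow with concrete numbers. A worked example, assuming 4 KiB ELF pages (the addresses are illustrative):

/*
 * Say the interpreter's file-backed image ends at elf_bss = 0x10f00 and
 * its memory image must extend to last_bss = 0x12b00.
 *
 *   padzero(0x10f00)        zeroes bytes 0x10f00..0x10fff of the last
 *                           file-mapped page
 *   ELF_PAGEALIGN(elf_bss)  -> 0x11000
 *   ELF_PAGEALIGN(last_bss) -> 0x13000
 *   vm_brk(0x11000, 0x2000) maps the remaining anonymous zero pages
 *
 * If alignment leaves last_bss <= elf_bss, the bss fits entirely in the
 * padded partial page and vm_brk() is skipped. The behavioral fix is that
 * padzero() now runs unconditionally, so the partial page is zeroed even
 * when no extra anonymous pages are needed.
 */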

+ 2 - 1
fs/binfmt_em86.c

@@ -24,7 +24,8 @@
 
 static int load_em86(struct linux_binprm *bprm)
 {
-	char *interp, *i_name, *i_arg;
+	const char *i_name, *i_arg;
+	char *interp;
 	struct file * file;
 	int retval;
 	struct elfhdr	elf_ex;

+ 6 - 3
fs/exec.c

@@ -866,7 +866,8 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
 		goto out;
 	}
 
-	*buf = vmalloc(i_size);
+	if (id != READING_FIRMWARE_PREALLOC_BUFFER)
+		*buf = vmalloc(i_size);
 	if (!*buf) {
 		ret = -ENOMEM;
 		goto out;
@@ -897,8 +898,10 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
 
 out_free:
 	if (ret < 0) {
-		vfree(*buf);
-		*buf = NULL;
+		if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
+			vfree(*buf);
+			*buf = NULL;
+		}
 	}
 
 out:
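
The fs/exec.c change teaches kernel_read_file() about caller-owned buffers: for READING_FIRMWARE_PREALLOC_BUFFER it must neither vmalloc() the buffer nor vfree() it on failure. A hedged sketch of the resulting calling convention (fw_buf and fw_buf_size are illustrative names, not fields from this patch):

void *buf = fw_buf;	/* preallocated by the firmware layer */
loff_t size = 0;
int ret;

ret = kernel_read_file(file, &buf, &size, fw_buf_size,
		       READING_FIRMWARE_PREALLOC_BUFFER);
/* On error, buf still points at the caller's buffer; nothing is freed. */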

+ 1 - 1
fs/inode.c

@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
 void address_space_init_once(struct address_space *mapping)
 {
 	memset(mapping, 0, sizeof(*mapping));
-	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
 	spin_lock_init(&mapping->tree_lock);
 	init_rwsem(&mapping->i_mmap_rwsem);
 	INIT_LIST_HEAD(&mapping->private_list);
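
With __GFP_ACCOUNT, the radix tree nodes backing the page cache are charged to the memory cgroup of the task inserting the page, so sparse page-cache trees no longer escape memcg limits. Any radix tree can opt in the same way; a one-line illustration:

/* Nodes of this tree are charged to the allocating task's memcg. */
static RADIX_TREE(example_tree, GFP_ATOMIC | __GFP_ACCOUNT);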

+ 21 - 24
fs/nilfs2/alloc.c

@@ -622,10 +622,10 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
 	lock = nilfs_mdt_bgl_lock(inode, group);
 
 	if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
-		nilfs_warning(inode->i_sb, __func__,
-			      "entry number %llu already freed: ino=%lu",
-			      (unsigned long long)req->pr_entry_nr,
-			      (unsigned long)inode->i_ino);
+		nilfs_msg(inode->i_sb, KERN_WARNING,
+			  "%s (ino=%lu): entry number %llu already freed",
+			  __func__, inode->i_ino,
+			  (unsigned long long)req->pr_entry_nr);
 	else
 		nilfs_palloc_group_desc_add_entries(desc, lock, 1);
 
@@ -663,10 +663,10 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
 	lock = nilfs_mdt_bgl_lock(inode, group);
 
 	if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
-		nilfs_warning(inode->i_sb, __func__,
-			      "entry number %llu already freed: ino=%lu",
-			      (unsigned long long)req->pr_entry_nr,
-			      (unsigned long)inode->i_ino);
+		nilfs_msg(inode->i_sb, KERN_WARNING,
+			  "%s (ino=%lu): entry number %llu already freed",
+			  __func__, inode->i_ino,
+			  (unsigned long long)req->pr_entry_nr);
 	else
 		nilfs_palloc_group_desc_add_entries(desc, lock, 1);
 
@@ -772,10 +772,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
 		do {
 			if (!nilfs_clear_bit_atomic(lock, group_offset,
 						    bitmap)) {
-				nilfs_warning(inode->i_sb, __func__,
-					      "entry number %llu already freed: ino=%lu",
-					      (unsigned long long)entry_nrs[j],
-					      (unsigned long)inode->i_ino);
+				nilfs_msg(inode->i_sb, KERN_WARNING,
+					  "%s (ino=%lu): entry number %llu already freed",
+					  __func__, inode->i_ino,
+					  (unsigned long long)entry_nrs[j]);
 			} else {
 				n++;
 			}
@@ -816,12 +816,11 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
 		for (k = 0; k < nempties; k++) {
 			ret = nilfs_palloc_delete_entry_block(inode,
 							      last_nrs[k]);
-			if (ret && ret != -ENOENT) {
-				nilfs_warning(inode->i_sb, __func__,
-					      "failed to delete block of entry %llu: ino=%lu, err=%d",
-					      (unsigned long long)last_nrs[k],
-					      (unsigned long)inode->i_ino, ret);
-			}
+			if (ret && ret != -ENOENT)
+				nilfs_msg(inode->i_sb, KERN_WARNING,
+					  "error %d deleting block that object (entry=%llu, ino=%lu) belongs to",
+					  ret, (unsigned long long)last_nrs[k],
+					  inode->i_ino);
 		}
 
 		desc_kaddr = kmap_atomic(desc_bh->b_page);
@@ -835,12 +834,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
 
 		if (nfree == nilfs_palloc_entries_per_group(inode)) {
 			ret = nilfs_palloc_delete_bitmap_block(inode, group);
-			if (ret && ret != -ENOENT) {
-				nilfs_warning(inode->i_sb, __func__,
-					      "failed to delete bitmap block of group %lu: ino=%lu, err=%d",
-					      group,
-					      (unsigned long)inode->i_ino, ret);
-			}
+			if (ret && ret != -ENOENT)
+				nilfs_msg(inode->i_sb, KERN_WARNING,
+					  "error %d deleting bitmap block of group=%lu, ino=%lu",
+					  ret, group, inode->i_ino);
 		}
 	}
 	return 0;
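
This file and the rest of the nilfs2 hunks below convert ad-hoc printk()/nilfs_warning() calls to the new nilfs_msg(sb, level, fmt, ...) helper, which threads the superblock through so every message can identify its filesystem instance. A rough sketch of what such a helper looks like (the real one is declared in fs/nilfs2/nilfs.h; the exact prefix format here is illustrative):

static void example_nilfs_msg(struct super_block *sb, const char *level,
			      const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	if (sb)
		printk("%sNILFS (%s): %pV\n", level, sb->s_id, &vaf);
	else
		printk("%sNILFS: %pV\n", level, &vaf);
	va_end(args);
}

Passing the level as a separate argument also lets callers keep a single format string across KERN_WARNING/KERN_ERR/KERN_CRIT call sites.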

+ 2 - 2
fs/nilfs2/bmap.c

@@ -41,8 +41,8 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
 	struct inode *inode = bmap->b_inode;
 
 	if (err == -EINVAL) {
-		nilfs_error(inode->i_sb, fname,
-			    "broken bmap (inode number=%lu)", inode->i_ino);
+		__nilfs_error(inode->i_sb, fname,
+			      "broken bmap (inode number=%lu)", inode->i_ino);
 		err = -EIO;
 	}
 	return err;

+ 1 - 1
fs/nilfs2/bmap.h

@@ -22,7 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_ondisk.h>	/* nilfs_binfo, nilfs_inode, etc */
 #include "alloc.h"
 #include "dat.h"
 

+ 2 - 2
fs/nilfs2/btnode.c

@@ -41,7 +41,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 	struct inode *inode = NILFS_BTNC_I(btnc);
 	struct buffer_head *bh;
 
-	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
 	if (unlikely(!bh))
 		return NULL;
 
@@ -70,7 +70,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
 	struct page *page;
 	int err;
 
-	bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+	bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
 	if (unlikely(!bh))
 		return -ENOMEM;
 

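BIT() is the generic helper from <linux/bitops.h>, so the btnode.c change is purely cosmetic:

#define BIT(nr)	(1UL << (nr))	/* BIT(BH_NILFS_Node) == 1 << BH_NILFS_Node */
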
+ 36 - 25
fs/nilfs2/btree.c

@@ -339,12 +339,14 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
  * nilfs_btree_node_broken - verify consistency of btree node
  * @node: btree node block to be examined
  * @size: node size (in bytes)
+ * @inode: host inode of btree
  * @blocknr: block number
  *
  * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
  */
 static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
-				   size_t size, sector_t blocknr)
+				   size_t size, struct inode *inode,
+				   sector_t blocknr)
 {
 	int level, flags, nchildren;
 	int ret = 0;
@@ -358,9 +360,10 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
 		     (flags & NILFS_BTREE_NODE_ROOT) ||
 		     nchildren < 0 ||
 		     nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
-		printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
-		       "level = %d, flags = 0x%x, nchildren = %d\n",
-		       (unsigned long long)blocknr, level, flags, nchildren);
+		nilfs_msg(inode->i_sb, KERN_CRIT,
+			  "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
+			  inode->i_ino, (unsigned long long)blocknr, level,
+			  flags, nchildren);
 		ret = 1;
 	}
 	return ret;
@@ -369,12 +372,12 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
 /**
  * nilfs_btree_root_broken - verify consistency of btree root node
  * @node: btree root node to be examined
- * @ino: inode number
+ * @inode: host inode of btree
  *
  * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
  */
 static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
-				   unsigned long ino)
+				   struct inode *inode)
 {
 	int level, flags, nchildren;
 	int ret = 0;
@@ -387,8 +390,9 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
 		     level >= NILFS_BTREE_LEVEL_MAX ||
 		     nchildren < 0 ||
 		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
-		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
-			ino, level, flags, nchildren);
+		nilfs_msg(inode->i_sb, KERN_CRIT,
+			  "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
+			  inode->i_ino, level, flags, nchildren);
 		ret = 1;
 	}
 	return ret;
@@ -396,13 +400,15 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
 
 int nilfs_btree_broken_node_block(struct buffer_head *bh)
 {
+	struct inode *inode;
 	int ret;
 
 	if (buffer_nilfs_checked(bh))
 		return 0;
 
+	inode = bh->b_page->mapping->host;
 	ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
-				       bh->b_size, bh->b_blocknr);
+				      bh->b_size, inode, bh->b_blocknr);
 	if (likely(!ret))
 		set_buffer_nilfs_checked(bh);
 	return ret;
@@ -448,13 +454,15 @@ nilfs_btree_get_node(const struct nilfs_bmap *btree,
 	return node;
 }
 
-static int
-nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
+static int nilfs_btree_bad_node(const struct nilfs_bmap *btree,
+				struct nilfs_btree_node *node, int level)
 {
 	if (unlikely(nilfs_btree_node_get_level(node) != level)) {
 		dump_stack();
-		printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
-		       nilfs_btree_node_get_level(node), level);
+		nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
+			  "btree level mismatch (ino=%lu): %d != %d",
+			  btree->b_inode->i_ino,
+			  nilfs_btree_node_get_level(node), level);
 		return 1;
 	}
 	return 0;
@@ -509,6 +517,9 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
 
  out_no_wait:
 	if (!buffer_uptodate(bh)) {
+		nilfs_msg(btree->b_inode->i_sb, KERN_ERR,
+			  "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)",
+			  btree->b_inode->i_ino, (unsigned long long)ptr);
 		brelse(bh);
 		return -EIO;
 	}
@@ -568,7 +579,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
 			return ret;
 
 		node = nilfs_btree_get_nonroot_node(path, level);
-		if (nilfs_btree_bad_node(node, level))
+		if (nilfs_btree_bad_node(btree, node, level))
 			return -EINVAL;
 		if (!found)
 			found = nilfs_btree_node_lookup(node, key, &index);
@@ -616,7 +627,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
 		if (ret < 0)
 			return ret;
 		node = nilfs_btree_get_nonroot_node(path, level);
-		if (nilfs_btree_bad_node(node, level))
+		if (nilfs_btree_bad_node(btree, node, level))
 			return -EINVAL;
 		index = nilfs_btree_node_get_nchildren(node) - 1;
 		ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
@@ -2072,8 +2083,10 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree,
 	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
 	if (ret < 0) {
 		if (unlikely(ret == -ENOENT))
-			printk(KERN_CRIT "%s: key = %llu, level == %d\n",
-			       __func__, (unsigned long long)key, level);
+			nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
+				  "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
+				  btree->b_inode->i_ino,
+				  (unsigned long long)key, level);
 		goto out;
 	}
 
@@ -2110,12 +2123,11 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
 	if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
 	    level >= NILFS_BTREE_LEVEL_MAX) {
 		dump_stack();
-		printk(KERN_WARNING
-		       "%s: invalid btree level: %d (key=%llu, ino=%lu, "
-		       "blocknr=%llu)\n",
-		       __func__, level, (unsigned long long)key,
-		       NILFS_BMAP_I(btree)->vfs_inode.i_ino,
-		       (unsigned long long)bh->b_blocknr);
+		nilfs_msg(btree->b_inode->i_sb, KERN_WARNING,
+			  "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)",
+			  level, (unsigned long long)key,
+			  btree->b_inode->i_ino,
+			  (unsigned long long)bh->b_blocknr);
 		return;
 	}
 
@@ -2394,8 +2406,7 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
 
 	__nilfs_btree_init(bmap);
 
-	if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
-				    bmap->b_inode->i_ino))
+	if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
 		ret = -EIO;
 	return ret;
 }

+ 1 - 1
fs/nilfs2/btree.h

@@ -22,7 +22,7 @@
 #include <linux/types.h>
 #include <linux/buffer_head.h>
 #include <linux/list.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_ondisk.h>	/* nilfs_btree_node */
 #include "btnode.h"
 #include "bmap.h"
 

+ 10 - 13
fs/nilfs2/cpfile.c

@@ -21,7 +21,6 @@
 #include <linux/string.h>
 #include <linux/buffer_head.h>
 #include <linux/errno.h>
-#include <linux/nilfs2_fs.h>
 #include "mdt.h"
 #include "cpfile.h"
 
@@ -332,9 +331,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 	int ret, ncps, nicps, nss, count, i;
 
 	if (unlikely(start == 0 || start > end)) {
-		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
-		       "[%llu, %llu)\n", __func__,
-		       (unsigned long long)start, (unsigned long long)end);
+		nilfs_msg(cpfile->i_sb, KERN_ERR,
+			  "cannot delete checkpoints: invalid range [%llu, %llu)",
+			  (unsigned long long)start, (unsigned long long)end);
 		return -EINVAL;
 	}
 
@@ -386,9 +385,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 								   cpfile, cno);
 					if (ret == 0)
 						continue;
-					printk(KERN_ERR
-					       "%s: cannot delete block\n",
-					       __func__);
+					nilfs_msg(cpfile->i_sb, KERN_ERR,
+						  "error %d deleting checkpoint block",
+						  ret);
 					break;
 				}
 			}
@@ -991,14 +990,12 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
 	int err;
 
 	if (cpsize > sb->s_blocksize) {
-		printk(KERN_ERR
-		       "NILFS: too large checkpoint size: %zu bytes.\n",
-		       cpsize);
+		nilfs_msg(sb, KERN_ERR,
+			  "too large checkpoint size: %zu bytes", cpsize);
 		return -EINVAL;
 	} else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
-		printk(KERN_ERR
-		       "NILFS: too small checkpoint size: %zu bytes.\n",
-		       cpsize);
+		nilfs_msg(sb, KERN_ERR,
+			  "too small checkpoint size: %zu bytes", cpsize);
 		return -EINVAL;
 	}
 

+ 2 - 1
fs/nilfs2/cpfile.h

@@ -21,7 +21,8 @@
 
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_api.h>		/* nilfs_cpstat */
+#include <linux/nilfs2_ondisk.h>	/* nilfs_inode, nilfs_checkpoint */
 
 
 int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int,

+ 9 - 10
fs/nilfs2/dat.c

@@ -349,10 +349,11 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 	kaddr = kmap_atomic(entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
 	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
-		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
-		       (unsigned long long)vblocknr,
-		       (unsigned long long)le64_to_cpu(entry->de_start),
-		       (unsigned long long)le64_to_cpu(entry->de_end));
+		nilfs_msg(dat->i_sb, KERN_CRIT,
+			  "%s: invalid vblocknr = %llu, [%llu, %llu)",
+			  __func__, (unsigned long long)vblocknr,
+			  (unsigned long long)le64_to_cpu(entry->de_start),
+			  (unsigned long long)le64_to_cpu(entry->de_end));
 		kunmap_atomic(kaddr);
 		brelse(entry_bh);
 		return -EINVAL;
@@ -479,14 +480,12 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
 	int err;
 
 	if (entry_size > sb->s_blocksize) {
-		printk(KERN_ERR
-		       "NILFS: too large DAT entry size: %zu bytes.\n",
-		       entry_size);
+		nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes",
+			  entry_size);
 		return -EINVAL;
 	} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
-		printk(KERN_ERR
-		       "NILFS: too small DAT entry size: %zu bytes.\n",
-		       entry_size);
+		nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes",
+			  entry_size);
 		return -EINVAL;
 	}
 

+ 1 - 0
fs/nilfs2/dat.h

@@ -22,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/buffer_head.h>
 #include <linux/fs.h>
+#include <linux/nilfs2_ondisk.h>	/* nilfs_inode, nilfs_checkpoint */
 
 
 struct nilfs_palloc_req;

Some files were not shown because too many files changed in this diff