/*
 * pci_io.h - s390 PCI I/O access primitives
 * (arch/s390/include/asm/pci_io.h)
 */
  1. #ifndef _ASM_S390_PCI_IO_H
  2. #define _ASM_S390_PCI_IO_H
  3. #ifdef CONFIG_PCI
  4. #include <linux/kernel.h>
  5. #include <linux/slab.h>
  6. #include <asm/pci_insn.h>
  7. /* I/O Map */
  8. #define ZPCI_IOMAP_MAX_ENTRIES 0x7fff
  9. #define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL
  10. #define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL
  11. #define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL
  12. struct zpci_iomap_entry {
  13. u32 fh;
  14. u8 bar;
  15. u16 count;
  16. };
  17. extern struct zpci_iomap_entry *zpci_iomap_start;
  18. #define ZPCI_IDX(addr) \
  19. (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
  20. #define ZPCI_OFFSET(addr) \
  21. ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
  22. #define ZPCI_CREATE_REQ(handle, space, len) \
  23. ((u64) handle << 32 | space << 16 | len)
  24. #define zpci_read(LENGTH, RETTYPE) \
  25. static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
  26. { \
  27. struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
  28. u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
  29. u64 data; \
  30. int rc; \
  31. \
  32. rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
  33. if (rc) \
  34. data = -1ULL; \
  35. return (RETTYPE) data; \
  36. }
  37. #define zpci_write(LENGTH, VALTYPE) \
  38. static inline void zpci_write_##VALTYPE(VALTYPE val, \
  39. const volatile void __iomem *addr) \
  40. { \
  41. struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)]; \
  42. u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
  43. u64 data = (VALTYPE) val; \
  44. \
  45. zpci_store(data, req, ZPCI_OFFSET(addr)); \
  46. }
  47. zpci_read(8, u64)
  48. zpci_read(4, u32)
  49. zpci_read(2, u16)
  50. zpci_read(1, u8)
  51. zpci_write(8, u64)
  52. zpci_write(4, u32)
  53. zpci_write(2, u16)
  54. zpci_write(1, u8)
  55. static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len)
  56. {
  57. u64 val;
  58. switch (len) {
  59. case 1:
  60. val = (u64) *((u8 *) data);
  61. break;
  62. case 2:
  63. val = (u64) *((u16 *) data);
  64. break;
  65. case 4:
  66. val = (u64) *((u32 *) data);
  67. break;
  68. case 8:
  69. val = (u64) *((u64 *) data);
  70. break;
  71. default:
  72. val = 0; /* let FW report error */
  73. break;
  74. }
  75. return zpci_store(val, req, offset);
  76. }
  77. static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
  78. {
  79. u64 data;
  80. int cc;
  81. cc = zpci_load(&data, req, offset);
  82. if (cc)
  83. goto out;
  84. switch (len) {
  85. case 1:
  86. *((u8 *) dst) = (u8) data;
  87. break;
  88. case 2:
  89. *((u16 *) dst) = (u16) data;
  90. break;
  91. case 4:
  92. *((u32 *) dst) = (u32) data;
  93. break;
  94. case 8:
  95. *((u64 *) dst) = (u64) data;
  96. break;
  97. }
  98. out:
  99. return cc;
  100. }
  101. static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
  102. {
  103. return zpci_store_block(data, req, offset);
  104. }
  105. static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
  106. {
  107. int count = len > max ? max : len, size = 1;
  108. while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) {
  109. dst = dst >> 1;
  110. src = src >> 1;
  111. size = size << 1;
  112. }
  113. return size;
  114. }
  115. static inline int zpci_memcpy_fromio(void *dst,
  116. const volatile void __iomem *src,
  117. unsigned long n)
  118. {
  119. struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(src)];
  120. u64 req, offset = ZPCI_OFFSET(src);
  121. int size, rc = 0;
  122. while (n > 0) {
  123. size = zpci_get_max_write_size((u64 __force) src,
  124. (u64) dst, n, 8);
  125. req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
  126. rc = zpci_read_single(req, dst, offset, size);
  127. if (rc)
  128. break;
  129. offset += size;
  130. dst += size;
  131. n -= size;
  132. }
  133. return rc;
  134. }
  135. static inline int zpci_memcpy_toio(volatile void __iomem *dst,
  136. const void *src, unsigned long n)
  137. {
  138. struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
  139. u64 req, offset = ZPCI_OFFSET(dst);
  140. int size, rc = 0;
  141. if (!src)
  142. return -EINVAL;
  143. while (n > 0) {
  144. size = zpci_get_max_write_size((u64 __force) dst,
  145. (u64) src, n, 128);
  146. req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
  147. if (size > 8) /* main path */
  148. rc = zpci_write_block(req, src, offset);
  149. else
  150. rc = zpci_write_single(req, src, offset, size);
  151. if (rc)
  152. break;
  153. offset += size;
  154. src += size;
  155. n -= size;
  156. }
  157. return rc;
  158. }
  159. static inline int zpci_memset_io(volatile void __iomem *dst,
  160. unsigned char val, size_t count)
  161. {
  162. u8 *src = kmalloc(count, GFP_KERNEL);
  163. int rc;
  164. if (src == NULL)
  165. return -ENOMEM;
  166. memset(src, val, count);
  167. rc = zpci_memcpy_toio(dst, src, count);
  168. kfree(src);
  169. return rc;
  170. }
  171. #endif /* CONFIG_PCI */
  172. #endif /* _ASM_S390_PCI_IO_H */