/* octeon_main.h */
  1. /**********************************************************************
  2. * Author: Cavium, Inc.
  3. *
  4. * Contact: support@cavium.com
  5. * Please include "LiquidIO" in the subject.
  6. *
  7. * Copyright (c) 2003-2016 Cavium, Inc.
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more details.
  17. ***********************************************************************/
  18. /*! \file octeon_main.h
  19. * \brief Host Driver: This file is included by all host driver source files
  20. * to include common definitions.
  21. */
#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

/* CVM_CAST64(v): cast a scalar (or pointer) value to long long so it can be
 * printed with %ll* format specifiers on either word size.
 * NOTE(review): on 64-bit builds the value goes through (long) first —
 * presumably to accept pointer arguments without a size-mismatch warning;
 * confirm against call sites.
 */
#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

/* Driver name; also used as the owner string for pci_request_region(). */
#define DRV_NAME "LiquidIO"
/** This structure is used by NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Bytes offset below assume worst-case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-40. Piggybacked soft command, if any.
	 * (An 8-byte pointer on a 64-bit system; the earlier "33-47"
	 * annotation overstated its size.)
	 */
	struct octeon_soft_command *sc;
};
/* BQL-related functions (byte queue limits bookkeeping for TX buffers;
 * implementations live in the driver .c files).
 */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);

/* Notify a VF's network device when the PF changes its MAC address. */
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
  56. /** Swap 8B blocks */
  57. static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
  58. {
  59. while (blocks) {
  60. cpu_to_be64s(data);
  61. blocks--;
  62. data++;
  63. }
  64. }
/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 *
 * Undoes octeon_map_pci_barx(): both steps are guarded so this is safe to
 * call for a BAR whose mapping only partially succeeded.
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	/* 'done' is set only after a successful ioremap in
	 * octeon_map_pci_barx(), so a NULL/failed mapping is never unmapped.
	 */
	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	/* 'start' is nonzero only if the region was requested and had a
	 * resource; mirrors the baridx * 2 slot used when mapping.
	 */
	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}
/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory (0 = map whole BAR)
 * @return 0 on success; 1 if the region is busy, the BAR is zero-length,
 *         or ioremap fails.
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	/* NOTE(review): resource slot is baridx * 2 — presumably because the
	 * device exposes 64-bit BARs, each consuming two consecutive PCI
	 * resource entries; confirm against the hardware manual.
	 */
	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	/* Cap the mapping length if the caller asked for less than the BAR. */
	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	/* Debug print happens before the NULL check so a failed map still
	 * logs its start address and lengths.
	 */
	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	/* Mark success; octeon_unmap_pci_barx() keys off this flag. */
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
  118. static inline void *
  119. cnnic_numa_alloc_aligned_dma(u32 size,
  120. u32 *alloc_size,
  121. size_t *orig_ptr,
  122. int numa_node)
  123. {
  124. int retries = 0;
  125. void *ptr = NULL;
  126. #define OCTEON_MAX_ALLOC_RETRIES 1
  127. do {
  128. struct page *page = NULL;
  129. page = alloc_pages_node(numa_node,
  130. GFP_KERNEL,
  131. get_order(size));
  132. if (!page)
  133. page = alloc_pages(GFP_KERNEL,
  134. get_order(size));
  135. ptr = (void *)page_address(page);
  136. if ((unsigned long)ptr & 0x07) {
  137. __free_pages(page, get_order(size));
  138. ptr = NULL;
  139. /* Increment the size required if the first
  140. * attempt failed.
  141. */
  142. if (!retries)
  143. size += 7;
  144. }
  145. retries++;
  146. } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
  147. *alloc_size = size;
  148. *orig_ptr = (unsigned long)ptr;
  149. if ((unsigned long)ptr & 0x07)
  150. ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
  151. return ptr;
  152. }
/* Free a buffer obtained from cnnic_numa_alloc_aligned_dma(); orig_ptr must
 * be the raw page address that function returned via its orig_ptr argument.
 * The pci_dev, ptr, and dma_addr arguments are accepted for interface
 * symmetry but are unused by this implementation.
 */
#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
	free_pages(orig_ptr, get_order(size))
/* Sleep interruptibly on wait_queue until *condition becomes nonzero.
 *
 * @param wait_queue queue the waker signals through
 * @param condition  completion flag set by the waker; read with READ_ONCE
 * @return 0 once the condition is true, -EINTR if a signal arrived first
 *
 * This is an open-coded wait_event_interruptible(): the task state is set
 * to TASK_INTERRUPTIBLE before the signal test and schedule(), so a wake-up
 * occurring between the condition check and schedule() is not lost.
 */
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	int errno = 0;
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			errno = -EINTR;
			goto out;
		}
		schedule();
	}
out:
	/* Always restore the running state and detach from the queue,
	 * whether we exited via the condition or a pending signal.
	 */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
	return errno;
}
/* Gives up the CPU for a timeout period.
 * Check that the condition is not true before we go to sleep for a
 * timeout period.
 *
 * @param wait_queue queue through which the sleep can be ended early
 * @param condition  if already nonzero, the sleep is skipped entirely
 * @param timeout    duration handed to schedule_timeout() (jiffies)
 *
 * Unlike sleep_cond(), the condition is sampled only once and the function
 * returns after a single timeout or wake-up; callers must re-check the
 * condition themselves.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	/* State is set before testing the condition so a wake-up racing
	 * with the test still terminates the schedule_timeout() promptly.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}
  193. #ifndef ROUNDUP4
  194. #define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
  195. #endif
  196. #ifndef ROUNDUP8
  197. #define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
  198. #endif
  199. #ifndef ROUNDUP16
  200. #define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
  201. #endif
  202. #ifndef ROUNDUP128
  203. #define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
  204. #endif
  205. #endif /* _OCTEON_MAIN_H_ */